From 98620458e31a676325f4d21a28410b0d3a69653a Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Fri, 12 Feb 2016 17:15:19 -0800
Subject: [PATCH 001/546] Add credential authenticator interface
Signed-off-by: Derek McGowan (github: dmcgowan)
---
registry/auth/auth.go | 14 ++++++++++++++
registry/auth/htpasswd/access.go | 19 +++++++------------
registry/auth/htpasswd/htpasswd.go | 6 ++++--
3 files changed, 25 insertions(+), 14 deletions(-)
diff --git a/registry/auth/auth.go b/registry/auth/auth.go
index 0ba2eba3..0164246c 100644
--- a/registry/auth/auth.go
+++ b/registry/auth/auth.go
@@ -33,6 +33,7 @@
package auth
import (
+ "errors"
"fmt"
"net/http"
@@ -49,6 +50,14 @@ const (
UserNameKey = "auth.user.name"
)
+var (
+ // ErrInvalidCredential is returned when the auth token does not authenticate correctly.
+ ErrInvalidCredential = errors.New("invalid authorization credential")
+
+ // ErrAuthenticationFailure returned when authentication failure to be presented to agent.
+ ErrAuthenticationFailure = errors.New("authentication failure")
+)
+
// UserInfo carries information about
// an autenticated/authorized client.
type UserInfo struct {
@@ -97,6 +106,11 @@ type AccessController interface {
Authorized(ctx context.Context, access ...Access) (context.Context, error)
}
+// CredentialAuthenticator is an object which is able to validate credentials
+type CredentialAuthenticator interface {
+ AuthenticateUser(username, password string) error
+}
+
// WithUser returns a context with the authorized user info.
func WithUser(ctx context.Context, user UserInfo) context.Context {
return userInfoContext{
diff --git a/registry/auth/htpasswd/access.go b/registry/auth/htpasswd/access.go
index 6e7ba180..4f71dc27 100644
--- a/registry/auth/htpasswd/access.go
+++ b/registry/auth/htpasswd/access.go
@@ -6,7 +6,6 @@
package htpasswd
import (
- "errors"
"fmt"
"net/http"
"os"
@@ -15,14 +14,6 @@ import (
"github.com/docker/distribution/registry/auth"
)
-var (
- // ErrInvalidCredential is returned when the auth token does not authenticate correctly.
- ErrInvalidCredential = errors.New("invalid authorization credential")
-
- // ErrAuthenticationFailure returned when authentication failure to be presented to agent.
- ErrAuthenticationFailure = errors.New("authentication failure")
-)
-
type accessController struct {
realm string
htpasswd *htpasswd
@@ -65,21 +56,25 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut
if !ok {
return nil, &challenge{
realm: ac.realm,
- err: ErrInvalidCredential,
+ err: auth.ErrInvalidCredential,
}
}
- if err := ac.htpasswd.authenticateUser(username, password); err != nil {
+ if err := ac.AuthenticateUser(username, password); err != nil {
context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err)
return nil, &challenge{
realm: ac.realm,
- err: ErrAuthenticationFailure,
+ err: auth.ErrAuthenticationFailure,
}
}
return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil
}
+func (ac *accessController) AuthenticateUser(username, password string) error {
+ return ac.htpasswd.authenticateUser(username, password)
+}
+
// challenge implements the auth.Challenge interface.
type challenge struct {
realm string
diff --git a/registry/auth/htpasswd/htpasswd.go b/registry/auth/htpasswd/htpasswd.go
index 494ad0a7..83f797f7 100644
--- a/registry/auth/htpasswd/htpasswd.go
+++ b/registry/auth/htpasswd/htpasswd.go
@@ -6,6 +6,8 @@ import (
"io"
"strings"
+ "github.com/docker/distribution/registry/auth"
+
"golang.org/x/crypto/bcrypt"
)
@@ -33,12 +35,12 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err
// timing attack paranoia
bcrypt.CompareHashAndPassword([]byte{}, []byte(password))
- return ErrAuthenticationFailure
+ return auth.ErrAuthenticationFailure
}
err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password))
if err != nil {
- return ErrAuthenticationFailure
+ return auth.ErrAuthenticationFailure
}
return nil
From 1126e322341b8e2da397b0e1e2caa298d543cfae Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Fri, 4 Mar 2016 13:53:06 -0800
Subject: [PATCH 002/546] Add post token implementation
Signed-off-by: Derek McGowan (github: dmcgowan)
---
contrib/token-server/errors.go | 38 ++++++
contrib/token-server/main.go | 219 ++++++++++++++++++++++++++++++---
contrib/token-server/token.go | 17 +++
registry/auth/auth.go | 4 +-
4 files changed, 260 insertions(+), 18 deletions(-)
create mode 100644 contrib/token-server/errors.go
diff --git a/contrib/token-server/errors.go b/contrib/token-server/errors.go
new file mode 100644
index 00000000..bcac8ee3
--- /dev/null
+++ b/contrib/token-server/errors.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+)
+
+var (
+ errGroup = "tokenserver"
+
+ // ErrorBadTokenOption is returned when a token parameter is invalid
+ ErrorBadTokenOption = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BAD_TOKEN_OPTION",
+ Message: "bad token option",
+ Description: `This error may be returned when a request for a
+ token contains an option which is not valid`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorMissingRequiredField is returned when a required form field is missing
+ ErrorMissingRequiredField = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MISSING_REQUIRED_FIELD",
+ Message: "missing required field",
+ Description: `This error may be returned when a request for a
+ token does not contain a required form field`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorUnsupportedValue is returned when a form field has an unsupported value
+ ErrorUnsupportedValue = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "UNSUPPORTED_VALUE",
+ Message: "unsupported value",
+ Description: `This error may be returned when a request for a
+ token contains a form field with an unsupported value`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+)
diff --git a/contrib/token-server/main.go b/contrib/token-server/main.go
index e47e11c2..edd894f4 100644
--- a/contrib/token-server/main.go
+++ b/contrib/token-server/main.go
@@ -3,8 +3,11 @@ package main
import (
"encoding/json"
"flag"
+ "math/rand"
"net/http"
+ "strconv"
"strings"
+ "time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
@@ -73,15 +76,20 @@ func main() {
logrus.Fatalf("Error initializing access controller: %v", err)
}
+ // TODO: Make configurable
+ issuer.Expiration = 15 * time.Minute
+
ctx := context.Background()
ts := &tokenServer{
issuer: issuer,
accessController: ac,
+ refreshCache: map[string]refreshToken{},
}
router := mux.NewRouter()
router.Path("/token/").Methods("GET").Handler(handlerWithContext(ctx, ts.getToken))
+ router.Path("/token/").Methods("POST").Handler(handlerWithContext(ctx, ts.postToken))
if cert == "" {
err = http.ListenAndServe(addr, router)
@@ -120,9 +128,52 @@ func handleError(ctx context.Context, err error, w http.ResponseWriter) {
context.GetResponseLogger(ctx).Info("application error")
}
+var refreshCharacters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+const refreshTokenLength = 15
+
+func newRefreshToken() string {
+ s := make([]rune, refreshTokenLength)
+ for i := range s {
+ s[i] = refreshCharacters[rand.Intn(len(refreshCharacters))]
+ }
+ return string(s)
+}
+
+type refreshToken struct {
+ subject string
+ service string
+}
+
type tokenServer struct {
issuer *TokenIssuer
accessController auth.AccessController
+ refreshCache map[string]refreshToken
+}
+
+type tokenResponse struct {
+ Token string `json:"access_token"`
+ RefreshToken string `json:"refresh_token,omitempty"`
+ ExpiresIn int `json:"expires_in,omitempty"`
+}
+
+func filterAccessList(ctx context.Context, scope string, requestedAccessList []auth.Access) []auth.Access {
+ if !strings.HasSuffix(scope, "/") {
+ scope = scope + "/"
+ }
+ grantedAccessList := make([]auth.Access, 0, len(requestedAccessList))
+ for _, access := range requestedAccessList {
+ if access.Type != "repository" {
+ context.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type)
+ continue
+ }
+ if !strings.HasPrefix(access.Name, scope) {
+ context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name)
+ continue
+ }
+ grantedAccessList = append(grantedAccessList, access)
+ }
+ return grantedAccessList
}
// getToken handles authenticating the request and authorizing access to the
@@ -133,6 +184,15 @@ func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *h
params := r.URL.Query()
service := params.Get("service")
scopeSpecifiers := params["scope"]
+ var offline bool
+ if offlineStr := params.Get("offline_token"); offlineStr != "" {
+ var err error
+ offline, err = strconv.ParseBool(offlineStr)
+ if err != nil {
+ handleError(ctx, ErrorBadTokenOption.WithDetail(err), w)
+ return
+ }
+ }
requestedAccessList := ResolveScopeSpecifiers(ctx, scopeSpecifiers)
@@ -166,20 +226,7 @@ func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *h
ctx = context.WithValue(ctx, "requestedAccess", requestedAccessList)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, "requestedAccess"))
- scopePrefix := username + "/"
- grantedAccessList := make([]auth.Access, 0, len(requestedAccessList))
- for _, access := range requestedAccessList {
- if access.Type != "repository" {
- context.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type)
- continue
- }
- if !strings.HasPrefix(access.Name, scopePrefix) {
- context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name)
- continue
- }
- grantedAccessList = append(grantedAccessList, access)
- }
-
+ grantedAccessList := filterAccessList(ctx, username, requestedAccessList)
ctx = context.WithValue(ctx, "grantedAccess", grantedAccessList)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, "grantedAccess"))
@@ -191,11 +238,151 @@ func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *h
context.GetLogger(ctx).Info("authorized client")
- // Get response context.
+ response := tokenResponse{
+ Token: token,
+ ExpiresIn: int(ts.issuer.Expiration.Seconds()),
+ }
+
+ if offline {
+ response.RefreshToken = newRefreshToken()
+ ts.refreshCache[response.RefreshToken] = refreshToken{
+ subject: username,
+ service: service,
+ }
+ }
+
ctx, w = context.WithResponseWriter(ctx, w)
w.Header().Set("Content-Type", "application/json")
- json.NewEncoder(w).Encode(map[string]string{"token": token})
+ json.NewEncoder(w).Encode(response)
context.GetResponseLogger(ctx).Info("get token complete")
}
+
+type postTokenResponse struct {
+ Token string `json:"access_token"`
+ Scope string `json:"scope,omitempty"`
+ ExpiresIn int `json:"expires_in,omitempty"`
+ IssuedAt string `json:"issued_at,omitempty"`
+ RefreshToken string `json:"refresh_token,omitempty"`
+}
+
+// postToken handles authenticating the request and authorizing access to the
+// requested scopes.
+func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ grantType := r.PostFormValue("grant_type")
+ if grantType == "" {
+ handleError(ctx, ErrorMissingRequiredField.WithDetail("missing grant_type value"), w)
+ return
+ }
+
+ service := r.PostFormValue("service")
+ if service == "" {
+ handleError(ctx, ErrorMissingRequiredField.WithDetail("missing service value"), w)
+ return
+ }
+
+ clientID := r.PostFormValue("client_id")
+ if clientID == "" {
+ handleError(ctx, ErrorMissingRequiredField.WithDetail("missing client_id value"), w)
+ return
+ }
+
+ var offline bool
+ switch r.PostFormValue("access_type") {
+ case "", "online":
+ case "offline":
+ offline = true
+ default:
+ handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown access_type value"), w)
+ return
+ }
+
+ requestedAccessList := ResolveScopeList(ctx, r.PostFormValue("scope"))
+
+ var subject string
+ var rToken string
+ switch grantType {
+ case "refresh_token":
+ rToken = r.PostFormValue("refresh_token")
+ if rToken == "" {
+ handleError(ctx, ErrorUnsupportedValue.WithDetail("missing refresh_token value"), w)
+ return
+ }
+ rt, ok := ts.refreshCache[rToken]
+ if !ok || rt.service != service {
+ handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid refresh token"), w)
+ return
+ }
+ subject = rt.subject
+ case "password":
+ ca, ok := ts.accessController.(auth.CredentialAuthenticator)
+ if !ok {
+ handleError(ctx, ErrorUnsupportedValue.WithDetail("password grant type not supported"), w)
+ return
+ }
+ subject = r.PostFormValue("username")
+ if subject == "" {
+ handleError(ctx, ErrorUnsupportedValue.WithDetail("missing username value"), w)
+ return
+ }
+ password := r.PostFormValue("password")
+ if password == "" {
+ handleError(ctx, ErrorUnsupportedValue.WithDetail("missing password value"), w)
+ return
+ }
+ if err := ca.AuthenticateUser(subject, password); err != nil {
+ handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid credentials"), w)
+ return
+ }
+ default:
+ handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown grant_type value"), w)
+ return
+ }
+
+ ctx = context.WithValue(ctx, "acctSubject", subject)
+ ctx = context.WithLogger(ctx, context.GetLogger(ctx, "acctSubject"))
+
+ context.GetLogger(ctx).Info("authenticated client")
+
+ ctx = context.WithValue(ctx, "requestedAccess", requestedAccessList)
+ ctx = context.WithLogger(ctx, context.GetLogger(ctx, "requestedAccess"))
+
+ grantedAccessList := filterAccessList(ctx, subject, requestedAccessList)
+ ctx = context.WithValue(ctx, "grantedAccess", grantedAccessList)
+ ctx = context.WithLogger(ctx, context.GetLogger(ctx, "grantedAccess"))
+
+ token, err := ts.issuer.CreateJWT(subject, service, grantedAccessList)
+ if err != nil {
+ handleError(ctx, err, w)
+ return
+ }
+
+ context.GetLogger(ctx).Info("authorized client")
+
+ response := postTokenResponse{
+ Token: token,
+ ExpiresIn: int(ts.issuer.Expiration.Seconds()),
+ IssuedAt: time.Now().UTC().Format(time.RFC3339),
+ Scope: ToScopeList(grantedAccessList),
+ }
+
+ if offline {
+ rToken = newRefreshToken()
+ ts.refreshCache[rToken] = refreshToken{
+ subject: subject,
+ service: service,
+ }
+ }
+
+ if rToken != "" {
+ response.RefreshToken = rToken
+ }
+
+ ctx, w = context.WithResponseWriter(ctx, w)
+
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(response)
+
+ context.GetResponseLogger(ctx).Info("post token complete")
+}
diff --git a/contrib/token-server/token.go b/contrib/token-server/token.go
index 15ace622..e69fb9c1 100644
--- a/contrib/token-server/token.go
+++ b/contrib/token-server/token.go
@@ -55,6 +55,23 @@ func ResolveScopeSpecifiers(ctx context.Context, scopeSpecs []string) []auth.Acc
return requestedAccessList
}
+// ResolveScopeList converts a scope list from a token request's
+// `scope` parameter into a list of standard access objects.
+func ResolveScopeList(ctx context.Context, scopeList string) []auth.Access {
+ scopes := strings.Split(scopeList, " ")
+ return ResolveScopeSpecifiers(ctx, scopes)
+}
+
+// ToScopeList converts a list of access to a
+// scope list string
+func ToScopeList(access []auth.Access) string {
+ var s []string
+ for _, a := range access {
+ s = append(s, fmt.Sprintf("%s:%s:%s", a.Type, a.Name, a.Action))
+ }
+ return strings.Join(s, ",")
+}
+
// TokenIssuer represents an issuer capable of generating JWT tokens
type TokenIssuer struct {
Issuer string
diff --git a/registry/auth/auth.go b/registry/auth/auth.go
index 0164246c..0cb37235 100644
--- a/registry/auth/auth.go
+++ b/registry/auth/auth.go
@@ -54,7 +54,7 @@ var (
// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
ErrInvalidCredential = errors.New("invalid authorization credential")
- // ErrAuthenticationFailure returned when authentication failure to be presented to agent.
+ // ErrAuthenticationFailure returned when authentication fails.
ErrAuthenticationFailure = errors.New("authentication failure")
)
@@ -106,7 +106,7 @@ type AccessController interface {
Authorized(ctx context.Context, access ...Access) (context.Context, error)
}
-// CredentialAuthenticator is an object which is able to validate credentials
+// CredentialAuthenticator is an object which is able to authenticate credentials
type CredentialAuthenticator interface {
AuthenticateUser(username, password string) error
}
From ab2394446ccde47ed07fb86743f4a425107c6298 Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Tue, 12 Apr 2016 14:03:56 -0700
Subject: [PATCH 003/546] Integration test readme update
Updates the readme to mention running the tests using golem.
Also provides instructions for making test development easier.
Signed-off-by: Derek McGowan (github: dmcgowan)
---
contrib/docker-integration/README.md | 153 +++++++--------------------
1 file changed, 40 insertions(+), 113 deletions(-)
diff --git a/contrib/docker-integration/README.md b/contrib/docker-integration/README.md
index 15e0a080..bc5be9d9 100644
--- a/contrib/docker-integration/README.md
+++ b/contrib/docker-integration/README.md
@@ -1,136 +1,63 @@
# Docker Registry Integration Testing
-These integration tests cover interactions between the Docker daemon and the
-registry server. All tests are run using the docker cli.
+These integration tests cover interactions between registry clients such as
+the docker daemon and the registry server. All tests can be run using the
+[golem integration test runner](https://github.com/docker/golem)
-The compose configuration is intended to setup a testing environment for Docker
-using multiple registry configurations. These configurations include different
-combinations of a v1 and v2 registry as well as TLS configurations.
+The integration tests configure components using docker compose
+(see docker-compose.yaml) and the runner can be configured using the golem
+configuration file (see golem.conf).
-## Running inside of Docker
-### Get integration container
-The container image to run the integation tests will need to be pulled or built
-locally.
+## Running integration tests
-*Building locally*
-```
-$ docker build -t distribution/docker-integration .
-```
+### Run using multiversion script
-### Run script
+The integration tests in the `contrib/docker-integration` directory can be simply
+run by executing the run script `./run_multiversion.sh`. If there is no running
+daemon to connect to, run as `./run_multiversion.sh -d`.
-Invoke the tests within Docker through the `run.sh` script.
+This command will build the distribution image from the locally checked out
+version and run against multiple versions of docker defined in the script. To
+run a specific version of the registry or docker, Golem will need to be
+executed manually.
-```
-$ ./run.sh
-```
+### Run manually using Golem
-Run with aufs driver and tmp volume
-**NOTE: Using a volume will prevent multiple runs from needing to
-re-pull images**
-```
-$ DOCKER_GRAPHDRIVER=aufs DOCKER_VOLUME=/tmp/volume ./run.sh
-```
+Using the golem tool directly allows running against multiple versions of
+the registry and docker. Running against multiple versions of the registry
+can be useful for testing changes in the docker daemon which are not
+covered by the default run script.
-### Example developer flow
+#### Installing Golem
-These tests are useful for developing both as a registry and docker
-core developer. The following setup may be used to do integration
-testing between development versions
+Golem is distributed as an executable binary which can be installed from
+the [release page](https://github.com/docker/golem/releases/tag/v0.1).
-Insert into your `.zshrc` or `.bashrc`
+#### Running golem with docker
-```
-# /usr/lib/docker for Docker-in-Docker
-# Set this directory to make each invocation run much faster, without
-# the need to repull images.
-export DOCKER_VOLUME=$HOME/.docker-test-volume
+Additionally golem can be run as a docker image requiring no additional
+installation.
-# Use overlay for all Docker testing, try aufs if overlay not supported
-export DOCKER_GRAPHDRIVER=overlay
+`docker run --privileged -v "$GOPATH/src/github.com/docker/distribution/contrib/docker-integration:/test" -w /test distribution/golem golem -rundaemon .`
-# Name this according to personal preference
-function rdtest() {
- if [ "$1" != "" ]; then
- DOCKER_BINARY=$GOPATH/src/github.com/docker/docker/bundles/$1/binary/docker
- if [ ! -f $DOCKER_BINARY ]; then
- current_version=`cat $GOPATH/src/github.com/docker/docker/VERSION`
- echo "$DOCKER_BINARY does not exist"
- echo "Current checked out docker version: $current_version"
- echo "Checkout desired version and run 'make binary' from $GOPATH/src/github.com/docker/docker"
- return 1
- fi
- fi
+#### Golem custom images
- $GOPATH/src/github.com/docker/distribution/contrib/docker-integration/run.sh
-}
-```
+Golem tests version of software by defining the docker image to test.
-Run with Docker release version
-```
-$ rdtest
-```
+Run with registry 2.2.1 and docker 1.10.3
-Run using local development version of docker
-```
-$ cd $GOPATH/src/github.com/docker/docker
-$ make binary
-$ rdtest `cat VERSION`
-```
+`golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .`
-## Running manually outside of Docker
-### Install Docker Compose
+#### Use golem caching for developing tests
-[Docker Compose Installation Guide](https://docs.docker.com/compose/install/)
+Golem allows caching image configuration to reduce test start up time.
+Using this cache will allow tests with the same set of images to start
+up quickly. This can be useful when developing tests and needing the
+test to run quickly. If there are changes which affect the image (such as
+building a new registry image), then startup time will be slower.
-### Start compose setup
-```
-docker-compose up
-```
+Run this command multiple times and after the first time test runs
+should start much quicker.
+`golem -cache ~/.cache/docker/golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .`
-### Install Certificates
-The certificates must be installed in /etc/docker/cert.d in order to use TLS
-client auth and use the CA certificate.
-```
-sudo sh ./install_certs.sh
-```
-
-### Test with Docker
-Tag an image as with any other private registry. Attempt to push the image.
-
-```
-docker pull hello-world
-docker tag hello-world localhost:5440/hello-world
-docker push localhost:5440/hello-world
-
-docker tag hello-world localhost:5441/hello-world
-docker push localhost:5441/hello-world
-# Perform login using user `testuser` and password `passpassword`
-```
-
-### Set /etc/hosts entry
-Find the non-localhost ip address of local machine
-
-### Run bats
-Run the bats tests after updating /etc/hosts, installing the certificates, and
-running the `docker-compose` script.
-```
-bats -p .
-```
-
-## Configurations
-
-Port | V2 | TLS | Authentication
---- | --- | --- | ---
-5000 | yes | no | none
-5002 | yes | no | none
-5440 | yes | yes | none
-5441 | yes | yes | basic (testuser/passpassword)
-5442 | yes | yes | TLS client
-5443 | yes | yes | TLS client (no CA)
-5444 | yes | yes | TLS client + basic (testuser/passpassword)
-5445 | yes | yes (no CA) | none
-5446 | yes | yes (no CA) | basic (testuser/passpassword)
-5447 | yes | yes (no CA) | TLS client
-5448 | yes | yes (SSLv3) | none
From e6f8d7c28e672168b47dc19128f6e37680e7e345 Mon Sep 17 00:00:00 2001
From: Nikita
Date: Wed, 13 Apr 2016 19:32:10 +0400
Subject: [PATCH 004/546] Update swift.go
Signed-off-by: Nikita Tarasov
---
registry/storage/driver/swift/swift.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/registry/storage/driver/swift/swift.go b/registry/storage/driver/swift/swift.go
index 91384828..4c115030 100644
--- a/registry/storage/driver/swift/swift.go
+++ b/registry/storage/driver/swift/swift.go
@@ -69,6 +69,7 @@ type Parameters struct {
DomainID string
TrustID string
Region string
+ AuthVersion int
Container string
Prefix string
InsecureSkipVerify bool
@@ -174,6 +175,7 @@ func New(params Parameters) (*Driver, error) {
ApiKey: params.Password,
AuthUrl: params.AuthURL,
Region: params.Region,
+ AuthVersion: params.AuthVersion,
UserAgent: "distribution/" + version.Version,
Tenant: params.Tenant,
TenantId: params.TenantID,
From bcb7989fca4b65cae10f3d553b4a3cf633b4477d Mon Sep 17 00:00:00 2001
From: Nikita
Date: Wed, 13 Apr 2016 19:37:45 +0400
Subject: [PATCH 005/546] test
Signed-off-by: Nikita Tarasov
---
registry/storage/driver/swift/swift_test.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/registry/storage/driver/swift/swift_test.go b/registry/storage/driver/swift/swift_test.go
index b2ff6001..bffd54e8 100644
--- a/registry/storage/driver/swift/swift_test.go
+++ b/registry/storage/driver/swift/swift_test.go
@@ -33,6 +33,7 @@ func init() {
trustID string
container string
region string
+ AuthVersion int
insecureSkipVerify bool
secretKey string
accessKey string
@@ -52,6 +53,7 @@ func init() {
trustID = os.Getenv("SWIFT_TRUST_ID")
container = os.Getenv("SWIFT_CONTAINER_NAME")
region = os.Getenv("SWIFT_REGION_NAME")
+ AuthVersion = os.Getenv("SWIFT_AUTH_VERSION")
insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY"))
secretKey = os.Getenv("SWIFT_SECRET_KEY")
accessKey = os.Getenv("SWIFT_ACCESS_KEY")
@@ -85,6 +87,7 @@ func init() {
domainID,
trustID,
region,
+ AuthVersion,
container,
root,
insecureSkipVerify,
From 098005177f224e5ae3dd8c6e73e94dcd6dd128f2 Mon Sep 17 00:00:00 2001
From: yuzou
Date: Thu, 14 Apr 2016 16:41:35 +0800
Subject: [PATCH 006/546] fix typo for log message of layer push event in
blobServiceListener Put function.
Signed-off-by: yuzou
---
notifications/listener.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/notifications/listener.go b/notifications/listener.go
index fa4b722d..625e4d24 100644
--- a/notifications/listener.go
+++ b/notifications/listener.go
@@ -157,7 +157,7 @@ func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []b
desc, err := bsl.BlobStore.Put(ctx, mediaType, p)
if err == nil {
if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Named(), desc); err != nil {
- context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err)
+ context.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err)
}
}
From e8feabc77572a4ddd62ef62cb0c3307af141f9d9 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Fri, 8 Apr 2016 08:55:11 -0700
Subject: [PATCH 007/546] Use Alpine Linux as the parent image for the
registry.
Signed-off-by: Richard Scothern
---
Dockerfile | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index abb3e3bb..fa9cd462 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,8 +1,4 @@
-FROM golang:1.6
-
-RUN apt-get update && \
- apt-get install -y apache2-utils && \
- rm -rf /var/lib/apt/lists/*
+FROM golang:1.6-alpine
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV DOCKER_BUILDTAGS include_oss include_gcs
@@ -10,6 +6,10 @@ ENV DOCKER_BUILDTAGS include_oss include_gcs
WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
+
+RUN set -ex \
+ && apk add --no-cache make git
+
RUN make PREFIX=/go clean binaries
VOLUME ["/var/lib/registry"]
From f2686b8db4a2bc08978641730040aa435823b1cf Mon Sep 17 00:00:00 2001
From: yuzou
Date: Fri, 15 Apr 2016 11:17:04 +0800
Subject: [PATCH 008/546] use context.GetLogger to replace logrus in listener
Signed-off-by: yuzou
---
notifications/listener.go | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/notifications/listener.go b/notifications/listener.go
index 625e4d24..b99133da 100644
--- a/notifications/listener.go
+++ b/notifications/listener.go
@@ -3,7 +3,6 @@ package notifications
import (
"net/http"
- "github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
@@ -71,7 +70,7 @@ func (msl *manifestServiceListener) Delete(ctx context.Context, dgst digest.Dige
err := msl.ManifestService.Delete(ctx, dgst)
if err == nil {
if err := msl.parent.listener.ManifestDeleted(msl.parent.Repository.Named(), dgst); err != nil {
- logrus.Errorf("error dispatching manifest delete to listener: %v", err)
+ context.GetLogger(ctx).Errorf("error dispatching manifest delete to listener: %v", err)
}
}
@@ -82,7 +81,7 @@ func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest,
sm, err := msl.ManifestService.Get(ctx, dgst)
if err == nil {
if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Named(), sm, options...); err != nil {
- logrus.Errorf("error dispatching manifest pull to listener: %v", err)
+ context.GetLogger(ctx).Errorf("error dispatching manifest pull to listener: %v", err)
}
}
@@ -94,7 +93,7 @@ func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Man
if err == nil {
if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Named(), sm, options...); err != nil {
- logrus.Errorf("error dispatching manifest push to listener: %v", err)
+ context.GetLogger(ctx).Errorf("error dispatching manifest push to listener: %v", err)
}
}
From b72d74464aef6cedae39c73e63255dd13f9afa3d Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Fri, 15 Apr 2016 09:22:44 -0700
Subject: [PATCH 009/546] Correction for JSON example.
Signed-off-by: Richard Scothern
---
docs/storage-drivers/s3.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/storage-drivers/s3.md b/docs/storage-drivers/s3.md
index 2a7aa9f5..d78fc988 100644
--- a/docs/storage-drivers/s3.md
+++ b/docs/storage-drivers/s3.md
@@ -198,7 +198,7 @@ The following IAM permissions are required by the registry for push and pull. S
"Action": [
"s3:ListBucket",
"s3:GetBucketLocation",
- "s3:ListBucketMultipartUploads",
+ "s3:ListBucketMultipartUploads"
],
"Resource": "arn:aws:s3:::mybucket"
},
From 17756eb43e8bf7946de521a284514b860e1e9b1e Mon Sep 17 00:00:00 2001
From: Fabio Huser
Date: Sun, 17 Apr 2016 12:04:15 +0200
Subject: [PATCH 010/546] Clarify kid format for JWT token auth in docs
The kid value can have an arbitrary format according to the JOSE specification, but Docker distribution expects a specific format (libtrust fingerprint) to work. This is not written in the documentation so far and is only mentioned in the libtrust source code itself.
Signed-off-by: Fabio Huser
---
docs/spec/auth/jwt.md | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/docs/spec/auth/jwt.md b/docs/spec/auth/jwt.md
index f627b17a..87de62af 100644
--- a/docs/spec/auth/jwt.md
+++ b/docs/spec/auth/jwt.md
@@ -69,8 +69,17 @@ Token has 3 main parts:
The header of a JSON Web Token is a standard JOSE header. The "typ" field
will be "JWT" and it will also contain the "alg" which identifies the
- signing algorithm used to produce the signature. It will also usually have
- a "kid" field, the ID of the key which was used to sign the token.
+ signing algorithm used to produce the signature. It also must have a "kid"
+ field, representing the ID of the key which was used to sign the token.
+
+ The "kid" field has to be in a libtrust fingerprint compatible format.
+ Such a format can be generated by following steps:
+
+ 1. Take the DER encoded public key which the JWT token was signed against.
+
+ 2. Create a SHA256 hash out of it and truncate to 240bits.
+
+ 3. Split the result into 12 base32 encoded groups with `:` as delimiter.
Here is an example JOSE Header for a JSON Web Token (formatted with
whitespace for readability):
From b51607f9f01576fa9cc4821ee7fb3f40171f1d64 Mon Sep 17 00:00:00 2001
From: Nikita Tarasov
Date: Wed, 13 Apr 2016 19:06:33 +0300
Subject: [PATCH 011/546] fix test Signed-off-by: Nikita Tarasov
Signed-off-by: Nikita Tarasov
---
registry/storage/driver/swift/swift_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/registry/storage/driver/swift/swift_test.go b/registry/storage/driver/swift/swift_test.go
index bffd54e8..b4f1c738 100644
--- a/registry/storage/driver/swift/swift_test.go
+++ b/registry/storage/driver/swift/swift_test.go
@@ -53,7 +53,7 @@ func init() {
trustID = os.Getenv("SWIFT_TRUST_ID")
container = os.Getenv("SWIFT_CONTAINER_NAME")
region = os.Getenv("SWIFT_REGION_NAME")
- AuthVersion = os.Getenv("SWIFT_AUTH_VERSION")
+ AuthVersion = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION"))
insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY"))
secretKey = os.Getenv("SWIFT_SECRET_KEY")
accessKey = os.Getenv("SWIFT_ACCESS_KEY")
From b4f060599ad885aad25dcf94b04dba4673b49143 Mon Sep 17 00:00:00 2001
From: Nikita Tarasov
Date: Sun, 17 Apr 2016 20:05:51 +0300
Subject: [PATCH 012/546] docs + fix test Signed-off-by: Nikita Tarasov
---
docs/storage-drivers/swift.md | 10 ++++++++++
registry/storage/driver/swift/swift_test.go | 2 +-
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/docs/storage-drivers/swift.md b/docs/storage-drivers/swift.md
index cab0bbd2..28e87d4d 100644
--- a/docs/storage-drivers/swift.md
+++ b/docs/storage-drivers/swift.md
@@ -298,6 +298,16 @@ An implementation of the `storagedriver.StorageDriver` interface that uses [Open
+
+
+ authversion
+ |
+
+
+ Optionally, specify the OpenStack Auth's version, for example 3 . The driver uses 0 (autodetect) by default.
+
+ |
+
chunksize
diff --git a/registry/storage/driver/swift/swift_test.go b/registry/storage/driver/swift/swift_test.go
index b4f1c738..655aa996 100644
--- a/registry/storage/driver/swift/swift_test.go
+++ b/registry/storage/driver/swift/swift_test.go
@@ -53,7 +53,7 @@ func init() {
trustID = os.Getenv("SWIFT_TRUST_ID")
container = os.Getenv("SWIFT_CONTAINER_NAME")
region = os.Getenv("SWIFT_REGION_NAME")
- AuthVersion = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION"))
+ AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION"))
insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY"))
secretKey = os.Getenv("SWIFT_SECRET_KEY")
accessKey = os.Getenv("SWIFT_ACCESS_KEY")
From 9a67520af7158c32e338be5ef214d59802bec412 Mon Sep 17 00:00:00 2001
From: Stefan Majewsky
Date: Tue, 19 Apr 2016 13:48:08 +0200
Subject: [PATCH 013/546] wait for DLO segments to show up when Close()ing the
writer
Not just when Commit()ing the result. This fixes some errors I observed
when the layer (i.e. the DLO) is Stat()ed immediately after closing,
and reports the wrong file size because the container listing is not
yet up-to-date.
Signed-off-by: Stefan Majewsky
---
registry/storage/driver/swift/swift.go | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/registry/storage/driver/swift/swift.go b/registry/storage/driver/swift/swift.go
index 91384828..0cc037af 100644
--- a/registry/storage/driver/swift/swift.go
+++ b/registry/storage/driver/swift/swift.go
@@ -742,6 +742,9 @@ func (w *writer) Close() error {
if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil {
return err
}
+ if err := w.waitForSegmentsToShowUp(); err != nil {
+ return err
+ }
}
w.closed = true
@@ -776,10 +779,14 @@ func (w *writer) Commit() error {
}
w.committed = true
+ return w.waitForSegmentsToShowUp()
+}
+func (w *writer) waitForSegmentsToShowUp() error {
var err error
waitingTime := readAfterWriteWait
endTime := time.Now().Add(readAfterWriteTimeout)
+
for {
var info swift.Object
if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil {
From f76c622d8c80eece81f103e0b56cc9db8a0bc726 Mon Sep 17 00:00:00 2001
From: jhaohai
Date: Thu, 21 Apr 2016 11:51:34 +0800
Subject: [PATCH 014/546] add cn-north-1 to valid check
Signed-off-by: jhaohai
---
registry/storage/driver/s3-aws/s3.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go
index f7facb61..bfaa6ba9 100644
--- a/registry/storage/driver/s3-aws/s3.go
+++ b/registry/storage/driver/s3-aws/s3.go
@@ -80,6 +80,7 @@ func init() {
"ap-northeast-1",
"ap-northeast-2",
"sa-east-1",
+ "cn-north-1",
} {
validRegions[region] = struct{}{}
}
From 4b217ccbf566e6e38f2c62292ebd980d920fbcda Mon Sep 17 00:00:00 2001
From: Andrew Hsu
Date: Thu, 21 Apr 2016 15:54:48 -0700
Subject: [PATCH 015/546] add middleware storage driver for redirect
Signed-off-by: Andrew Hsu (github: andrewhsu)
---
.../driver/middleware/redirect/middleware.go | 47 ++++++++++++++
.../middleware/redirect/middleware_test.go | 62 +++++++++++++++++++
2 files changed, 109 insertions(+)
create mode 100644 registry/storage/driver/middleware/redirect/middleware.go
create mode 100644 registry/storage/driver/middleware/redirect/middleware_test.go
diff --git a/registry/storage/driver/middleware/redirect/middleware.go b/registry/storage/driver/middleware/redirect/middleware.go
new file mode 100644
index 00000000..286a84ab
--- /dev/null
+++ b/registry/storage/driver/middleware/redirect/middleware.go
@@ -0,0 +1,47 @@
+package middleware
+
+import (
+ "fmt"
+ "github.com/docker/distribution/context"
+ storagedriver "github.com/docker/distribution/registry/storage/driver"
+ storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
+ "net/url"
+ "strings"
+)
+
+type redirectStorageMiddleware struct {
+ storagedriver.StorageDriver
+ scheme string
+ host string
+}
+
+var _ storagedriver.StorageDriver = &redirectStorageMiddleware{}
+
+func newRedirectStorageMiddleware(sd storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
+ o, ok := options["baseurl"]
+ if !ok {
+ return nil, fmt.Errorf("no baseurl provided")
+ }
+ b, ok := o.(string)
+ if !ok {
+ return nil, fmt.Errorf("baseurl must be a string")
+ }
+ if !strings.Contains(b, "://") {
+ b = "https://" + b
+ }
+ u, err := url.Parse(b)
+ if err != nil {
+ return nil, fmt.Errorf("invalid baseurl: %v", err)
+ }
+
+ return &redirectStorageMiddleware{StorageDriver: sd, scheme: u.Scheme, host: u.Host}, nil
+}
+
+func (r *redirectStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+ u := &url.URL{Scheme: r.scheme, Host: r.host, Path: path}
+ return u.String(), nil
+}
+
+func init() {
+ storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware))
+}
diff --git a/registry/storage/driver/middleware/redirect/middleware_test.go b/registry/storage/driver/middleware/redirect/middleware_test.go
new file mode 100644
index 00000000..31b661b6
--- /dev/null
+++ b/registry/storage/driver/middleware/redirect/middleware_test.go
@@ -0,0 +1,62 @@
+package middleware
+
+import (
+ check "gopkg.in/check.v1"
+ "testing"
+)
+
+func Test(t *testing.T) { check.TestingT(t) }
+
+type MiddlewareSuite struct{}
+
+var _ = check.Suite(&MiddlewareSuite{})
+
+func (s *MiddlewareSuite) TestNoConfig(c *check.C) {
+ options := make(map[string]interface{})
+ _, err := newRedirectStorageMiddleware(nil, options)
+ c.Assert(err, check.ErrorMatches, "no baseurl provided")
+}
+
+func (s *MiddlewareSuite) TestDefaultScheme(c *check.C) {
+ options := make(map[string]interface{})
+ options["baseurl"] = "example.com"
+ middleware, err := newRedirectStorageMiddleware(nil, options)
+ c.Assert(err, check.Equals, nil)
+
+ m, ok := middleware.(*redirectStorageMiddleware)
+ c.Assert(ok, check.Equals, true)
+ c.Assert(m.scheme, check.Equals, "https")
+ c.Assert(m.host, check.Equals, "example.com")
+}
+
+func (s *MiddlewareSuite) TestHTTPS(c *check.C) {
+ options := make(map[string]interface{})
+ options["baseurl"] = "https://example.com"
+ middleware, err := newRedirectStorageMiddleware(nil, options)
+ c.Assert(err, check.Equals, nil)
+
+ m, ok := middleware.(*redirectStorageMiddleware)
+ c.Assert(ok, check.Equals, true)
+ c.Assert(m.scheme, check.Equals, "https")
+ c.Assert(m.host, check.Equals, "example.com")
+
+ url, err := middleware.URLFor(nil, "/rick/data", nil)
+ c.Assert(err, check.Equals, nil)
+ c.Assert(url, check.Equals, "https://example.com/rick/data")
+}
+
+func (s *MiddlewareSuite) TestHTTP(c *check.C) {
+ options := make(map[string]interface{})
+ options["baseurl"] = "http://example.com"
+ middleware, err := newRedirectStorageMiddleware(nil, options)
+ c.Assert(err, check.Equals, nil)
+
+ m, ok := middleware.(*redirectStorageMiddleware)
+ c.Assert(ok, check.Equals, true)
+ c.Assert(m.scheme, check.Equals, "http")
+ c.Assert(m.host, check.Equals, "example.com")
+
+ url, err := middleware.URLFor(nil, "morty/data", nil)
+ c.Assert(err, check.Equals, nil)
+ c.Assert(url, check.Equals, "http://example.com/morty/data")
+}
From 21f38a74e69b4616a6af1e4a4982205f5626b335 Mon Sep 17 00:00:00 2001
From: Serge Dubrouski
Date: Thu, 21 Apr 2016 20:04:22 -0600
Subject: [PATCH 016/546] Add blobWriter.Close() call into blobWriter.Commit()
Signed-off-by: Serge Dubrouski
---
registry/storage/blobwriter.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go
index 7f280d36..418df818 100644
--- a/registry/storage/blobwriter.go
+++ b/registry/storage/blobwriter.go
@@ -56,6 +56,8 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor)
return distribution.Descriptor{}, err
}
+ bw.Close()
+
canonical, err := bw.validateBlob(ctx, desc)
if err != nil {
return distribution.Descriptor{}, err
From 987faca8a63a462ba34b063d8f8ee9aa09d1beb9 Mon Sep 17 00:00:00 2001
From: Anis Elleuch
Date: Sat, 23 Apr 2016 11:13:15 +0100
Subject: [PATCH 017/546] Sorting completed parts by part number for a better
accordance with the S3 spec
Signed-off-by: Anis Elleuch
---
registry/storage/driver/s3-aws/s3.go | 26 ++++++++++++++++++++------
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go
index f7facb61..4122a4af 100644
--- a/registry/storage/driver/s3-aws/s3.go
+++ b/registry/storage/driver/s3-aws/s3.go
@@ -18,6 +18,7 @@ import (
"io/ioutil"
"net/http"
"reflect"
+ "sort"
"strconv"
"strings"
"time"
@@ -718,6 +719,12 @@ func (d *driver) newWriter(key, uploadID string, parts []*s3.Part) storagedriver
}
}
+type completedParts []*s3.CompletedPart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
+
func (w *writer) Write(p []byte) (int, error) {
if w.closed {
return 0, fmt.Errorf("already closed")
@@ -730,19 +737,22 @@ func (w *writer) Write(p []byte) (int, error) {
// If the last written part is smaller than minChunkSize, we need to make a
// new multipart upload :sadface:
if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize {
- var completedParts []*s3.CompletedPart
+ var completedUploadedParts completedParts
for _, part := range w.parts {
- completedParts = append(completedParts, &s3.CompletedPart{
+ completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
+
+ sort.Sort(completedUploadedParts)
+
_, err := w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
MultipartUpload: &s3.CompletedMultipartUpload{
- Parts: completedParts,
+ Parts: completedUploadedParts,
},
})
if err != nil {
@@ -882,19 +892,23 @@ func (w *writer) Commit() error {
return err
}
w.committed = true
- var completedParts []*s3.CompletedPart
+
+ var completedUploadedParts completedParts
for _, part := range w.parts {
- completedParts = append(completedParts, &s3.CompletedPart{
+ completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
+
+ sort.Sort(completedUploadedParts)
+
_, err = w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
MultipartUpload: &s3.CompletedMultipartUpload{
- Parts: completedParts,
+ Parts: completedUploadedParts,
},
})
if err != nil {
From 059bc5f5ef965b8b668c9fc61b5af0eda589d033 Mon Sep 17 00:00:00 2001
From: Andrew Hsu
Date: Mon, 25 Apr 2016 09:32:36 -0700
Subject: [PATCH 018/546] separate the go/non-go imports and reorder
Signed-off-by: Andrew Hsu (github: andrewhsu)
---
registry/storage/driver/middleware/redirect/middleware.go | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/registry/storage/driver/middleware/redirect/middleware.go b/registry/storage/driver/middleware/redirect/middleware.go
index 286a84ab..a806bc0f 100644
--- a/registry/storage/driver/middleware/redirect/middleware.go
+++ b/registry/storage/driver/middleware/redirect/middleware.go
@@ -2,11 +2,12 @@ package middleware
import (
"fmt"
+ "net/url"
+ "strings"
+
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
- "net/url"
- "strings"
)
type redirectStorageMiddleware struct {
From 80248c3d3aabec255bd3e2c4132b985b467931ee Mon Sep 17 00:00:00 2001
From: Andrew Hsu
Date: Mon, 25 Apr 2016 10:28:32 -0700
Subject: [PATCH 019/546] scheme and host mandatory in baseurl
Signed-off-by: Andrew Hsu (github: andrewhsu)
---
.../storage/driver/middleware/redirect/middleware.go | 12 +++++++-----
.../driver/middleware/redirect/middleware_test.go | 11 +++--------
2 files changed, 10 insertions(+), 13 deletions(-)
diff --git a/registry/storage/driver/middleware/redirect/middleware.go b/registry/storage/driver/middleware/redirect/middleware.go
index a806bc0f..20cd7daa 100644
--- a/registry/storage/driver/middleware/redirect/middleware.go
+++ b/registry/storage/driver/middleware/redirect/middleware.go
@@ -3,7 +3,6 @@ package middleware
import (
"fmt"
"net/url"
- "strings"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
@@ -27,12 +26,15 @@ func newRedirectStorageMiddleware(sd storagedriver.StorageDriver, options map[st
if !ok {
return nil, fmt.Errorf("baseurl must be a string")
}
- if !strings.Contains(b, "://") {
- b = "https://" + b
- }
u, err := url.Parse(b)
if err != nil {
- return nil, fmt.Errorf("invalid baseurl: %v", err)
+ return nil, fmt.Errorf("unable to parse redirect baseurl: %s", b)
+ }
+ if u.Scheme == "" {
+ return nil, fmt.Errorf("no scheme specified for redirect baseurl")
+ }
+ if u.Host == "" {
+ return nil, fmt.Errorf("no host specified for redirect baseurl")
}
return &redirectStorageMiddleware{StorageDriver: sd, scheme: u.Scheme, host: u.Host}, nil
diff --git a/registry/storage/driver/middleware/redirect/middleware_test.go b/registry/storage/driver/middleware/redirect/middleware_test.go
index 31b661b6..5fffafba 100644
--- a/registry/storage/driver/middleware/redirect/middleware_test.go
+++ b/registry/storage/driver/middleware/redirect/middleware_test.go
@@ -17,16 +17,11 @@ func (s *MiddlewareSuite) TestNoConfig(c *check.C) {
c.Assert(err, check.ErrorMatches, "no baseurl provided")
}
-func (s *MiddlewareSuite) TestDefaultScheme(c *check.C) {
+func (s *MiddlewareSuite) TestMissingScheme(c *check.C) {
options := make(map[string]interface{})
options["baseurl"] = "example.com"
- middleware, err := newRedirectStorageMiddleware(nil, options)
- c.Assert(err, check.Equals, nil)
-
- m, ok := middleware.(*redirectStorageMiddleware)
- c.Assert(ok, check.Equals, true)
- c.Assert(m.scheme, check.Equals, "https")
- c.Assert(m.host, check.Equals, "example.com")
+ _, err := newRedirectStorageMiddleware(nil, options)
+ c.Assert(err, check.ErrorMatches, "no scheme specified for redirect baseurl")
}
func (s *MiddlewareSuite) TestHTTPS(c *check.C) {
From fe9509f8f3bd075a46b57f1bd86bec63a96e02fb Mon Sep 17 00:00:00 2001
From: Andrew Hsu
Date: Mon, 25 Apr 2016 11:31:02 -0700
Subject: [PATCH 020/546] added config doc for redirect middleware
Signed-off-by: Andrew Hsu (github: andrewhsu)
---
docs/configuration.md | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/docs/configuration.md b/docs/configuration.md
index f9b89feb..91d85231 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -165,6 +165,10 @@ information about each option that appears later in this page.
privatekey: /path/to/pem
keypairid: cloudfrontkeypairid
duration: 3000s
+ storage:
+ - name: redirect
+ options:
+ baseurl: https://example.com/
reporting:
bugsnag:
apikey: bugsnagapikey
@@ -676,8 +680,7 @@ object they're wrapping. This means a registry middleware must implement the
`distribution.Repository`, and storage middleware must implement
`driver.StorageDriver`.
-Currently only one middleware, `cloudfront`, a storage middleware, is supported
-in the registry implementation.
+An example configuration of the `cloudfront` middleware, a storage middleware:
middleware:
registry:
@@ -758,6 +761,15 @@ interpretation of the options.
|
+### redirect
+
+In place of the `cloudfront` storage middleware, the `redirect`
+storage middleware can be used to specify a custom URL to a location
+of a proxy for the layer stored by the S3 storage driver.
+
+| Parameter | Required | Description |
+| --- | --- | --- |
+| baseurl | yes | `SCHEME://HOST` at which layers are served. Can also contain port. For example, `https://example.com:5443`. |
## reporting
@@ -1794,7 +1806,7 @@ This example illustrates how to configure storage middleware in a registry.
Middleware allows the registry to serve layers via a content delivery network
(CDN). This is useful for reducing requests to the storage layer.
-Currently, the registry supports [Amazon
+The registry supports [Amazon
Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in
conjunction with the S3 storage driver.
From c4df027d41ce5af9b87aef02f08241216f3f3524 Mon Sep 17 00:00:00 2001
From: Andrew Hsu
Date: Mon, 25 Apr 2016 11:40:21 -0700
Subject: [PATCH 021/546] modify redirect test to include port
Signed-off-by: Andrew Hsu (github: andrewhsu)
---
.../storage/driver/middleware/redirect/middleware_test.go | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/registry/storage/driver/middleware/redirect/middleware_test.go b/registry/storage/driver/middleware/redirect/middleware_test.go
index 5fffafba..82f4a561 100644
--- a/registry/storage/driver/middleware/redirect/middleware_test.go
+++ b/registry/storage/driver/middleware/redirect/middleware_test.go
@@ -24,20 +24,20 @@ func (s *MiddlewareSuite) TestMissingScheme(c *check.C) {
c.Assert(err, check.ErrorMatches, "no scheme specified for redirect baseurl")
}
-func (s *MiddlewareSuite) TestHTTPS(c *check.C) {
+func (s *MiddlewareSuite) TestHttpsPort(c *check.C) {
options := make(map[string]interface{})
- options["baseurl"] = "https://example.com"
+ options["baseurl"] = "https://example.com:5443"
middleware, err := newRedirectStorageMiddleware(nil, options)
c.Assert(err, check.Equals, nil)
m, ok := middleware.(*redirectStorageMiddleware)
c.Assert(ok, check.Equals, true)
c.Assert(m.scheme, check.Equals, "https")
- c.Assert(m.host, check.Equals, "example.com")
+ c.Assert(m.host, check.Equals, "example.com:5443")
url, err := middleware.URLFor(nil, "/rick/data", nil)
c.Assert(err, check.Equals, nil)
- c.Assert(url, check.Equals, "https://example.com/rick/data")
+ c.Assert(url, check.Equals, "https://example.com:5443/rick/data")
}
func (s *MiddlewareSuite) TestHTTP(c *check.C) {
From 09a9b0cf902d23a84506934a652257e0370398a1 Mon Sep 17 00:00:00 2001
From: Andrew Hsu
Date: Tue, 26 Apr 2016 14:33:54 -0700
Subject: [PATCH 022/546] separate the go/non-go imports and reorder
Signed-off-by: Andrew Hsu (github: andrewhsu)
---
registry/storage/driver/middleware/redirect/middleware_test.go | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/registry/storage/driver/middleware/redirect/middleware_test.go b/registry/storage/driver/middleware/redirect/middleware_test.go
index 82f4a561..1eb6309f 100644
--- a/registry/storage/driver/middleware/redirect/middleware_test.go
+++ b/registry/storage/driver/middleware/redirect/middleware_test.go
@@ -1,8 +1,9 @@
package middleware
import (
- check "gopkg.in/check.v1"
"testing"
+
+ check "gopkg.in/check.v1"
)
func Test(t *testing.T) { check.TestingT(t) }
From e4dd3359cc3171b98ae34edd8a0a1a19e6d61a6a Mon Sep 17 00:00:00 2001
From: Josh Hawn
Date: Sat, 27 Feb 2016 15:37:07 -0800
Subject: [PATCH 023/546] Regulate filesystem driver to max of 100 calls
It's easily possible for a flood of requests to trigger thousands of
concurrent file accesses on the storage driver. Each file I/O call creates
a new OS thread that is not reaped by the Golang runtime. By limiting it
to only 100 at a time we can effectively bound the number of OS threads
in use by the storage driver.
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn)
Signed-off-by: Tony Holdstock-Brown
---
registry/storage/driver/base/regulator.go | 150 +++++++++++++++++++
registry/storage/driver/filesystem/driver.go | 6 +-
2 files changed, 153 insertions(+), 3 deletions(-)
create mode 100644 registry/storage/driver/base/regulator.go
diff --git a/registry/storage/driver/base/regulator.go b/registry/storage/driver/base/regulator.go
new file mode 100644
index 00000000..21ddfe57
--- /dev/null
+++ b/registry/storage/driver/base/regulator.go
@@ -0,0 +1,150 @@
+package base
+
+import (
+ "io"
+ "sync"
+
+ "github.com/docker/distribution/context"
+ storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+type regulator struct {
+ storagedriver.StorageDriver
+ sync.Cond
+
+ available uint
+}
+
+// NewRegulator wraps the given driver and is used to regulate concurrent calls
+// to the given storage driver to a maximum of the given limit. This is useful
+// for storage drivers that would otherwise create an unbounded number of OS
+// threads if allowed to be called unregulated.
+func NewRegulator(driver storagedriver.StorageDriver, limit uint) storagedriver.StorageDriver {
+ return ®ulator{
+ StorageDriver: driver,
+ Cond: sync.Cond{
+ L: &sync.Mutex{},
+ },
+ available: limit,
+ }
+}
+
+func (r *regulator) condition() bool {
+ return r.available > 0
+}
+
+func (r *regulator) enter() {
+ r.L.Lock()
+ defer r.L.Unlock()
+
+ for !r.condition() {
+ r.Wait()
+ }
+
+ r.available--
+}
+
+func (r *regulator) exit() {
+ r.L.Lock()
+ defer r.Signal()
+ defer r.L.Unlock()
+
+ r.available++
+}
+
+// Name returns the human-readable "name" of the driver, useful in error
+// messages and logging. By convention, this will just be the registration
+// name, but drivers may provide other information here.
+func (r *regulator) Name() string {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.Name()
+}
+
+// GetContent retrieves the content stored at "path" as a []byte.
+// This should primarily be used for small objects.
+func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.GetContent(ctx, path)
+}
+
+// PutContent stores the []byte content at a location designated by "path".
+// This should primarily be used for small objects.
+func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.PutContent(ctx, path, content)
+}
+
+// ReadStream retrieves an io.ReadCloser for the content stored at "path"
+// with a given byte offset.
+// May be used to resume reading a stream by providing a nonzero offset.
+func (r *regulator) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.ReadStream(ctx, path, offset)
+}
+
+// WriteStream stores the contents of the provided io.ReadCloser at a
+// location designated by the given path.
+// May be used to resume writing a stream by providing a nonzero offset.
+// The offset must be no larger than the CurrentSize for this path.
+func (r *regulator) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.WriteStream(ctx, path, offset, reader)
+}
+
+// Stat retrieves the FileInfo for the given path, including the current
+// size in bytes and the creation time.
+func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.Stat(ctx, path)
+}
+
+// List returns a list of the objects that are direct descendants of the
+// given path.
+func (r *regulator) List(ctx context.Context, path string) ([]string, error) {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.List(ctx, path)
+}
+
+// Move moves an object stored at sourcePath to destPath, removing the
+// original object.
+// Note: This may be no more efficient than a copy followed by a delete for
+// many implementations.
+func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.Move(ctx, sourcePath, destPath)
+}
+
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (r *regulator) Delete(ctx context.Context, path string) error {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.Delete(ctx, path)
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at
+// the given path, possibly using the given options.
+// May return an ErrUnsupportedMethod in certain StorageDriver
+// implementations.
+func (r *regulator) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+ r.enter()
+ defer r.exit()
+
+ return r.StorageDriver.URLFor(ctx, path, options)
+}
diff --git a/registry/storage/driver/filesystem/driver.go b/registry/storage/driver/filesystem/driver.go
index 3bbdc637..e22e9809 100644
--- a/registry/storage/driver/filesystem/driver.go
+++ b/registry/storage/driver/filesystem/driver.go
@@ -60,12 +60,12 @@ func FromParameters(parameters map[string]interface{}) *Driver {
// New constructs a new Driver with a given rootDirectory
func New(rootDirectory string) *Driver {
+ fsDriver := &driver{rootDirectory: rootDirectory}
+
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
- StorageDriver: &driver{
- rootDirectory: rootDirectory,
- },
+ StorageDriver: base.NewRegulator(fsDriver, 100),
},
},
}
From 32193bdcf07df973aa25372f00fcdf249f6036ff Mon Sep 17 00:00:00 2001
From: Serge Dubrouski
Date: Tue, 26 Apr 2016 19:44:23 -0600
Subject: [PATCH 024/546] Fix wording for dry-run flag in usage message for
 garbage collector.
Signed-off-by: Serge Dubrouski
---
registry/garbagecollect.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/registry/garbagecollect.go b/registry/garbagecollect.go
index 1be4546d..7e1d97d9 100644
--- a/registry/garbagecollect.go
+++ b/registry/garbagecollect.go
@@ -135,7 +135,7 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
}
func init() {
- GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything expect remove the blobs")
+ GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
}
var dryRun bool
From a5aaae1f0620f3a110232821d44ecd0505b3d496 Mon Sep 17 00:00:00 2001
From: Tony Holdstock-Brown
Date: Mon, 25 Apr 2016 21:14:00 -0700
Subject: [PATCH 025/546] Ensure GC continues marking if _manifests is
nonexistent
Signed-off-by: Tony Holdstock-Brown
---
registry/garbagecollect.go | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/registry/garbagecollect.go b/registry/garbagecollect.go
index 7e1d97d9..65d432e0 100644
--- a/registry/garbagecollect.go
+++ b/registry/garbagecollect.go
@@ -96,6 +96,17 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
return nil
})
+ if err != nil {
+ // In certain situations such as unfinished uploads, deleting all
+ // tags in S3 or removing the _manifests folder manually, this
+ // error may be of type PathNotFound.
+ //
+ // In these cases we can continue marking other manifests safely.
+ if _, ok := err.(driver.PathNotFoundError); ok {
+ return nil
+ }
+ }
+
return err
})
From ea492aca1aacfdc53568ed948a7cee8658d1c813 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Wed, 27 Apr 2016 11:49:01 -0700
Subject: [PATCH 026/546] Move garbage collect code into storage package
Signed-off-by: Richard Scothern
---
registry/root.go | 56 +++++++++++
registry/{ => storage}/garbagecollect.go | 96 +++++--------------
registry/{ => storage}/garbagecollect_test.go | 11 +--
3 files changed, 85 insertions(+), 78 deletions(-)
rename registry/{ => storage}/garbagecollect.go (60%)
rename registry/{ => storage}/garbagecollect_test.go (96%)
diff --git a/registry/root.go b/registry/root.go
index 46338b46..7a7d44cb 100644
--- a/registry/root.go
+++ b/registry/root.go
@@ -1,7 +1,14 @@
package registry
import (
+ "fmt"
+ "os"
+
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/registry/storage"
+ "github.com/docker/distribution/registry/storage/driver/factory"
"github.com/docker/distribution/version"
+ "github.com/docker/libtrust"
"github.com/spf13/cobra"
)
@@ -10,6 +17,7 @@ var showVersion bool
func init() {
RootCmd.AddCommand(ServeCmd)
RootCmd.AddCommand(GCCmd)
+ GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit")
}
@@ -26,3 +34,51 @@ var RootCmd = &cobra.Command{
cmd.Usage()
},
}
+
+var dryRun bool
+
+// GCCmd is the cobra command that corresponds to the garbage-collect subcommand
+var GCCmd = &cobra.Command{
+ Use: "garbage-collect ",
+ Short: "`garbage-collect` deletes layers not referenced by any manifests",
+ Long: "`garbage-collect` deletes layers not referenced by any manifests",
+ Run: func(cmd *cobra.Command, args []string) {
+ config, err := resolveConfiguration(args)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
+ cmd.Usage()
+ os.Exit(1)
+ }
+
+ driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters())
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err)
+ os.Exit(1)
+ }
+
+ ctx := context.Background()
+ ctx, err = configureLogging(ctx, config)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err)
+ os.Exit(1)
+ }
+
+ k, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ registry, err := storage.NewRegistry(ctx, driver, storage.DisableSchema1Signatures, storage.Schema1SigningKey(k))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err)
+ os.Exit(1)
+ }
+
+ err = storage.MarkAndSweep(ctx, driver, registry, dryRun)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err)
+ os.Exit(1)
+ }
+ },
+}
diff --git a/registry/garbagecollect.go b/registry/storage/garbagecollect.go
similarity index 60%
rename from registry/garbagecollect.go
rename to registry/storage/garbagecollect.go
index 65d432e0..be64b847 100644
--- a/registry/garbagecollect.go
+++ b/registry/storage/garbagecollect.go
@@ -1,8 +1,7 @@
-package registry
+package storage
import (
"fmt"
- "os"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
@@ -10,21 +9,15 @@ import (
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/driver"
- "github.com/docker/distribution/registry/storage/driver/factory"
- "github.com/docker/libtrust"
- "github.com/spf13/cobra"
)
func emit(format string, a ...interface{}) {
- if dryRun {
- fmt.Printf(format+"\n", a...)
- }
+ fmt.Printf(format+"\n", a...)
}
-func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace) error {
-
+// MarkAndSweep performs a mark and sweep of registry data
+func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error {
repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator)
if !ok {
return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator")
@@ -33,7 +26,9 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
// mark
markSet := make(map[digest.Digest]struct{})
err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error {
- emit(repoName)
+ if dryRun {
+ emit(repoName)
+ }
var err error
named, err := reference.ParseNamed(repoName)
@@ -57,7 +52,9 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
// Mark the manifest's blob
- emit("%s: marking manifest %s ", repoName, dgst)
+ if dryRun {
+ emit("%s: marking manifest %s ", repoName, dgst)
+ }
markSet[dgst] = struct{}{}
manifest, err := manifestService.Get(ctx, dgst)
@@ -68,7 +65,9 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
descriptors := manifest.References()
for _, descriptor := range descriptors {
markSet[descriptor.Digest] = struct{}{}
- emit("%s: marking blob %s", repoName, descriptor.Digest)
+ if dryRun {
+ emit("%s: marking blob %s", repoName, descriptor.Digest)
+ }
}
switch manifest.(type) {
@@ -82,13 +81,17 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
return fmt.Errorf("failed to get signatures for signed manifest: %v", err)
}
for _, signatureDigest := range signatures {
- emit("%s: marking signature %s", repoName, signatureDigest)
+ if dryRun {
+ emit("%s: marking signature %s", repoName, signatureDigest)
+ }
markSet[signatureDigest] = struct{}{}
}
break
case *schema2.DeserializedManifest:
config := manifest.(*schema2.DeserializedManifest).Config
- emit("%s: marking configuration %s", repoName, config.Digest)
+ if dryRun {
+ emit("%s: marking configuration %s", repoName, config.Digest)
+ }
markSet[config.Digest] = struct{}{}
break
}
@@ -127,13 +130,14 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
if err != nil {
return fmt.Errorf("error enumerating blobs: %v", err)
}
-
- emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet))
+ if dryRun {
+ emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet))
+ }
// Construct vacuum
- vacuum := storage.NewVacuum(ctx, storageDriver)
+ vacuum := NewVacuum(ctx, storageDriver)
for dgst := range deleteSet {
- emit("blob eligible for deletion: %s", dgst)
if dryRun {
+ emit("blob eligible for deletion: %s", dgst)
continue
}
err = vacuum.RemoveBlob(string(dgst))
@@ -144,55 +148,3 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
return err
}
-
-func init() {
- GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
-}
-
-var dryRun bool
-
-// GCCmd is the cobra command that corresponds to the garbage-collect subcommand
-var GCCmd = &cobra.Command{
- Use: "garbage-collect ",
- Short: "`garbage-collect` deletes layers not referenced by any manifests",
- Long: "`garbage-collect` deletes layers not referenced by any manifests",
- Run: func(cmd *cobra.Command, args []string) {
- config, err := resolveConfiguration(args)
- if err != nil {
- fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
- cmd.Usage()
- os.Exit(1)
- }
-
- driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters())
- if err != nil {
- fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err)
- os.Exit(1)
- }
-
- ctx := context.Background()
- ctx, err = configureLogging(ctx, config)
- if err != nil {
- fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err)
- os.Exit(1)
- }
-
- k, err := libtrust.GenerateECP256PrivateKey()
- if err != nil {
- fmt.Fprint(os.Stderr, err)
- os.Exit(1)
- }
-
- registry, err := storage.NewRegistry(ctx, driver, storage.DisableSchema1Signatures, storage.Schema1SigningKey(k))
- if err != nil {
- fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err)
- os.Exit(1)
- }
-
- err = markAndSweep(ctx, driver, registry)
- if err != nil {
- fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err)
- os.Exit(1)
- }
- },
-}
diff --git a/registry/garbagecollect_test.go b/registry/storage/garbagecollect_test.go
similarity index 96%
rename from registry/garbagecollect_test.go
rename to registry/storage/garbagecollect_test.go
index dd5fadd5..ff4a3df2 100644
--- a/registry/garbagecollect_test.go
+++ b/registry/storage/garbagecollect_test.go
@@ -1,4 +1,4 @@
-package registry
+package storage
import (
"io"
@@ -8,7 +8,6 @@ import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/testutil"
@@ -22,7 +21,7 @@ type image struct {
func createRegistry(t *testing.T, driver driver.StorageDriver) distribution.Namespace {
ctx := context.Background()
- registry, err := storage.NewRegistry(ctx, driver, storage.EnableDelete)
+ registry, err := NewRegistry(ctx, driver, EnableDelete)
if err != nil {
t.Fatalf("Failed to construct namespace")
}
@@ -161,7 +160,7 @@ func TestNoDeletionNoEffect(t *testing.T) {
}
// Run GC
- err = markAndSweep(context.Background(), inmemoryDriver, registry)
+ err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false)
if err != nil {
t.Fatalf("Failed mark and sweep: %v", err)
}
@@ -193,7 +192,7 @@ func TestDeletionHasEffect(t *testing.T) {
manifests.Delete(ctx, image3.manifestDigest)
// Run GC
- err = markAndSweep(context.Background(), inmemoryDriver, registry)
+ err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false)
if err != nil {
t.Fatalf("Failed mark and sweep: %v", err)
}
@@ -327,7 +326,7 @@ func TestOrphanBlobDeleted(t *testing.T) {
uploadRandomSchema2Image(t, repo)
// Run GC
- err = markAndSweep(context.Background(), inmemoryDriver, registry)
+ err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false)
if err != nil {
t.Fatalf("Failed mark and sweep: %v", err)
}
From 69ba30dc03fc1e6e0cc257752eb38a9c9dcdec28 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Wed, 27 Apr 2016 13:24:22 -0700
Subject: [PATCH 027/546] Add a test with a missing _manifests directory
Signed-off-by: Richard Scothern
---
registry/storage/garbagecollect_test.go | 32 +++++++++++++++++++++++++
1 file changed, 32 insertions(+)
diff --git a/registry/storage/garbagecollect_test.go b/registry/storage/garbagecollect_test.go
index ff4a3df2..a0ba154b 100644
--- a/registry/storage/garbagecollect_test.go
+++ b/registry/storage/garbagecollect_test.go
@@ -2,6 +2,7 @@ package storage
import (
"io"
+ "path"
"testing"
"github.com/docker/distribution"
@@ -176,6 +177,37 @@ func TestNoDeletionNoEffect(t *testing.T) {
}
}
+func TestGCWithMissingManifests(t *testing.T) {
+ ctx := context.Background()
+ d := inmemory.New()
+
+ registry := createRegistry(t, d)
+ repo := makeRepository(t, registry, "testrepo")
+ uploadRandomSchema1Image(t, repo)
+
+ // Simulate a missing _manifests directory
+ revPath, err := pathFor(manifestRevisionsPathSpec{"testrepo"})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _manifestsPath := path.Dir(revPath)
+ err = d.Delete(ctx, _manifestsPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = MarkAndSweep(context.Background(), d, registry, false)
+ if err != nil {
+ t.Fatalf("Failed mark and sweep: %v", err)
+ }
+
+ blobs := allBlobs(t, registry)
+ if len(blobs) > 0 {
+ t.Errorf("unexpected blobs after gc")
+ }
+}
+
func TestDeletionHasEffect(t *testing.T) {
ctx := context.Background()
inmemoryDriver := inmemory.New()
From d2e29acce05144bb8ec8e07ff60246ebc7164745 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Tue, 19 Apr 2016 16:31:25 -0700
Subject: [PATCH 028/546] When a blob upload is committed prevent writing out
hashstate in the subsequent close.
When a blob upload is cancelled close the blobwriter before removing
upload state to ensure old hashstates don't persist.
Signed-off-by: Richard Scothern
---
registry/storage/blob_test.go | 17 +++++++++++++++++
registry/storage/blobwriter.go | 15 ++++++++++++---
2 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/registry/storage/blob_test.go b/registry/storage/blob_test.go
index 3698a415..7e1a7cd4 100644
--- a/registry/storage/blob_test.go
+++ b/registry/storage/blob_test.go
@@ -16,6 +16,7 @@ import (
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/testutil"
+ "path"
)
// TestWriteSeek tests that the current file size can be
@@ -83,6 +84,15 @@ func TestSimpleBlobUpload(t *testing.T) {
t.Fatalf("unexpected error during upload cancellation: %v", err)
}
+ // get the enclosing directory
+ uploadPath := path.Dir(blobUpload.(*blobWriter).path)
+
+ // ensure state was cleaned up
+ _, err = driver.List(ctx, uploadPath)
+ if err == nil {
+ t.Fatal("files in upload path after cleanup")
+ }
+
// Do a resume, get unknown upload
blobUpload, err = bs.Resume(ctx, blobUpload.ID())
if err != distribution.ErrBlobUploadUnknown {
@@ -128,6 +138,13 @@ func TestSimpleBlobUpload(t *testing.T) {
t.Fatalf("unexpected error finishing layer upload: %v", err)
}
+ // ensure state was cleaned up
+ uploadPath = path.Dir(blobUpload.(*blobWriter).path)
+ _, err = driver.List(ctx, uploadPath)
+ if err == nil {
+ t.Fatal("files in upload path after commit")
+ }
+
// After finishing an upload, it should no longer exist.
if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown {
t.Fatalf("expected layer upload to be unknown, got %v", err)
diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go
index 7f280d36..2ae944a4 100644
--- a/registry/storage/blobwriter.go
+++ b/registry/storage/blobwriter.go
@@ -18,8 +18,8 @@ var (
errResumableDigestNotAvailable = errors.New("resumable digest not available")
)
-// layerWriter is used to control the various aspects of resumable
-// layer upload. It implements the LayerUpload interface.
+// blobWriter is used to control the various aspects of resumable
+// blob upload.
type blobWriter struct {
ctx context.Context
blobStore *linkedBlobStore
@@ -34,6 +34,7 @@ type blobWriter struct {
path string
resumableDigestEnabled bool
+ committed bool
}
var _ distribution.BlobWriter = &blobWriter{}
@@ -78,6 +79,7 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor)
return distribution.Descriptor{}, err
}
+ bw.committed = true
return canonical, nil
}
@@ -89,11 +91,14 @@ func (bw *blobWriter) Cancel(ctx context.Context) error {
return err
}
+ if err := bw.Close(); err != nil {
+ context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err)
+ }
+
if err := bw.removeResources(ctx); err != nil {
return err
}
- bw.Close()
return nil
}
@@ -130,6 +135,10 @@ func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
}
func (bw *blobWriter) Close() error {
+ if bw.committed {
+ return errors.New("blobwriter close after commit")
+ }
+
if err := bw.storeHashState(bw.blobStore.ctx); err != nil {
return err
}
From e728c8bbed82b9cbe078ed3461cdd366e22cfdb5 Mon Sep 17 00:00:00 2001
From: Sven Dowideit
Date: Thu, 28 Apr 2016 12:57:10 +0000
Subject: [PATCH 029/546] convert docs Dockerfiles to use docs/base:oss
Signed-off-by: Sven Dowideit
---
docs/Dockerfile | 13 ++-----------
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/docs/Dockerfile b/docs/Dockerfile
index 44128086..8fa63a33 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -1,18 +1,9 @@
-FROM docs/base:latest
+FROM docs/base:oss
MAINTAINER Mary Anthony (@moxiegirl)
-RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/engine
-RUN svn checkout https://github.com/docker/compose/trunk/docs /docs/content/compose
-RUN svn checkout https://github.com/docker/swarm/trunk/docs /docs/content/swarm
-RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine
-RUN svn checkout https://github.com/docker/notary/trunk/docs /docs/content/notary
-RUN svn checkout https://github.com/docker/kitematic/trunk/docs /docs/content/kitematic
-RUN svn checkout https://github.com/docker/toolbox/trunk/docs /docs/content/toolbox
-RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/opensource
-
ENV PROJECT=registry
# To get the git info for this repo
COPY . /src
-
+RUN rm -r /docs/content/$PROJECT/
COPY . /docs/content/$PROJECT/
From 620a0302177de09fad3b30ffc02c691baa084018 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Thu, 28 Apr 2016 11:57:48 -0700
Subject: [PATCH 030/546] Preserve author information in schema1 manifests
When we push a schema1 manifest, we encode history information from the
image JSON into v1Compatibility strings for the respective layers. The
"author" field was not being set in these v1Compatibility strings, so if
a parent layer had an author set, it would not be preserved after
pushing through a schema1 manifest and repulling, so the image ID would
change after the pull. This change preserves the authorship information
for parent layers so that the image ID does not change.
Signed-off-by: Aaron Lehmann
---
manifest/schema1/config_builder.go | 4 +++-
manifest/schema1/config_builder_test.go | 5 +++--
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/manifest/schema1/config_builder.go b/manifest/schema1/config_builder.go
index b3d1e554..5cdd7679 100644
--- a/manifest/schema1/config_builder.go
+++ b/manifest/schema1/config_builder.go
@@ -110,7 +110,8 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani
ContainerConfig struct {
Cmd []string
} `json:"container_config,omitempty"`
- ThrowAway bool `json:"throwaway,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
}
fsLayerList := make([]FSLayer, len(img.History))
@@ -145,6 +146,7 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani
Parent: parent,
Comment: h.Comment,
Created: h.Created,
+ Author: h.Author,
}
v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
if h.EmptyLayer {
diff --git a/manifest/schema1/config_builder_test.go b/manifest/schema1/config_builder_test.go
index 76b53bfd..5f9abaa9 100644
--- a/manifest/schema1/config_builder_test.go
+++ b/manifest/schema1/config_builder_test.go
@@ -163,6 +163,7 @@ func TestConfigBuilder(t *testing.T) {
"empty_layer": true
},
{
+ "author": "Alyssa P. Hacker \u003calyspdev@example.com\u003e",
"created": "2015-11-04T23:06:32.083868454Z",
"created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"
},
@@ -252,8 +253,8 @@ func TestConfigBuilder(t *testing.T) {
}
expectedV1Compatibility := []string{
- `{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"0850bfdeb7b060b1004a09099846c2f023a3f2ecbf33f56b4774384b00ce0323","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`,
- `{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`,
+ `{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"69e5c1bfadad697fdb6db59f6326648fa119e0c031a0eda33b8cfadcab54ba7f","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`,
+ `{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]},"author":"Alyssa P. Hacker \u003calyspdev@example.com\u003e"}`,
`{"id":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","parent":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`,
`{"id":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`,
`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`,
From 8cded9db0df16afc860a0fa58327ec0b466a87f6 Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Thu, 28 Apr 2016 14:51:13 -0700
Subject: [PATCH 031/546] Use official docker 1.11.1 image to test docker 1.11
Signed-off-by: Derek McGowan (github: dmcgowan)
---
contrib/docker-integration/run_multiversion.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/contrib/docker-integration/run_multiversion.sh b/contrib/docker-integration/run_multiversion.sh
index 35c2006e..b673d311 100755
--- a/contrib/docker-integration/run_multiversion.sh
+++ b/contrib/docker-integration/run_multiversion.sh
@@ -53,12 +53,12 @@ time docker pull distribution/golem-runner:0.1-bats
time docker pull docker:1.9.1-dind
time docker pull docker:1.10.3-dind
-time docker pull dockerswarm/dind:1.11.0-rc2
+time docker pull docker:1.11.1-dind
golem -cache $cachedir \
-i "golem-distribution:latest,$distimage,$distversion" \
-i "golem-dind:latest,docker:1.9.1-dind,1.9.1" \
-i "golem-dind:latest,docker:1.10.3-dind,1.10.3" \
- -i "golem-dind:latest,dockerswarm/dind:1.11.0-rc2,1.11.0" \
+ -i "golem-dind:latest,docker:1.11.1-dind,1.11.1" \
$DIR
From 3730470b641dcb7ebe50e32512ad4565fa340027 Mon Sep 17 00:00:00 2001
From: Troels Thomsen
Date: Fri, 29 Apr 2016 23:34:24 +0200
Subject: [PATCH 032/546] Pass through known errors
Signed-off-by: Troels Thomsen
---
registry/handlers/app.go | 2 ++
registry/handlers/blobupload.go | 2 ++
registry/handlers/images.go | 2 ++
registry/handlers/tags.go | 2 ++
4 files changed, 8 insertions(+)
diff --git a/registry/handlers/app.go b/registry/handlers/app.go
index 3c3e50d0..fc3f9069 100644
--- a/registry/handlers/app.go
+++ b/registry/handlers/app.go
@@ -634,6 +634,8 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err))
case distribution.ErrRepositoryNameInvalid:
context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err))
+ case errcode.Error:
+ context.Errors = append(context.Errors, err)
}
if err := errcode.ServeJSON(w, context.Errors); err != nil {
diff --git a/registry/handlers/blobupload.go b/registry/handlers/blobupload.go
index 673e2c59..2cd5115d 100644
--- a/registry/handlers/blobupload.go
+++ b/registry/handlers/blobupload.go
@@ -239,6 +239,8 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht
switch err := err.(type) {
case distribution.ErrBlobInvalidDigest:
buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
+ case errcode.Error:
+ buh.Errors = append(buh.Errors, err)
default:
switch err {
case distribution.ErrAccessDenied:
diff --git a/registry/handlers/images.go b/registry/handlers/images.go
index 5f2d8855..dd2ed2c8 100644
--- a/registry/handlers/images.go
+++ b/registry/handlers/images.go
@@ -283,6 +283,8 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
}
}
}
+ case errcode.Error:
+ imh.Errors = append(imh.Errors, err)
default:
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
diff --git a/registry/handlers/tags.go b/registry/handlers/tags.go
index fd661e66..91f1031e 100644
--- a/registry/handlers/tags.go
+++ b/registry/handlers/tags.go
@@ -41,6 +41,8 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
switch err := err.(type) {
case distribution.ErrRepositoryUnknown:
th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()}))
+ case errcode.Error:
+ th.Errors = append(th.Errors, err)
default:
th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
From 33c448f14769c96d6702f9481831132b06e3cad6 Mon Sep 17 00:00:00 2001
From: Tony Holdstock-Brown
Date: Tue, 26 Apr 2016 14:36:38 -0700
Subject: [PATCH 033/546] Implement regulator in filesystem driver
This commit refactors base.regulator into the 2.4 interfaces and adds a
filesystem configuration option `maxthreads` to configure the regulator.
By default `maxthreads` is set to 100. This means the FS driver is
limited to 100 concurrent blocking file operations. Any subsequent
operations will block in Go until previous filesystem operations
complete.
This ensures that the registry can never open thousands of simultaneous
threads from os filesystem operations.
Note that `maxthreads` can never be less than 25.
Add test case covering parsable string maxthreads
Signed-off-by: Tony Holdstock-Brown
---
registry/proxy/proxyblobstore_test.go | 18 +++-
registry/storage/driver/base/regulator.go | 43 ++++-----
registry/storage/driver/filesystem/driver.go | 81 ++++++++++++++---
.../storage/driver/filesystem/driver_test.go | 89 ++++++++++++++++++-
4 files changed, 193 insertions(+), 38 deletions(-)
diff --git a/registry/proxy/proxyblobstore_test.go b/registry/proxy/proxyblobstore_test.go
index b93b5343..967dcd3d 100644
--- a/registry/proxy/proxyblobstore_test.go
+++ b/registry/proxy/proxyblobstore_test.go
@@ -132,8 +132,15 @@ func makeTestEnv(t *testing.T, name string) *testEnv {
t.Fatalf("unable to create tempdir: %s", err)
}
+ localDriver, err := filesystem.FromParameters(map[string]interface{}{
+ "rootdirectory": truthDir,
+ })
+ if err != nil {
+ t.Fatalf("unable to create filesystem driver: %s", err)
+ }
+
// todo: create a tempfile area here
- localRegistry, err := storage.NewRegistry(ctx, filesystem.New(truthDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption)
+ localRegistry, err := storage.NewRegistry(ctx, localDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
@@ -142,7 +149,14 @@ func makeTestEnv(t *testing.T, name string) *testEnv {
t.Fatalf("unexpected error getting repo: %v", err)
}
- truthRegistry, err := storage.NewRegistry(ctx, filesystem.New(cacheDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()))
+ cacheDriver, err := filesystem.FromParameters(map[string]interface{}{
+ "rootdirectory": cacheDir,
+ })
+ if err != nil {
+ t.Fatalf("unable to create filesystem driver: %s", err)
+ }
+
+ truthRegistry, err := storage.NewRegistry(ctx, cacheDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()))
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
diff --git a/registry/storage/driver/base/regulator.go b/registry/storage/driver/base/regulator.go
index 21ddfe57..185160a4 100644
--- a/registry/storage/driver/base/regulator.go
+++ b/registry/storage/driver/base/regulator.go
@@ -10,46 +10,41 @@ import (
type regulator struct {
storagedriver.StorageDriver
- sync.Cond
+ *sync.Cond
- available uint
+ available uint64
}
// NewRegulator wraps the given driver and is used to regulate concurrent calls
// to the given storage driver to a maximum of the given limit. This is useful
// for storage drivers that would otherwise create an unbounded number of OS
// threads if allowed to be called unregulated.
-func NewRegulator(driver storagedriver.StorageDriver, limit uint) storagedriver.StorageDriver {
+func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver {
return ®ulator{
StorageDriver: driver,
- Cond: sync.Cond{
- L: &sync.Mutex{},
- },
- available: limit,
+ Cond: sync.NewCond(&sync.Mutex{}),
+ available: limit,
}
}
-func (r *regulator) condition() bool {
- return r.available > 0
-}
-
func (r *regulator) enter() {
r.L.Lock()
- defer r.L.Unlock()
-
- for !r.condition() {
+ for r.available == 0 {
r.Wait()
}
-
r.available--
+ r.L.Unlock()
}
func (r *regulator) exit() {
r.L.Lock()
- defer r.Signal()
- defer r.L.Unlock()
-
+ // We only need to signal to a waiting FS operation if we're already at the
+ // limit of threads used
+ if r.available == 0 {
+ r.Signal()
+ }
r.available++
+ r.L.Unlock()
}
// Name returns the human-readable "name" of the driver, useful in error
@@ -80,25 +75,25 @@ func (r *regulator) PutContent(ctx context.Context, path string, content []byte)
return r.StorageDriver.PutContent(ctx, path, content)
}
-// ReadStream retrieves an io.ReadCloser for the content stored at "path"
+// Reader retrieves an io.ReadCloser for the content stored at "path"
// with a given byte offset.
// May be used to resume reading a stream by providing a nonzero offset.
-func (r *regulator) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
+func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
r.enter()
defer r.exit()
- return r.StorageDriver.ReadStream(ctx, path, offset)
+ return r.StorageDriver.Reader(ctx, path, offset)
}
-// WriteStream stores the contents of the provided io.ReadCloser at a
+// Writer stores the contents of the provided io.ReadCloser at a
// location designated by the given path.
// May be used to resume writing a stream by providing a nonzero offset.
// The offset must be no larger than the CurrentSize for this path.
-func (r *regulator) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) {
+func (r *regulator) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
r.enter()
defer r.exit()
- return r.StorageDriver.WriteStream(ctx, path, offset, reader)
+ return r.StorageDriver.Writer(ctx, path, append)
}
// Stat retrieves the FileInfo for the given path, including the current
diff --git a/registry/storage/driver/filesystem/driver.go b/registry/storage/driver/filesystem/driver.go
index e22e9809..1a897261 100644
--- a/registry/storage/driver/filesystem/driver.go
+++ b/registry/storage/driver/filesystem/driver.go
@@ -8,6 +8,8 @@ import (
"io/ioutil"
"os"
"path"
+ "reflect"
+ "strconv"
"time"
"github.com/docker/distribution/context"
@@ -16,8 +18,23 @@ import (
"github.com/docker/distribution/registry/storage/driver/factory"
)
-const driverName = "filesystem"
-const defaultRootDirectory = "/var/lib/registry"
+const (
+ driverName = "filesystem"
+ defaultRootDirectory = "/var/lib/registry"
+ defaultMaxThreads = uint64(100)
+
+ // minThreads is the minimum value for the maxthreads configuration
+ // parameter. If the driver's parameters are less than this we set
+ // the parameters to minThreads
+ minThreads = uint64(25)
+)
+
+// DriverParameters represents all configuration options available for the
+// filesystem driver
+type DriverParameters struct {
+ RootDirectory string
+ MaxThreads uint64
+}
func init() {
factory.Register(driverName, &filesystemDriverFactory{})
@@ -27,7 +44,7 @@ func init() {
type filesystemDriverFactory struct{}
func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
- return FromParameters(parameters), nil
+ return FromParameters(parameters)
}
type driver struct {
@@ -47,25 +64,67 @@ type Driver struct {
// FromParameters constructs a new Driver with a given parameters map
// Optional Parameters:
// - rootdirectory
-func FromParameters(parameters map[string]interface{}) *Driver {
- var rootDirectory = defaultRootDirectory
+// - maxthreads
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+ params, err := fromParametersImpl(parameters)
+ if err != nil || params == nil {
+ return nil, err
+ }
+ return New(*params), nil
+}
+
+func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) {
+ var (
+ err error
+ maxThreads = defaultMaxThreads
+ rootDirectory = defaultRootDirectory
+ )
+
if parameters != nil {
- rootDir, ok := parameters["rootdirectory"]
- if ok {
+ if rootDir, ok := parameters["rootdirectory"]; ok {
rootDirectory = fmt.Sprint(rootDir)
}
+
+ // Get maximum number of threads for blocking filesystem operations,
+ // if specified
+ threads := parameters["maxthreads"]
+ switch v := threads.(type) {
+ case string:
+ if maxThreads, err = strconv.ParseUint(v, 0, 64); err != nil {
+ return nil, fmt.Errorf("maxthreads parameter must be an integer, %v invalid", threads)
+ }
+ case uint64:
+ maxThreads = v
+ case int, int32, int64:
+ maxThreads = uint64(reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int())
+ case uint, uint32:
+ maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint()
+ case nil:
+ // do nothing
+ default:
+ return nil, fmt.Errorf("invalid value for maxthreads: %#v", threads)
+ }
+
+ if maxThreads < minThreads {
+ maxThreads = minThreads
+ }
}
- return New(rootDirectory)
+
+ params := &DriverParameters{
+ RootDirectory: rootDirectory,
+ MaxThreads: maxThreads,
+ }
+ return params, nil
}
// New constructs a new Driver with a given rootDirectory
-func New(rootDirectory string) *Driver {
- fsDriver := &driver{rootDirectory: rootDirectory}
+func New(params DriverParameters) *Driver {
+ fsDriver := &driver{rootDirectory: params.RootDirectory}
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
- StorageDriver: base.NewRegulator(fsDriver, 100),
+ StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads),
},
},
}
diff --git a/registry/storage/driver/filesystem/driver_test.go b/registry/storage/driver/filesystem/driver_test.go
index 8b48b431..3be85923 100644
--- a/registry/storage/driver/filesystem/driver_test.go
+++ b/registry/storage/driver/filesystem/driver_test.go
@@ -3,6 +3,7 @@ package filesystem
import (
"io/ioutil"
"os"
+ "reflect"
"testing"
storagedriver "github.com/docker/distribution/registry/storage/driver"
@@ -20,7 +21,93 @@ func init() {
}
defer os.Remove(root)
+ driver, err := FromParameters(map[string]interface{}{
+ "rootdirectory": root,
+ })
+ if err != nil {
+ panic(err)
+ }
+
testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) {
- return New(root), nil
+ return driver, nil
}, testsuites.NeverSkip)
}
+
+func TestFromParametersImpl(t *testing.T) {
+
+ tests := []struct {
+ params map[string]interface{} // technically the yaml can contain anything
+ expected DriverParameters
+ pass bool
+ }{
+ // check we use default threads and root dirs
+ {
+ params: map[string]interface{}{},
+ expected: DriverParameters{
+ RootDirectory: defaultRootDirectory,
+ MaxThreads: defaultMaxThreads,
+ },
+ pass: true,
+ },
+ // Testing initiation with a string maxThreads which can't be parsed
+ {
+ params: map[string]interface{}{
+ "maxthreads": "fail",
+ },
+ expected: DriverParameters{},
+ pass: false,
+ },
+ {
+ params: map[string]interface{}{
+ "maxthreads": "100",
+ },
+ expected: DriverParameters{
+ RootDirectory: defaultRootDirectory,
+ MaxThreads: uint64(100),
+ },
+ pass: true,
+ },
+ {
+ params: map[string]interface{}{
+ "maxthreads": 100,
+ },
+ expected: DriverParameters{
+ RootDirectory: defaultRootDirectory,
+ MaxThreads: uint64(100),
+ },
+ pass: true,
+ },
+ // check that we use minimum thread counts
+ {
+ params: map[string]interface{}{
+ "maxthreads": 1,
+ },
+ expected: DriverParameters{
+ RootDirectory: defaultRootDirectory,
+ MaxThreads: minThreads,
+ },
+ pass: true,
+ },
+ }
+
+ for _, item := range tests {
+ params, err := fromParametersImpl(item.params)
+
+ if !item.pass {
+ // We only need to assert that expected failures have an error
+ if err == nil {
+ t.Fatalf("expected error configuring filesystem driver with invalid param: %+v", item.params)
+ }
+ continue
+ }
+
+ if err != nil {
+ t.Fatalf("unexpected error creating filesystem driver: %s", err)
+ }
+ // Note that we get a pointer to params back
+ if !reflect.DeepEqual(*params, item.expected) {
+ t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params)
+ }
+ }
+
+}
From d0352a7448b17346ad67882cf3df3b1239481d7c Mon Sep 17 00:00:00 2001
From: Tony Holdstock-Brown
Date: Tue, 26 Apr 2016 15:20:40 -0700
Subject: [PATCH 034/546] Add documentation
Signed-off-by: Tony Holdstock-Brown
---
docs/configuration.md | 1 +
docs/storage-drivers/filesystem.md | 4 ++++
2 files changed, 5 insertions(+)
diff --git a/docs/configuration.md b/docs/configuration.md
index f9b89feb..7d3a73e3 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -78,6 +78,7 @@ information about each option that appears later in this page.
storage:
filesystem:
rootdirectory: /var/lib/registry
+ maxthreads: 100
azure:
accountname: accountname
accountkey: base64encodedaccountkey
diff --git a/docs/storage-drivers/filesystem.md b/docs/storage-drivers/filesystem.md
index 476edcf5..fea7ce4a 100644
--- a/docs/storage-drivers/filesystem.md
+++ b/docs/storage-drivers/filesystem.md
@@ -16,3 +16,7 @@ An implementation of the `storagedriver.StorageDriver` interface which uses the
`rootdirectory`: (optional) The absolute path to a root directory tree in which
to store all registry files. The registry stores all its data here so make sure
there is adequate space available. Defaults to `/var/lib/registry`.
+`maxthreads`: (optional) The maximum number of simultaneous blocking filesystem
+operations permitted within the registry. Each operation spawns a new thread and
+may cause thread exhaustion issues if many are done in parallel. Defaults to
+`100`, and can be no lower than `25`.
From a264f9ae293da7c255ede556c425925035e166ec Mon Sep 17 00:00:00 2001
From: Antonio Murdaca
Date: Tue, 3 May 2016 10:28:40 +0200
Subject: [PATCH 035/546] registry: type too many requests error
Signed-off-by: Antonio Murdaca
---
registry/api/errcode/register.go | 10 ++++++++++
registry/client/errors.go | 8 ++++++--
2 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/registry/api/errcode/register.go b/registry/api/errcode/register.go
index 01c34384..71cf6f7a 100644
--- a/registry/api/errcode/register.go
+++ b/registry/api/errcode/register.go
@@ -63,6 +63,16 @@ var (
Description: "Returned when a service is not available",
HTTPStatusCode: http.StatusServiceUnavailable,
})
+
+ // ErrorCodeTooManyRequests is returned if a client attempts too many
+ // times to contact a service endpoint.
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
+ Value: "TOOMANYREQUESTS",
+ Message: "too many requests",
+ Description: `Returned when a client attempts to contact a
+ service too many times`,
+ HTTPStatusCode: http.StatusTooManyRequests,
+ })
)
var nextCode = 1000
diff --git a/registry/client/errors.go b/registry/client/errors.go
index 00fafe11..804e69e0 100644
--- a/registry/client/errors.go
+++ b/registry/client/errors.go
@@ -51,10 +51,14 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
}
err = json.Unmarshal(body, &detailsErr)
if err == nil && detailsErr.Details != "" {
- if statusCode == http.StatusUnauthorized {
+ switch statusCode {
+ case http.StatusUnauthorized:
return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+ case http.StatusTooManyRequests:
+ return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+ default:
+ return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
}
- return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
}
if err := json.Unmarshal(body, &errors); err != nil {
From f60f275c29cb215bab1594b4e9b3967bc02bd743 Mon Sep 17 00:00:00 2001
From: Antonio Murdaca
Date: Tue, 3 May 2016 21:24:43 +0200
Subject: [PATCH 036/546] registry: do not use http.StatusTooManyRequests
go1.5 doesn't export http.StatusTooManyRequests while
go1.6 does. Fix this by hardcoding the status code for now.
Signed-off-by: Antonio Murdaca
---
registry/api/errcode/register.go | 5 ++++-
registry/client/errors.go | 5 ++++-
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/registry/api/errcode/register.go b/registry/api/errcode/register.go
index 71cf6f7a..7489e84f 100644
--- a/registry/api/errcode/register.go
+++ b/registry/api/errcode/register.go
@@ -71,7 +71,10 @@ var (
Message: "too many requests",
Description: `Returned when a client attempts to contact a
service too many times`,
- HTTPStatusCode: http.StatusTooManyRequests,
+ // FIXME: go1.5 doesn't export http.StatusTooManyRequests while
+ // go1.6 does. Update the hardcoded value to the constant once
+ // Docker updates golang version to 1.6.
+ HTTPStatusCode: 429,
})
)
diff --git a/registry/client/errors.go b/registry/client/errors.go
index 804e69e0..adbaacf4 100644
--- a/registry/client/errors.go
+++ b/registry/client/errors.go
@@ -54,7 +54,10 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
switch statusCode {
case http.StatusUnauthorized:
return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
- case http.StatusTooManyRequests:
+ // FIXME: go1.5 doesn't export http.StatusTooManyRequests while
+ // go1.6 does. Update the hardcoded value to the constant once
+ // Docker updates golang version to 1.6.
+ case 429:
return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
default:
return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
From c9c62380ffc1b72a6d1ea1e80cc2d806c750bd9a Mon Sep 17 00:00:00 2001
From: Tony Holdstock-Brown
Date: Tue, 3 May 2016 16:03:22 -0700
Subject: [PATCH 037/546] Don't wrap thread limits when using a negative int
Signed-off-by: Tony Holdstock-Brown
---
registry/storage/driver/filesystem/driver.go | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/registry/storage/driver/filesystem/driver.go b/registry/storage/driver/filesystem/driver.go
index 1a897261..649e2bc2 100644
--- a/registry/storage/driver/filesystem/driver.go
+++ b/registry/storage/driver/filesystem/driver.go
@@ -96,7 +96,12 @@ func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, e
case uint64:
maxThreads = v
case int, int32, int64:
- maxThreads = uint64(reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int())
+ val := reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int()
+ // If threads is negative casting to uint64 will wrap around and
+ // give you the hugest thread limit ever. Let's be sensible, here
+ if val > 0 {
+ maxThreads = uint64(val)
+ }
case uint, uint32:
maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint()
case nil:
From 35dd23c649bb959cb92654c1938292849b36a96f Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Wed, 4 May 2016 10:22:07 -0700
Subject: [PATCH 038/546] Update AUTHORS and version file
Signed-off-by: Richard Scothern
---
AUTHORS | 12 ++++++++++++
version/version.go | 2 +-
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/AUTHORS b/AUTHORS
index 0857b62f..70d52599 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,4 +1,5 @@
Aaron Lehmann
+Aaron Schlesinger
Aaron Vinson
Adam Enger
Adrian Mouat
@@ -7,13 +8,16 @@ Alex Chan
Alex Elman
amitshukla
Amy Lindburg
+Andrew Hsu
Andrew Meredith
Andrew T Nguyen
Andrey Kostov
Andy Goldstein
+Anis Elleuch
Anton Tiurin
Antonio Mercado
Antonio Murdaca
+Arien Holthuizen
Arnaud Porterie
Arthur Baars
Asuka Suzuki
@@ -27,6 +31,7 @@ burnettk
Carson A
Chris Dillon
Daisuke Fujita
+Daniel Huhn
Darren Shepherd
Dave Trombley
Dave Tucker
@@ -41,6 +46,7 @@ DJ Enriquez
Donald Huang
Doug Davis
Eric Yang
+Fabio Huser
farmerworking
Felix Yan
Florentin Raud
@@ -57,8 +63,10 @@ Jack Griffin
Jason Freidman
Jeff Nickoloff
Jessie Frazelle
+jhaohai
Jianqing Wang
John Starks
+Jon Johnson
Jon Poler
Jonathan Boulle
Jordan Liggitt
@@ -92,17 +100,20 @@ Olivier Gambier
Olivier Jacques
Omer Cohen
Patrick Devine
+Phil Estes
Philip Misiowiec
Richard Scothern
Rodolfo Carvalho
Rusty Conover
Sean Boran
Sebastiaan van Stijn
+Serge Dubrouski
Sharif Nassar
Shawn Falkner-Horine
Shreyas Karnik
Simon Thulbourn
Spencer Rinehart
+Stefan Majewsky
Stefan Weil
Stephen J Day
Sungho Moon
@@ -114,6 +125,7 @@ Thomas Sjögren
Tianon Gravi
Tibor Vass
Tonis Tiigi
+Tony Holdstock-Brown
Trevor Pounds
Troels Thomsen
Vincent Batts
diff --git a/version/version.go b/version/version.go
index b1b880df..cafe2336 100644
--- a/version/version.go
+++ b/version/version.go
@@ -8,4 +8,4 @@ var Package = "github.com/docker/distribution"
// the latest release tag by hand, always suffixed by "+unknown". During
// build, it will be replaced by the actual version. The value here will be
// used if the registry is run after a go get based install.
-var Version = "v2.4.0+unknown"
+var Version = "v2.4.1+unknown"
From b0db8d49bd340d0c945d9c86b032bf66a9575900 Mon Sep 17 00:00:00 2001
From: Mary Anthony
Date: Thu, 21 Apr 2016 09:08:19 -0700
Subject: [PATCH 039/546] Fixing issue identified by customer. Entering Seb's
comment. Fix the flags.
Signed-off-by: Mary Anthony
---
docs/nginx.md | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/docs/nginx.md b/docs/nginx.md
index 17b92f44..361a1063 100644
--- a/docs/nginx.md
+++ b/docs/nginx.md
@@ -76,7 +76,7 @@ events {
}
http {
-
+
upstream docker-registry {
server registry:5000;
}
@@ -98,34 +98,34 @@ http {
# SSL
ssl_certificate /etc/nginx/conf.d/domain.crt;
ssl_certificate_key /etc/nginx/conf.d/domain.key;
-
+
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
ssl_protocols TLSv1.1 TLSv1.2;
ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
-
+
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
-
+
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
-
+
location /v2/ {
# Do not allow connections from docker 1.5 and earlier
# docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) {
return 404;
}
-
+
# To add basic authentication to v2 use auth_basic setting.
auth_basic "Registry realm";
auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd;
-
+
## If $docker_distribution_api_version is empty, the header will not be added.
## See the map directive above where this variable is defined.
add_header 'Docker-Distribution-Api-Version' \$docker_distribution_api_version always;
-
+
proxy_pass http://docker-registry;
proxy_set_header Host \$http_host; # required for docker client's sake
proxy_set_header X-Real-IP \$remote_addr; # pass on real client's IP
@@ -182,7 +182,7 @@ Now, start your stack:
Login with a "push" authorized user (using `testuser` and `testpassword`), then tag and push your first image:
- docker login -p=testuser -u=testpassword -e=root@example.ch myregistrydomain.com:5043
+ docker login -u=testuser -p=testpassword -e=root@example.ch myregistrydomain.com:5043
docker tag ubuntu myregistrydomain.com:5043/test
docker push myregistrydomain.com:5043/test
docker pull myregistrydomain.com:5043/test
From 2a4deee4413a4027b3024113a2c880e217097754 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Wed, 4 May 2016 15:58:08 -0700
Subject: [PATCH 040/546] Remove the unimplemented monolithic upload section
from the API documentation.
Signed-off-by: Richard Scothern
---
docs/spec/api.md | 20 --------------------
docs/spec/api.md.tmpl | 20 --------------------
2 files changed, 40 deletions(-)
diff --git a/docs/spec/api.md b/docs/spec/api.md
index fc074ffb..56e9242c 100644
--- a/docs/spec/api.md
+++ b/docs/spec/api.md
@@ -618,26 +618,6 @@ The "digest" parameter must be included with the PUT request. Please see the
[_Completed Upload_](#completed-upload) section for details on the parameters
and expected responses.
-Additionally, the upload can be completed with a single `POST` request to
-the uploads endpoint, including the "size" and "digest" parameters:
-
-```
-POST /v2//blobs/uploads/?digest=
-Content-Length:
-Content-Type: application/octet-stream
-
-
-```
-
-On the registry service, this should allocate a download, accept and verify
-the data and return the same response as the final chunk of an upload. If the
-POST request fails collecting the data in any way, the registry should attempt
-to return an error response to the client with the `Location` header providing
-a place to continue the download.
-
-The single `POST` method is provided for convenience and most clients should
-implement `POST` + `PUT` to support reliable resume of uploads.
-
##### Chunked Upload
To carry out an upload of a chunk, the client can specify a range header and
diff --git a/docs/spec/api.md.tmpl b/docs/spec/api.md.tmpl
index 1a879ba0..1a9e9f89 100644
--- a/docs/spec/api.md.tmpl
+++ b/docs/spec/api.md.tmpl
@@ -618,26 +618,6 @@ The "digest" parameter must be included with the PUT request. Please see the
[_Completed Upload_](#completed-upload) section for details on the parameters
and expected responses.
-Additionally, the upload can be completed with a single `POST` request to
-the uploads endpoint, including the "size" and "digest" parameters:
-
-```
-POST /v2//blobs/uploads/?digest=
-Content-Length:
-Content-Type: application/octet-stream
-
-
-```
-
-On the registry service, this should allocate a download, accept and verify
-the data and return the same response as the final chunk of an upload. If the
-POST request fails collecting the data in any way, the registry should attempt
-to return an error response to the client with the `Location` header providing
-a place to continue the download.
-
-The single `POST` method is provided for convenience and most clients should
-implement `POST` + `PUT` to support reliable resume of uploads.
-
##### Chunked Upload
To carry out an upload of a chunk, the client can specify a range header and
From 1d782c38f238ef0d7c3cafa92dc08cbf20865690 Mon Sep 17 00:00:00 2001
From: Arthur Baars
Date: Fri, 6 May 2016 10:46:37 +0100
Subject: [PATCH 041/546] StorageDriver: Test case for #1698
Signed-off-by: Arthur Baars
---
registry/handlers/api_test.go | 18 ++---
registry/handlers/app_test.go | 6 +-
registry/storage/blob_test.go | 16 +++--
.../storage/driver/testdriver/testdriver.go | 71 +++++++++++++++++++
4 files changed, 92 insertions(+), 19 deletions(-)
create mode 100644 registry/storage/driver/testdriver/testdriver.go
diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go
index 523ecca2..8f4bff0e 100644
--- a/registry/handlers/api_test.go
+++ b/registry/handlers/api_test.go
@@ -29,7 +29,7 @@ import (
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
- _ "github.com/docker/distribution/registry/storage/driver/inmemory"
+ _ "github.com/docker/distribution/registry/storage/driver/testdriver"
"github.com/docker/distribution/testutil"
"github.com/docker/libtrust"
"github.com/gorilla/handlers"
@@ -219,7 +219,7 @@ func contains(elems []string, e string) bool {
func TestURLPrefix(t *testing.T) {
config := configuration.Configuration{
Storage: configuration.Storage{
- "inmemory": configuration.Parameters{},
+ "testdriver": configuration.Parameters{},
},
}
config.HTTP.Prefix = "/test/"
@@ -296,7 +296,7 @@ func TestBlobDelete(t *testing.T) {
func TestRelativeURL(t *testing.T) {
config := configuration.Configuration{
Storage: configuration.Storage{
- "inmemory": configuration.Parameters{},
+ "testdriver": configuration.Parameters{},
},
}
config.HTTP.Headers = headerConfig
@@ -1884,8 +1884,8 @@ type testEnv struct {
func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv {
config := configuration.Configuration{
Storage: configuration.Storage{
- "inmemory": configuration.Parameters{},
- "delete": configuration.Parameters{"enabled": deleteEnabled},
+ "testdriver": configuration.Parameters{},
+ "delete": configuration.Parameters{"enabled": deleteEnabled},
},
Proxy: configuration.Proxy{
RemoteURL: "http://example.com",
@@ -1899,8 +1899,8 @@ func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv {
func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv {
config := configuration.Configuration{
Storage: configuration.Storage{
- "inmemory": configuration.Parameters{},
- "delete": configuration.Parameters{"enabled": deleteEnabled},
+ "testdriver": configuration.Parameters{},
+ "delete": configuration.Parameters{"enabled": deleteEnabled},
},
}
@@ -2413,7 +2413,7 @@ func TestCheckContextNotifier(t *testing.T) {
func TestProxyManifestGetByTag(t *testing.T) {
truthConfig := configuration.Configuration{
Storage: configuration.Storage{
- "inmemory": configuration.Parameters{},
+ "testdriver": configuration.Parameters{},
},
}
truthConfig.HTTP.Headers = headerConfig
@@ -2427,7 +2427,7 @@ func TestProxyManifestGetByTag(t *testing.T) {
proxyConfig := configuration.Configuration{
Storage: configuration.Storage{
- "inmemory": configuration.Parameters{},
+ "testdriver": configuration.Parameters{},
},
Proxy: configuration.Proxy{
RemoteURL: truthEnv.server.URL,
diff --git a/registry/handlers/app_test.go b/registry/handlers/app_test.go
index caa7ab97..3a8e4e1e 100644
--- a/registry/handlers/app_test.go
+++ b/registry/handlers/app_test.go
@@ -16,7 +16,7 @@ import (
_ "github.com/docker/distribution/registry/auth/silly"
"github.com/docker/distribution/registry/storage"
memorycache "github.com/docker/distribution/registry/storage/cache/memory"
- "github.com/docker/distribution/registry/storage/driver/inmemory"
+ "github.com/docker/distribution/registry/storage/driver/testdriver"
)
// TestAppDispatcher builds an application with a test dispatcher and ensures
@@ -24,7 +24,7 @@ import (
// This only tests the dispatch mechanism. The underlying dispatchers must be
// tested individually.
func TestAppDispatcher(t *testing.T) {
- driver := inmemory.New()
+ driver := testdriver.New()
ctx := context.Background()
registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect)
if err != nil {
@@ -142,7 +142,7 @@ func TestNewApp(t *testing.T) {
ctx := context.Background()
config := configuration.Configuration{
Storage: configuration.Storage{
- "inmemory": nil,
+ "testdriver": nil,
},
Auth: configuration.Auth{
// For now, we simply test that new auth results in a viable
diff --git a/registry/storage/blob_test.go b/registry/storage/blob_test.go
index 7e1a7cd4..3cec3bff 100644
--- a/registry/storage/blob_test.go
+++ b/registry/storage/blob_test.go
@@ -14,7 +14,7 @@ import (
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache/memory"
- "github.com/docker/distribution/registry/storage/driver/inmemory"
+ "github.com/docker/distribution/registry/storage/driver/testdriver"
"github.com/docker/distribution/testutil"
"path"
)
@@ -24,7 +24,7 @@ import (
func TestWriteSeek(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
- driver := inmemory.New()
+ driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
@@ -42,6 +42,7 @@ func TestWriteSeek(t *testing.T) {
}
contents := []byte{1, 2, 3}
blobUpload.Write(contents)
+ blobUpload.Close()
offset := blobUpload.Size()
if offset != int64(len(contents)) {
t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents))
@@ -59,7 +60,7 @@ func TestSimpleBlobUpload(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
- driver := inmemory.New()
+ driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
@@ -120,11 +121,12 @@ func TestSimpleBlobUpload(t *testing.T) {
t.Fatalf("layer data write incomplete")
}
+ blobUpload.Close()
+
offset := blobUpload.Size()
if offset != nn {
t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn)
}
- blobUpload.Close()
// Do a resume, for good fun
blobUpload, err = bs.Resume(ctx, blobUpload.ID())
@@ -253,7 +255,7 @@ func TestSimpleBlobUpload(t *testing.T) {
func TestSimpleBlobRead(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
- driver := inmemory.New()
+ driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
@@ -365,7 +367,7 @@ func TestBlobMount(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
sourceImageName, _ := reference.ParseNamed("foo/source")
- driver := inmemory.New()
+ driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
@@ -516,7 +518,7 @@ func TestBlobMount(t *testing.T) {
func TestLayerUploadZeroLength(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
- driver := inmemory.New()
+ driver := testdriver.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
diff --git a/registry/storage/driver/testdriver/testdriver.go b/registry/storage/driver/testdriver/testdriver.go
new file mode 100644
index 00000000..988e5d33
--- /dev/null
+++ b/registry/storage/driver/testdriver/testdriver.go
@@ -0,0 +1,71 @@
+package testdriver
+
+import (
+ "github.com/docker/distribution/context"
+ storagedriver "github.com/docker/distribution/registry/storage/driver"
+ "github.com/docker/distribution/registry/storage/driver/factory"
+ "github.com/docker/distribution/registry/storage/driver/inmemory"
+)
+
+const driverName = "testdriver"
+
+func init() {
+ factory.Register(driverName, &testDriverFactory{})
+}
+
+// testDriverFactory implements the factory.StorageDriverFactory interface.
+type testDriverFactory struct{}
+
+func (factory *testDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+ return New(), nil
+}
+
+// TestDriver is a StorageDriver for testing purposes. The Writer returned by this driver
+// simulates the case where Write operations are buffered. This causes the value returned by Size to lag
+// behind until Close (or Commit, or Cancel) is called.
+type TestDriver struct {
+ storagedriver.StorageDriver
+}
+
+type testFileWriter struct {
+ storagedriver.FileWriter
+ prevchunk []byte
+}
+
+var _ storagedriver.StorageDriver = &TestDriver{}
+
+// New constructs a new StorageDriver for testing purposes. The Writer returned by this driver
+// simulates the case where Write operations are buffered. This causes the value returned by Size to lag
+// behind until Close (or Commit, or Cancel) is called.
+func New() *TestDriver {
+ return &TestDriver{StorageDriver: inmemory.New()}
+}
+
+// Writer returns a FileWriter which will store the content written to it
+// at the location designated by "path" after the call to Commit.
+func (td *TestDriver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
+ fw, err := td.StorageDriver.Writer(ctx, path, append)
+ return &testFileWriter{FileWriter: fw}, err
+}
+
+func (tfw *testFileWriter) Write(p []byte) (int, error) {
+ _, err := tfw.FileWriter.Write(tfw.prevchunk)
+ tfw.prevchunk = make([]byte, len(p))
+ copy(tfw.prevchunk, p)
+ return len(p), err
+}
+
+func (tfw *testFileWriter) Close() error {
+ tfw.Write(nil)
+ return tfw.FileWriter.Close()
+}
+
+func (tfw *testFileWriter) Cancel() error {
+ tfw.Write(nil)
+ return tfw.FileWriter.Cancel()
+}
+
+func (tfw *testFileWriter) Commit() error {
+ tfw.Write(nil)
+ return tfw.FileWriter.Commit()
+}
From 0490ff450b797e7092d98d848625bd949987d56b Mon Sep 17 00:00:00 2001
From: Arthur Baars
Date: Thu, 5 May 2016 15:49:14 +0100
Subject: [PATCH 042/546] Blobwriter: call BlobWriter.Size after
BlobWriter.Close
Signed-off-by: Arthur Baars
---
registry/handlers/blobupload.go | 5 +----
registry/storage/blobwriter.go | 1 +
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/registry/handlers/blobupload.go b/registry/handlers/blobupload.go
index 673e2c59..b403a167 100644
--- a/registry/handlers/blobupload.go
+++ b/registry/handlers/blobupload.go
@@ -134,7 +134,6 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req
}
buh.Upload = upload
- defer buh.Upload.Close()
if err := buh.blobUploadResponse(w, r, true); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
@@ -224,11 +223,8 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht
return
}
- size := buh.Upload.Size()
-
desc, err := buh.Upload.Commit(buh, distribution.Descriptor{
Digest: dgst,
- Size: size,
// TODO(stevvooe): This isn't wildly important yet, but we should
// really set the mediatype. For now, we can let the backend take care
@@ -293,6 +289,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.
// TODO(stevvooe): Need a better way to manage the upload state automatically.
buh.State.Name = buh.Repository.Named().Name()
buh.State.UUID = buh.Upload.ID()
+ buh.Upload.Close()
buh.State.Offset = buh.Upload.Size()
buh.State.StartedAt = buh.Upload.StartedAt()
diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go
index 3387bafb..48ac8a75 100644
--- a/registry/storage/blobwriter.go
+++ b/registry/storage/blobwriter.go
@@ -58,6 +58,7 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor)
}
bw.Close()
+ desc.Size = bw.Size()
canonical, err := bw.validateBlob(ctx, desc)
if err != nil {
From eca581cf36d7994b171f8b357f78de1d923474ff Mon Sep 17 00:00:00 2001
From: Arthur Baars
Date: Thu, 5 May 2016 17:16:48 +0100
Subject: [PATCH 043/546] StorageDriver: GCS: allow Cancel on a closed
FileWriter
Signed-off-by: Arthur Baars
---
registry/storage/driver/gcs/gcs.go | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go
index abe0b9f6..1369c280 100644
--- a/registry/storage/driver/gcs/gcs.go
+++ b/registry/storage/driver/gcs/gcs.go
@@ -321,12 +321,8 @@ type writer struct {
// Cancel removes any written content from this FileWriter.
func (w *writer) Cancel() error {
- err := w.checkClosed()
- if err != nil {
- return err
- }
w.closed = true
- err = storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
+ err := storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
if err != nil {
if status, ok := err.(*googleapi.Error); ok {
if status.Code == http.StatusNotFound {
From 4f2ee029a2a9b48d7f28f5013268196ed0ebe1e9 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Mon, 9 May 2016 16:38:16 +0100
Subject: [PATCH 044/546] Add 'us-gov-west-1' to the valid region list.
Signed-off-by: Richard Scothern
---
registry/storage/driver/s3-aws/s3.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go
index 565f264d..902abeb4 100644
--- a/registry/storage/driver/s3-aws/s3.go
+++ b/registry/storage/driver/s3-aws/s3.go
@@ -82,6 +82,7 @@ func init() {
"ap-northeast-2",
"sa-east-1",
"cn-north-1",
+ "us-gov-west-1",
} {
validRegions[region] = struct{}{}
}
From db713e127ba35bd15595113df56ee964de24637a Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Mon, 9 May 2016 14:21:53 -0700
Subject: [PATCH 045/546] Generate the certificate directory with a pretest
script
fixes #1690
Signed-off-by: Derek McGowan (github: dmcgowan)
---
contrib/docker-integration/docker-compose.yml | 2 +-
.../localregistry:5440/ca.crt | 29 -----------
.../localregistry:5441/ca.crt | 29 -----------
.../localregistry:5442/ca.crt | 29 -----------
.../localregistry:5442/client.cert | 29 -----------
.../localregistry:5442/client.key | 51 -------------------
.../localregistry:5443/ca.crt | 29 -----------
.../localregistry:5443/client.cert | 29 -----------
.../localregistry:5443/client.key | 51 -------------------
.../localregistry:5444/ca.crt | 29 -----------
.../localregistry:5444/client.cert | 29 -----------
.../localregistry:5444/client.key | 51 -------------------
.../localregistry:5447/client.cert | 29 -----------
.../localregistry:5447/client.key | 51 -------------------
.../localregistry:5448/ca.crt | 29 -----------
.../localregistry:5553/ca.crt | 18 -------
.../localregistry:5554/ca.crt | 18 -------
.../localregistry:5555/ca.crt | 18 -------
.../localregistry:5557/ca.crt | 18 -------
.../localregistry:5558/ca.crt | 18 -------
.../localregistry:6666/ca.crt | 18 -------
contrib/docker-integration/golem.conf | 2 +
contrib/docker-integration/install_certs.sh | 30 +++++------
23 files changed, 15 insertions(+), 621 deletions(-)
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5440/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5441/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5442/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5442/client.cert
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5442/client.key
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5443/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5443/client.cert
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5443/client.key
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5444/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5444/client.cert
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5444/client.key
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5447/client.cert
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5447/client.key
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5448/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5553/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5554/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5555/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5557/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:5558/ca.crt
delete mode 100644 contrib/docker-integration/generated_certs.d/localregistry:6666/ca.crt
diff --git a/contrib/docker-integration/docker-compose.yml b/contrib/docker-integration/docker-compose.yml
index 24ae32a1..32bfaad0 100644
--- a/contrib/docker-integration/docker-compose.yml
+++ b/contrib/docker-integration/docker-compose.yml
@@ -75,7 +75,7 @@ docker:
environment:
DOCKER_GRAPHDRIVER:
volumes:
- - ./generated_certs.d:/etc/docker/certs.d
+ - /etc/generated_certs.d:/etc/docker/certs.d
- /var/lib/docker
links:
- nginx:localregistry
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5440/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5440/ca.crt
deleted file mode 100644
index 8c9b1bca..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5440/ca.crt
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw
-NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX
-GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx
-BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT
-wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO
-cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY
-AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR
-sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE
-CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p
-y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk
-o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN
-NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G
-Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/
-BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z
-PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU
-lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU
-GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna
-XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi
-QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK
-UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w
-Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH
-sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF
-AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib
-KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA
-KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w==
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5441/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5441/ca.crt
deleted file mode 100644
index 8c9b1bca..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5441/ca.crt
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw
-NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX
-GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx
-BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT
-wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO
-cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY
-AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR
-sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE
-CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p
-y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk
-o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN
-NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G
-Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/
-BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z
-PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU
-lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU
-GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna
-XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi
-QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK
-UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w
-Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH
-sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF
-AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib
-KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA
-KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w==
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5442/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5442/ca.crt
deleted file mode 100644
index 8c9b1bca..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5442/ca.crt
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw
-NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX
-GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx
-BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT
-wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO
-cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY
-AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR
-sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE
-CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p
-y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk
-o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN
-NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G
-Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/
-BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z
-PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU
-lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU
-GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna
-XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi
-QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK
-UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w
-Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH
-sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF
-AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib
-KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA
-KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w==
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5442/client.cert b/contrib/docker-integration/generated_certs.d/localregistry:5442/client.cert
deleted file mode 100644
index a239939d..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5442/client.cert
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9TCCAt+gAwIBAgIRAKbgxG1zgQI81ISaHxqLfpcwCwYJKoZIhvcNAQELMCYx
-ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNTA1MjYy
-MDU0MjJaFw0xODA1MTAyMDU0MjJaMBMxETAPBgNVBAoTCFF1aWNrVExTMIICIjAN
-BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/
-oARHbx59G+GOeGkrwG6ZWSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8M
-WpLxp5U9LyYkv0AiSPfT2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/
-MgJbdTylEq1UcZSLMuky+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7II
-hGlhziLVTKV9W1RP8Aop8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4
-nFwmuhOo8gvw/HhzYcxyMHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCai
-gwUNzfe4/dHeCk/r3pteWOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru
-5QqKMrbSlOcd6yHT6NM1ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/
-Vlp5N+WRjDpsBscR8kt2Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoO
-nhRqhl2PSphcWdimk8Bwf5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3j
-NLQ8EmHWaZlJSeW4BiDYsXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeB
-twZJXIXR6Jc8hgsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1UdJQQMMAoG
-CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQCl0cTLbLIn
-XFuxreei+y6TlG2Z5XcxJ84mr8VLAaQMlJOLZV0O/suFBu9KqBuvPaHhGRnKE2uw
-Vxdj9qaDdvmvuzi4jYyUA/sQuqq1+wHwGTadOi9r0IsL8OxzsG16OlhuXzhoQVdw
-C9z1jad4HC7uihQ5yhl2ltAA+h5G0Sr1b9El2mx4p6BV+okmTvrqrmjshQb1GZwx
-jG6SJ/uvjGf7rn09ZyYafF9ZDTMNodNXjW8orqGlFdXZLPFJ9agUFfwWfqD2lrtm
-Fu+Ei0ZvKOtyzmh06eO2aGAHJCBTfcDM4tBKBKp0MOMoZkcQQDNpSyI12j6s1wtx
-/1dC8QDyfFpZFXTbKn3q+6MpR+u5zqVquYjwP5DqGTvX0e1sLSthv7LRiOi0qHv1
-bZ8JoWhRMNumui9mzwar5t20ExcWxGxizZY+t+OIj4kaAeRoKK6r6FrYBnTjM+iR
-+xtML5UHPOSmYfNcai0Wn4T7hwpgnCJ+K7qGYjFUCarsINppQEwkxHAvuX+asc38
-nA0wd7ByulkMJph0gP6j6LuJf28JODi6EQ7FcQItMeTuPrc+mpqJ4jP7vTTSJG7Q
-wvqXLMgFQFR+2PG0s10hbY/Y/nwZAROfAs7ADED+EcDPTl/+XjVyo/aYIeOb/07W
-SpS/cacZYUsSLgB4cWbxElcc/p7CW1PbOA==
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5442/client.key b/contrib/docker-integration/generated_certs.d/localregistry:5442/client.key
deleted file mode 100644
index acfc9a48..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5442/client.key
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKQIBAAKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/oARHbx59G+GOeGkrwG6Z
-WSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8MWpLxp5U9LyYkv0AiSPfT
-2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/MgJbdTylEq1UcZSLMuky
-+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7IIhGlhziLVTKV9W1RP8Aop
-8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4nFwmuhOo8gvw/HhzYcxy
-MHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCaigwUNzfe4/dHeCk/r3pte
-WOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru5QqKMrbSlOcd6yHT6NM1
-ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/Vlp5N+WRjDpsBscR8kt2
-Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoOnhRqhl2PSphcWdimk8Bw
-f5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3jNLQ8EmHWaZlJSeW4BiDY
-sXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeBtwZJXIXR6Jc8hgsCAwEA
-AQKCAgBJcL1iR5ROMtr0ZNIp4gciALfjQVV3gb48GR/e/9b/LWI0j3i0sOzeLN3h
-SLda1fjzOn1Td1ma0dZwmdMUOF+hvhPDYZfzkwWLLkThXgLt/At3rMYstGWa8pN2
-wVUSH7sri7IHmYedP3baQdrHP/9pUsGQc+m8ASTE3i+PFcKbPe5+818HTtRrhVgN
-X3oNmPKUNCmSom7ZcKer5P1+Ruum0NuDgomCdkoZgfhjeKeLrVjl/wXDSQL/AhWA
-02c4/sML7xx19nl8uf7z+Gj0ir1pvRouhRJTwnRc4KdWu+Yn7WLU8j2ZKf5St/as
-zjnpYVEdCp0KSHccgXtobUZDEG2NCHmM6gR2j3qgoUAYjHyqPYlph2r5C47q+p4c
-dDWkpwZwGiuYq9qpZj24X6BfppxExcX6AwOgFLZLp80IynwrMVxFsDd2J+KpKRQ1
-+ZtYPcULwInF9MNi/dv84pxGOmmOaIUyjN8Sw4eqANU4T5uvTjUj7Ou6KYyfmxgG
-y++vjpRN7tN1t1Hwde8SVWobvmhU+5SJVHV8INoJD7uciaevPo9pt833SQTtDXeY
-PVBhOKO7thAxdUiqlU/1nGTXnf1VO6wAjaVYoTnP4tJ97WuTptwd2F5znVWHFGVh
-lzJAzmFOuyCnRnInsf4n5EmWJnT7XF2CofQqAJ8NIddrU8GnQQKCAQEAyqWAiPMK
-I/dMzlS7oJGlhbKZ5R4buc+EoZqtW7/8/S+0L6IaQvpEUilD+aDQyaxXjoKiQQL+
-0UeeSmF/zU5BsOTpB8AuJUfYoUe0N+x7hO5eIcoCB/QWYX+iC3tCN4j1Iwt6VliV
-PBYEiLUYPngSIHob/nK8UtgxrWQ3Fik9XJtWhePHrvMvDBalgCKdnyhuucGxKUjc
-TtPcyMFdi0z4Kt/FAm+5u/v4ZkO909Ish0FrAqQ9t5ETfvTTTYKBmzny6/LSPTK9
-0XIsHltuC1xG4vGQsES/Ph++Yj3Vn011FqvFZeBUHbfcQuB4h5wcb+90d4GU1kux
-eabsHPIZKrlN4QKCAQEA2Fs8NAN5K9i7qbxZCJPi6DJV6XMznk6JVGb+qkkChCyq
-IOXb95+c9CIpe6w2d3res3zvML3zbdz2Lyp9G0ve6tSlOaSnHeyIxZ5SRB+yQrcF
-GXtsx370bOGjCi1/NH85kwKlMuROFJKleJQv8rKpIEo5aPSPV9Cc/VsUqBpvR+O0
-U1HMv57P4yJA/ddw6imHJBl3jTmWBpK4B+LBsCbdypxdVoO8t32Lb2BqDTaPJfYU
-RJUpjn/efLLoP6CWxYtqpUlY5tc7NJGAokl8Fo1mPn02klydvs09uiXE80Li2Hoc
-/meMH07Lbt2VTw6iGNRX6VpIHEUZGZeS6rbAvO4ZawKCAQEAjOtGVPXdyWEB0kHu
-MBzYY/7tMf0b/rymWNL9Vt5NiauQu8cYSBdNR21WzdLdHkFwqbOCLX9twA7zrnna
-q+SNnfuxaShlbptls9HvKyySQMCaSRj3DJzaq3ZcM2vFgmUFQxeKPV1geeY9xOta
-LqbExDzmFq2m9F1PPmqAPDL1bt6+7mCVzb1irB9be52WysUNKrPdBP6b5V1DHYAK
-EwK1WOs/TxBusqDn/gWBjjmLqYr+ZVndaTfDvPd3sWDdzBoiKZ40QUZ15Z5lu76M
-6e2DhfHCUjGcZBEjDaI+WYc9s0REAzJajEf9Lax3ZKZUyCpWbXx5CgSdKCHB8+cP
-RTyTQQKCAQEAsxx8r5a8hocLfQ43Kvm7HH0nUHeVoRXlbOFDLNf6ZE/RnCCOxOX3
-esiZTRAZmzo2CaOBJPnr/+SwTgW/woxCBGh8TEc6LnS2GdviwRD4c3CuoRTjzhgU
-49q8Ld3SdDRrBoBnIMWOuktY/4S2WRZ9GwU3l+L2lD1Y6gmwBSa1P2+Lxnpupagk
-9CVUZpEnokM05LbMmTa2M8Tc43Je5KSYcnaWctvmrIUbnN3VjhC/2y5oQwq1d4n2
-N4eo65vXlbzAUgtxtNEz62YVdsSdHNJ8dXkVZ3+S+/VPh75i2PxjbdFSFW7Futlx
-YtvAEs3LdgC8squSDQ1LJTutXfBjiUUX9wKCAQBiCMre86tLyJu6Qb6X1cRAwO7m
-4kyGzIUtijXko6mWxb4X/usVvzhSaNVYbHbMZXjX+J5vhBOul+RmQ3EY6nw0H2z8
-9D4z/rnQVqeb0uvIeUhBPni+s4fS4bA92M6Ie5bhiOSF2JjjJr38BFnTZARE7C+7
-ZII7z2c0eQz/wAAt9fWWroAB2mIm6wxq0LNij2NoE0iq6k2xJE1/k8qhXpsN0zAv
-bjG72Q7WryBeK/eIDK9e5wGlfLVDOx2Evlcaj70oJxuoRh57e8fCYy8huJQT+Wlx
-Qw4zhxiyzAMq8SEqFsm8dVO4Bu2FwzmmehA80ieSb+si7JZU92xGDT394Im2
------END RSA PRIVATE KEY-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5443/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5443/ca.crt
deleted file mode 100644
index 8c9b1bca..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5443/ca.crt
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw
-NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX
-GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx
-BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT
-wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO
-cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY
-AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR
-sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE
-CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p
-y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk
-o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN
-NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G
-Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/
-BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z
-PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU
-lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU
-GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna
-XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi
-QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK
-UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w
-Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH
-sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF
-AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib
-KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA
-KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w==
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5443/client.cert b/contrib/docker-integration/generated_certs.d/localregistry:5443/client.cert
deleted file mode 100644
index ef030f74..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5443/client.cert
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9DCCAt6gAwIBAgIQb58oJ+9SvWUCcYWA+L1oiTALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw
-NTUwMFoXDTE4MDUxMDIwNTUwMFowEzERMA8GA1UEChMIUXVpY2tUTFMwggIiMA0G
-CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDDmOL3EhBm4So3agPMmF0z1+/nPlrE
-xoG7x0HYPk5CP3PF3TNVk3ArBPkMzge0/895a4ZEb9j+LUQEjOZa/ZwuLmSjfJSt
-9xTXI1ldp8KasyzQZjC33/bUj7FGxGzgbHyJrGGBoH2W5HdswH4WzhCnGTslyiDo
-VN4hklJ7gr+Geq3TPf8Eji+1L71MOrUyoNp7BaQBQT/gKxK0nV+ZuSk6eaiu+om7
-slp3x4bc21o7eIMmNXggJP6p9fMDctnioKhAPcm+5ADiFYSjivLeUQ85VkMTpmdU
-yvq6ziK3Ls6erD+S3xLvcHYAaeu84qLd7qdPwkHMTQsDpO4vPMIwL8piMzZV+kwL
-Bq+5xk5//FwnQH0pSo2Nr4vRn+DITZc3GKyGUJQoOUgAdfGNskTt8GXa4IsHn5iw
-zr12vGaxb//GDm0RLHnh7NVbD8xxDHIJq+fJNFb7MdXa8v31PYebkWuaPhYt6HQC
-I/D81zwcJIOGfzNITS2ifM5tvMaUXireo4pLC2v2aSY6RrPq1owlB6jGFwGwZSAF
-O6rxSqWO1gLfhJLzqcw/NjWnO7nCZEs/iKgAa22K2CtTt3dDMTvSBYKdkRe/FYQC
-MCa7MFJSaH85pYRzoDN4IuVpvROrtuQmlI47oZzb64uCPoA4A8AN+k8iysqITsgK
-1m8ePPXhbu4YlwIDAQABozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYI
-KwYBBQUHAwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQsDggIBALSgrCdEQd3I
-vb/FNkNZkAwdjfBD6j7ZtPBwvjEiiyNTx9hOLBGvbey7kr0HtW0KkLWsdRmCc+3z
-ev9I5VjDOtpiqrvuAA1wRBaL3UzGyj/eFjPJpvkfJi8zjkIZ2y18QG3yJ6Eqy6dD
-0aIQAHl9hkXMOVrf364gf0p7EoOGtSlfQ56yIGDPTFKKiy+Al0S42p17lhI4coz9
-zGXE1/SiNeZgdsk4zHDqhzzBp8foZuSL1sGcIXHkG8RtqZ1WvCyIPYRyIjIKZcXd
-JCEM//EbgDzQ7VE/jm+hIlYfPjM7fmUzsfii+bIrp/0HGEU3HN++LsA6eQOwWPa/
-PrxKPP36EVXb72QK8C3lmz6y+CHhuuAm0C1b1qmYVEs4eRE21S8eB2l0KUlfOecf
-xZ1LWp1agKt6fGqRgcsR3/qO27l8W7hlbFNPeOTgr6NQQkEMRW5OxbnZ58ULXqr3
-gWh8Na3D4+3j53035UBBQUMmeeFfWCvtr5n0+6BTAi62Cwwu9QQQBM/2f9/9K+B7
-cW0xPYtczm+VwJL6/rDtNN9xPWitxab1dkZp2XcHG3VWtYvE2R2EtEoKvvCLPggx
-zcafsZfcD1wlvtQF7YjykGJnMa0SB0GBl9SQtvGc8PkP39yXHqXZhIoo3fp4qm9v
-RfbdpOr8p/Ks34ZqQPukFwpM1s/6aicF
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5443/client.key b/contrib/docker-integration/generated_certs.d/localregistry:5443/client.key
deleted file mode 100644
index 5aee3ea5..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5443/client.key
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKQIBAAKCAgEAw5ji9xIQZuEqN2oDzJhdM9fv5z5axMaBu8dB2D5OQj9zxd0z
-VZNwKwT5DM4HtP/PeWuGRG/Y/i1EBIzmWv2cLi5ko3yUrfcU1yNZXafCmrMs0GYw
-t9/21I+xRsRs4Gx8iaxhgaB9luR3bMB+Fs4Qpxk7Jcog6FTeIZJSe4K/hnqt0z3/
-BI4vtS+9TDq1MqDaewWkAUE/4CsStJ1fmbkpOnmorvqJu7Jad8eG3NtaO3iDJjV4
-ICT+qfXzA3LZ4qCoQD3JvuQA4hWEo4ry3lEPOVZDE6ZnVMr6us4ity7Onqw/kt8S
-73B2AGnrvOKi3e6nT8JBzE0LA6TuLzzCMC/KYjM2VfpMCwavucZOf/xcJ0B9KUqN
-ja+L0Z/gyE2XNxishlCUKDlIAHXxjbJE7fBl2uCLB5+YsM69drxmsW//xg5tESx5
-4ezVWw/McQxyCavnyTRW+zHV2vL99T2Hm5Frmj4WLeh0AiPw/Nc8HCSDhn8zSE0t
-onzObbzGlF4q3qOKSwtr9mkmOkaz6taMJQeoxhcBsGUgBTuq8UqljtYC34SS86nM
-PzY1pzu5wmRLP4ioAGttitgrU7d3QzE70gWCnZEXvxWEAjAmuzBSUmh/OaWEc6Az
-eCLlab0Tq7bkJpSOO6Gc2+uLgj6AOAPADfpPIsrKiE7ICtZvHjz14W7uGJcCAwEA
-AQKCAgBmIvmxpp8l+cH/ub5OIenZXpMJn4fqZPXtxjjd4HshIN0ln0JlF15lOG2M
-gDGKFGKUts8gAX/ACocQETtgnDnn65XlwPIqfXFGflD2FNoLyjBGinY6LhtIF9is
-aXmpHz1Q7tDjzZiHKLor8cBlzCjp+MToEMpqR5bO1Qd5M2cro/gM7Lyz9kN3S3x/
-x9BCpbgwsVtYxGfEePmFkwAO159tx4WMCYvOlW2kSm5j+a7+iwmA9D7MGkVZHvNN
-A7Y/H0F8ekdVBN5pMG9Yrv/vk0ht2lugcS5YGr4eufFq0mhWdv+jhBTxLzqPMMBG
-m9oMJcj8XyXYtwpfVsqBpCqK2wnEnv4Kf0rZzBU706nI2mjPXx3dL+5qo8uQJKNp
-mxoS7vmHV5RIJgtdvyzGFHjdfu1leowhV+Jy9jWzMw4wlnmlxsfDECf5RoSf2XGt
-SMGJb0dbJKae+W4MfNUFsgAWMZk3h3KF8AHHe44OpDbQeoh3JLnkWSG0oS3CR0ch
-68TzCy0SZZEZ9IS+I6o5WVpwWfReCQ5NjaKipWcpiJvxg+Dc3GG3QcVXVz2gGrJh
-g9v0v6eyeOJ32QGvvP7THFBjpWeeHlXT8Yz6hFcPrvErEZ029TEmhg8aLWBGfsR5
-F1bazdbqvOSEB9vBAAaddNnEDG9Rl8EmC4WdsnVgYUw1J7gfQQKCAQEA9DKjD9eN
-CrUl/2YfSm2WaFhYci74XcHDVeAXN2SbOyKbMIqk3aOFQNRAsLRnwPkdiLtuqeDK
-BafrfLTCORHfFdYKnUzmuekESNLckN9VyLztgqOqNAv3LD6GmSHBaJEnUyniLxOL
-k0wMEBIsEQw7Fb4blM2REYJ3ZzMFmgpRGnIX8KcxhW9XgSrnqMLO0w6mVxjo7xzd
-813nCcNrGhySM/EzKYtTNHy2JZmMH5QFHaIj67KklO7VeEZX5U+TKveBEt4rmHqs
-Ndqf/djSs8vu1xse82pVRxMXX2mhDLmwjUjPgWYxUL92jTiyJhE7GxpVB/yHgF1J
-Ecb47MDahoNKkQKCAQEAzQzvCOA77IQpGO117GcMqcjzwEUhTytojFBT+s5mHfzk
-dYr5TyN86LQ7/GktNoJ5oRvD9UGRSul1OGneivqtWj6mv6/Zvfzacx8NXY4MYFs1
-nEr3Gr7orVFIzD2x7nMPG2G6+J6hZ1rhpnZ9Hprf5G41sHIJxHJ9wTYSUAmFh8bv
-FiJqF90bSq/E5hgjphtX6wZWeZYspzc/5+IrJ/I0nqoxV3rjUy234zlzKJAV10sV
-5oVgxLLQsUujkHp/Da+ij2aTv1Za8y3PTJ7MAHYgdpa5l/4U9MnPUEB2REBCI1NN
-TqxnViwD0xgsvxfb79UzruLJIYOCKvfOumlutXM0pwKCAQBUIMXQhWAP2kyW6mXJ
-TGvO0vDVlZz3H/Pdt/AHo19fRhLU7E7UFKupo/YNanl8H9au7nO3jrvKqwkT02o+
-IwwKB81sV7v9PGu/cvWN64MwPvZMVXojqCOlWH0icGCjV66Glh1YPpGNU1ushbYs
-wVvxp6b04sUhlSLxqMA7S2aZh8j7nX4QDEXHODLLDyIV0Cw6QViuV/GXEDiyQmK5
-gjJUNrp7i4ZExNozpeyCTIpepSde4hKVRJrCbumFFJ8M5GvRRj0asNh3TTRlTbd5
-Pb6w2KUXEwECFW+t7UQQkEBkzDrAx6YhvXRoPqoRN0p3keDNeZBtBrZPq47CccZX
-JRAhAoIBAQCJ/DgnGu54XP9i/PksGrSU1Nvi+SJPKoDyW2QIFTj22SXMS7c1oEYA
-OrlbRFPeqLK8zfhyZKsnZC8zxVqy37okTqDbwbSfezZt3emamWqOtRJAmNnsr6fY
-aii4+JNySQ9Td9LgV69549iRso7EN6iPCfMrR7J29izWBlMQdTfchOyDUqleYbZp
-7hpsVLY4o5HoYJ10uLBX3oAsxTARc5YhZ5pIqjOr18o1KIXsN/napXaZaAwUkdiK
-VsI9CZHSXezg30Bxs+UEXEFx6DKT5Oo3o3pFZAAqMlxGPvrXNv7K0tXlKXNos7nn
-Jg+GkMG6hRiAibCb0umXjKcbHrQXeu1lAoIBAQDcRBsy6cSQXMSu6+PyroH+2DvR
-4fuiMfSrUNjv+9K8gtjYLetrZUvRuFT3A/KzDrALKyTFTGJk3YlpTaC5iNKd+QK8
-6RBJRYeYV16fpX/2ak/8MgfB2gdW//pE0eFjw+qakcUXmo957m7dUXbOrw1VNAET
-LVBeVnml+2FUj0sTXGwHKcINPR78PWZ8i1ka9DptnKLBNeA+x+OMkCA88RJJegSk
-/rgDDV52z4fJHQJh9TZ7zLAXxGgDFYLGPTrdeT+D/owuPXF+SCP4pMtVnwbQgH9G
-dfQ9bb7G14vAeu/kEkFdGFEreS09BOTRbTfzFjFdDvSV4JyOXe9i/sUDxf9R
------END RSA PRIVATE KEY-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5444/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5444/ca.crt
deleted file mode 100644
index 8c9b1bca..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5444/ca.crt
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw
-NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX
-GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx
-BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT
-wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO
-cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY
-AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR
-sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE
-CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p
-y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk
-o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN
-NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G
-Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/
-BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z
-PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU
-lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU
-GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna
-XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi
-QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK
-UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w
-Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH
-sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF
-AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib
-KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA
-KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w==
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5444/client.cert b/contrib/docker-integration/generated_certs.d/localregistry:5444/client.cert
deleted file mode 100644
index a239939d..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5444/client.cert
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9TCCAt+gAwIBAgIRAKbgxG1zgQI81ISaHxqLfpcwCwYJKoZIhvcNAQELMCYx
-ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNTA1MjYy
-MDU0MjJaFw0xODA1MTAyMDU0MjJaMBMxETAPBgNVBAoTCFF1aWNrVExTMIICIjAN
-BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/
-oARHbx59G+GOeGkrwG6ZWSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8M
-WpLxp5U9LyYkv0AiSPfT2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/
-MgJbdTylEq1UcZSLMuky+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7II
-hGlhziLVTKV9W1RP8Aop8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4
-nFwmuhOo8gvw/HhzYcxyMHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCai
-gwUNzfe4/dHeCk/r3pteWOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru
-5QqKMrbSlOcd6yHT6NM1ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/
-Vlp5N+WRjDpsBscR8kt2Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoO
-nhRqhl2PSphcWdimk8Bwf5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3j
-NLQ8EmHWaZlJSeW4BiDYsXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeB
-twZJXIXR6Jc8hgsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1UdJQQMMAoG
-CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQCl0cTLbLIn
-XFuxreei+y6TlG2Z5XcxJ84mr8VLAaQMlJOLZV0O/suFBu9KqBuvPaHhGRnKE2uw
-Vxdj9qaDdvmvuzi4jYyUA/sQuqq1+wHwGTadOi9r0IsL8OxzsG16OlhuXzhoQVdw
-C9z1jad4HC7uihQ5yhl2ltAA+h5G0Sr1b9El2mx4p6BV+okmTvrqrmjshQb1GZwx
-jG6SJ/uvjGf7rn09ZyYafF9ZDTMNodNXjW8orqGlFdXZLPFJ9agUFfwWfqD2lrtm
-Fu+Ei0ZvKOtyzmh06eO2aGAHJCBTfcDM4tBKBKp0MOMoZkcQQDNpSyI12j6s1wtx
-/1dC8QDyfFpZFXTbKn3q+6MpR+u5zqVquYjwP5DqGTvX0e1sLSthv7LRiOi0qHv1
-bZ8JoWhRMNumui9mzwar5t20ExcWxGxizZY+t+OIj4kaAeRoKK6r6FrYBnTjM+iR
-+xtML5UHPOSmYfNcai0Wn4T7hwpgnCJ+K7qGYjFUCarsINppQEwkxHAvuX+asc38
-nA0wd7ByulkMJph0gP6j6LuJf28JODi6EQ7FcQItMeTuPrc+mpqJ4jP7vTTSJG7Q
-wvqXLMgFQFR+2PG0s10hbY/Y/nwZAROfAs7ADED+EcDPTl/+XjVyo/aYIeOb/07W
-SpS/cacZYUsSLgB4cWbxElcc/p7CW1PbOA==
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5444/client.key b/contrib/docker-integration/generated_certs.d/localregistry:5444/client.key
deleted file mode 100644
index acfc9a48..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5444/client.key
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKQIBAAKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/oARHbx59G+GOeGkrwG6Z
-WSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8MWpLxp5U9LyYkv0AiSPfT
-2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/MgJbdTylEq1UcZSLMuky
-+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7IIhGlhziLVTKV9W1RP8Aop
-8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4nFwmuhOo8gvw/HhzYcxy
-MHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCaigwUNzfe4/dHeCk/r3pte
-WOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru5QqKMrbSlOcd6yHT6NM1
-ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/Vlp5N+WRjDpsBscR8kt2
-Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoOnhRqhl2PSphcWdimk8Bw
-f5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3jNLQ8EmHWaZlJSeW4BiDY
-sXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeBtwZJXIXR6Jc8hgsCAwEA
-AQKCAgBJcL1iR5ROMtr0ZNIp4gciALfjQVV3gb48GR/e/9b/LWI0j3i0sOzeLN3h
-SLda1fjzOn1Td1ma0dZwmdMUOF+hvhPDYZfzkwWLLkThXgLt/At3rMYstGWa8pN2
-wVUSH7sri7IHmYedP3baQdrHP/9pUsGQc+m8ASTE3i+PFcKbPe5+818HTtRrhVgN
-X3oNmPKUNCmSom7ZcKer5P1+Ruum0NuDgomCdkoZgfhjeKeLrVjl/wXDSQL/AhWA
-02c4/sML7xx19nl8uf7z+Gj0ir1pvRouhRJTwnRc4KdWu+Yn7WLU8j2ZKf5St/as
-zjnpYVEdCp0KSHccgXtobUZDEG2NCHmM6gR2j3qgoUAYjHyqPYlph2r5C47q+p4c
-dDWkpwZwGiuYq9qpZj24X6BfppxExcX6AwOgFLZLp80IynwrMVxFsDd2J+KpKRQ1
-+ZtYPcULwInF9MNi/dv84pxGOmmOaIUyjN8Sw4eqANU4T5uvTjUj7Ou6KYyfmxgG
-y++vjpRN7tN1t1Hwde8SVWobvmhU+5SJVHV8INoJD7uciaevPo9pt833SQTtDXeY
-PVBhOKO7thAxdUiqlU/1nGTXnf1VO6wAjaVYoTnP4tJ97WuTptwd2F5znVWHFGVh
-lzJAzmFOuyCnRnInsf4n5EmWJnT7XF2CofQqAJ8NIddrU8GnQQKCAQEAyqWAiPMK
-I/dMzlS7oJGlhbKZ5R4buc+EoZqtW7/8/S+0L6IaQvpEUilD+aDQyaxXjoKiQQL+
-0UeeSmF/zU5BsOTpB8AuJUfYoUe0N+x7hO5eIcoCB/QWYX+iC3tCN4j1Iwt6VliV
-PBYEiLUYPngSIHob/nK8UtgxrWQ3Fik9XJtWhePHrvMvDBalgCKdnyhuucGxKUjc
-TtPcyMFdi0z4Kt/FAm+5u/v4ZkO909Ish0FrAqQ9t5ETfvTTTYKBmzny6/LSPTK9
-0XIsHltuC1xG4vGQsES/Ph++Yj3Vn011FqvFZeBUHbfcQuB4h5wcb+90d4GU1kux
-eabsHPIZKrlN4QKCAQEA2Fs8NAN5K9i7qbxZCJPi6DJV6XMznk6JVGb+qkkChCyq
-IOXb95+c9CIpe6w2d3res3zvML3zbdz2Lyp9G0ve6tSlOaSnHeyIxZ5SRB+yQrcF
-GXtsx370bOGjCi1/NH85kwKlMuROFJKleJQv8rKpIEo5aPSPV9Cc/VsUqBpvR+O0
-U1HMv57P4yJA/ddw6imHJBl3jTmWBpK4B+LBsCbdypxdVoO8t32Lb2BqDTaPJfYU
-RJUpjn/efLLoP6CWxYtqpUlY5tc7NJGAokl8Fo1mPn02klydvs09uiXE80Li2Hoc
-/meMH07Lbt2VTw6iGNRX6VpIHEUZGZeS6rbAvO4ZawKCAQEAjOtGVPXdyWEB0kHu
-MBzYY/7tMf0b/rymWNL9Vt5NiauQu8cYSBdNR21WzdLdHkFwqbOCLX9twA7zrnna
-q+SNnfuxaShlbptls9HvKyySQMCaSRj3DJzaq3ZcM2vFgmUFQxeKPV1geeY9xOta
-LqbExDzmFq2m9F1PPmqAPDL1bt6+7mCVzb1irB9be52WysUNKrPdBP6b5V1DHYAK
-EwK1WOs/TxBusqDn/gWBjjmLqYr+ZVndaTfDvPd3sWDdzBoiKZ40QUZ15Z5lu76M
-6e2DhfHCUjGcZBEjDaI+WYc9s0REAzJajEf9Lax3ZKZUyCpWbXx5CgSdKCHB8+cP
-RTyTQQKCAQEAsxx8r5a8hocLfQ43Kvm7HH0nUHeVoRXlbOFDLNf6ZE/RnCCOxOX3
-esiZTRAZmzo2CaOBJPnr/+SwTgW/woxCBGh8TEc6LnS2GdviwRD4c3CuoRTjzhgU
-49q8Ld3SdDRrBoBnIMWOuktY/4S2WRZ9GwU3l+L2lD1Y6gmwBSa1P2+Lxnpupagk
-9CVUZpEnokM05LbMmTa2M8Tc43Je5KSYcnaWctvmrIUbnN3VjhC/2y5oQwq1d4n2
-N4eo65vXlbzAUgtxtNEz62YVdsSdHNJ8dXkVZ3+S+/VPh75i2PxjbdFSFW7Futlx
-YtvAEs3LdgC8squSDQ1LJTutXfBjiUUX9wKCAQBiCMre86tLyJu6Qb6X1cRAwO7m
-4kyGzIUtijXko6mWxb4X/usVvzhSaNVYbHbMZXjX+J5vhBOul+RmQ3EY6nw0H2z8
-9D4z/rnQVqeb0uvIeUhBPni+s4fS4bA92M6Ie5bhiOSF2JjjJr38BFnTZARE7C+7
-ZII7z2c0eQz/wAAt9fWWroAB2mIm6wxq0LNij2NoE0iq6k2xJE1/k8qhXpsN0zAv
-bjG72Q7WryBeK/eIDK9e5wGlfLVDOx2Evlcaj70oJxuoRh57e8fCYy8huJQT+Wlx
-Qw4zhxiyzAMq8SEqFsm8dVO4Bu2FwzmmehA80ieSb+si7JZU92xGDT394Im2
------END RSA PRIVATE KEY-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5447/client.cert b/contrib/docker-integration/generated_certs.d/localregistry:5447/client.cert
deleted file mode 100644
index a239939d..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5447/client.cert
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9TCCAt+gAwIBAgIRAKbgxG1zgQI81ISaHxqLfpcwCwYJKoZIhvcNAQELMCYx
-ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNTA1MjYy
-MDU0MjJaFw0xODA1MTAyMDU0MjJaMBMxETAPBgNVBAoTCFF1aWNrVExTMIICIjAN
-BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/
-oARHbx59G+GOeGkrwG6ZWSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8M
-WpLxp5U9LyYkv0AiSPfT2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/
-MgJbdTylEq1UcZSLMuky+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7II
-hGlhziLVTKV9W1RP8Aop8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4
-nFwmuhOo8gvw/HhzYcxyMHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCai
-gwUNzfe4/dHeCk/r3pteWOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru
-5QqKMrbSlOcd6yHT6NM1ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/
-Vlp5N+WRjDpsBscR8kt2Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoO
-nhRqhl2PSphcWdimk8Bwf5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3j
-NLQ8EmHWaZlJSeW4BiDYsXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeB
-twZJXIXR6Jc8hgsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1UdJQQMMAoG
-CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQCl0cTLbLIn
-XFuxreei+y6TlG2Z5XcxJ84mr8VLAaQMlJOLZV0O/suFBu9KqBuvPaHhGRnKE2uw
-Vxdj9qaDdvmvuzi4jYyUA/sQuqq1+wHwGTadOi9r0IsL8OxzsG16OlhuXzhoQVdw
-C9z1jad4HC7uihQ5yhl2ltAA+h5G0Sr1b9El2mx4p6BV+okmTvrqrmjshQb1GZwx
-jG6SJ/uvjGf7rn09ZyYafF9ZDTMNodNXjW8orqGlFdXZLPFJ9agUFfwWfqD2lrtm
-Fu+Ei0ZvKOtyzmh06eO2aGAHJCBTfcDM4tBKBKp0MOMoZkcQQDNpSyI12j6s1wtx
-/1dC8QDyfFpZFXTbKn3q+6MpR+u5zqVquYjwP5DqGTvX0e1sLSthv7LRiOi0qHv1
-bZ8JoWhRMNumui9mzwar5t20ExcWxGxizZY+t+OIj4kaAeRoKK6r6FrYBnTjM+iR
-+xtML5UHPOSmYfNcai0Wn4T7hwpgnCJ+K7qGYjFUCarsINppQEwkxHAvuX+asc38
-nA0wd7ByulkMJph0gP6j6LuJf28JODi6EQ7FcQItMeTuPrc+mpqJ4jP7vTTSJG7Q
-wvqXLMgFQFR+2PG0s10hbY/Y/nwZAROfAs7ADED+EcDPTl/+XjVyo/aYIeOb/07W
-SpS/cacZYUsSLgB4cWbxElcc/p7CW1PbOA==
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5447/client.key b/contrib/docker-integration/generated_certs.d/localregistry:5447/client.key
deleted file mode 100644
index acfc9a48..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5447/client.key
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKQIBAAKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/oARHbx59G+GOeGkrwG6Z
-WSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8MWpLxp5U9LyYkv0AiSPfT
-2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/MgJbdTylEq1UcZSLMuky
-+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7IIhGlhziLVTKV9W1RP8Aop
-8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4nFwmuhOo8gvw/HhzYcxy
-MHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCaigwUNzfe4/dHeCk/r3pte
-WOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru5QqKMrbSlOcd6yHT6NM1
-ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/Vlp5N+WRjDpsBscR8kt2
-Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoOnhRqhl2PSphcWdimk8Bw
-f5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3jNLQ8EmHWaZlJSeW4BiDY
-sXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeBtwZJXIXR6Jc8hgsCAwEA
-AQKCAgBJcL1iR5ROMtr0ZNIp4gciALfjQVV3gb48GR/e/9b/LWI0j3i0sOzeLN3h
-SLda1fjzOn1Td1ma0dZwmdMUOF+hvhPDYZfzkwWLLkThXgLt/At3rMYstGWa8pN2
-wVUSH7sri7IHmYedP3baQdrHP/9pUsGQc+m8ASTE3i+PFcKbPe5+818HTtRrhVgN
-X3oNmPKUNCmSom7ZcKer5P1+Ruum0NuDgomCdkoZgfhjeKeLrVjl/wXDSQL/AhWA
-02c4/sML7xx19nl8uf7z+Gj0ir1pvRouhRJTwnRc4KdWu+Yn7WLU8j2ZKf5St/as
-zjnpYVEdCp0KSHccgXtobUZDEG2NCHmM6gR2j3qgoUAYjHyqPYlph2r5C47q+p4c
-dDWkpwZwGiuYq9qpZj24X6BfppxExcX6AwOgFLZLp80IynwrMVxFsDd2J+KpKRQ1
-+ZtYPcULwInF9MNi/dv84pxGOmmOaIUyjN8Sw4eqANU4T5uvTjUj7Ou6KYyfmxgG
-y++vjpRN7tN1t1Hwde8SVWobvmhU+5SJVHV8INoJD7uciaevPo9pt833SQTtDXeY
-PVBhOKO7thAxdUiqlU/1nGTXnf1VO6wAjaVYoTnP4tJ97WuTptwd2F5znVWHFGVh
-lzJAzmFOuyCnRnInsf4n5EmWJnT7XF2CofQqAJ8NIddrU8GnQQKCAQEAyqWAiPMK
-I/dMzlS7oJGlhbKZ5R4buc+EoZqtW7/8/S+0L6IaQvpEUilD+aDQyaxXjoKiQQL+
-0UeeSmF/zU5BsOTpB8AuJUfYoUe0N+x7hO5eIcoCB/QWYX+iC3tCN4j1Iwt6VliV
-PBYEiLUYPngSIHob/nK8UtgxrWQ3Fik9XJtWhePHrvMvDBalgCKdnyhuucGxKUjc
-TtPcyMFdi0z4Kt/FAm+5u/v4ZkO909Ish0FrAqQ9t5ETfvTTTYKBmzny6/LSPTK9
-0XIsHltuC1xG4vGQsES/Ph++Yj3Vn011FqvFZeBUHbfcQuB4h5wcb+90d4GU1kux
-eabsHPIZKrlN4QKCAQEA2Fs8NAN5K9i7qbxZCJPi6DJV6XMznk6JVGb+qkkChCyq
-IOXb95+c9CIpe6w2d3res3zvML3zbdz2Lyp9G0ve6tSlOaSnHeyIxZ5SRB+yQrcF
-GXtsx370bOGjCi1/NH85kwKlMuROFJKleJQv8rKpIEo5aPSPV9Cc/VsUqBpvR+O0
-U1HMv57P4yJA/ddw6imHJBl3jTmWBpK4B+LBsCbdypxdVoO8t32Lb2BqDTaPJfYU
-RJUpjn/efLLoP6CWxYtqpUlY5tc7NJGAokl8Fo1mPn02klydvs09uiXE80Li2Hoc
-/meMH07Lbt2VTw6iGNRX6VpIHEUZGZeS6rbAvO4ZawKCAQEAjOtGVPXdyWEB0kHu
-MBzYY/7tMf0b/rymWNL9Vt5NiauQu8cYSBdNR21WzdLdHkFwqbOCLX9twA7zrnna
-q+SNnfuxaShlbptls9HvKyySQMCaSRj3DJzaq3ZcM2vFgmUFQxeKPV1geeY9xOta
-LqbExDzmFq2m9F1PPmqAPDL1bt6+7mCVzb1irB9be52WysUNKrPdBP6b5V1DHYAK
-EwK1WOs/TxBusqDn/gWBjjmLqYr+ZVndaTfDvPd3sWDdzBoiKZ40QUZ15Z5lu76M
-6e2DhfHCUjGcZBEjDaI+WYc9s0REAzJajEf9Lax3ZKZUyCpWbXx5CgSdKCHB8+cP
-RTyTQQKCAQEAsxx8r5a8hocLfQ43Kvm7HH0nUHeVoRXlbOFDLNf6ZE/RnCCOxOX3
-esiZTRAZmzo2CaOBJPnr/+SwTgW/woxCBGh8TEc6LnS2GdviwRD4c3CuoRTjzhgU
-49q8Ld3SdDRrBoBnIMWOuktY/4S2WRZ9GwU3l+L2lD1Y6gmwBSa1P2+Lxnpupagk
-9CVUZpEnokM05LbMmTa2M8Tc43Je5KSYcnaWctvmrIUbnN3VjhC/2y5oQwq1d4n2
-N4eo65vXlbzAUgtxtNEz62YVdsSdHNJ8dXkVZ3+S+/VPh75i2PxjbdFSFW7Futlx
-YtvAEs3LdgC8squSDQ1LJTutXfBjiUUX9wKCAQBiCMre86tLyJu6Qb6X1cRAwO7m
-4kyGzIUtijXko6mWxb4X/usVvzhSaNVYbHbMZXjX+J5vhBOul+RmQ3EY6nw0H2z8
-9D4z/rnQVqeb0uvIeUhBPni+s4fS4bA92M6Ie5bhiOSF2JjjJr38BFnTZARE7C+7
-ZII7z2c0eQz/wAAt9fWWroAB2mIm6wxq0LNij2NoE0iq6k2xJE1/k8qhXpsN0zAv
-bjG72Q7WryBeK/eIDK9e5wGlfLVDOx2Evlcaj70oJxuoRh57e8fCYy8huJQT+Wlx
-Qw4zhxiyzAMq8SEqFsm8dVO4Bu2FwzmmehA80ieSb+si7JZU92xGDT394Im2
------END RSA PRIVATE KEY-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5448/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5448/ca.crt
deleted file mode 100644
index 8c9b1bca..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5448/ca.crt
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw
-NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX
-GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx
-BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT
-wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO
-cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY
-AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR
-sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE
-CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p
-y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk
-o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN
-NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G
-Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/
-BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z
-PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU
-lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU
-GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna
-XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi
-QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK
-UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w
-Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH
-sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF
-AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib
-KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA
-KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w==
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5553/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5553/ca.crt
deleted file mode 100644
index 0b585b3f..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5553/ca.crt
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC9TCCAd+gAwIBAgIQNS9SaFSFBN7Zvwjalrf2DDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw
-NDIzMFoXDTE5MDExMjAwNDIzMFowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu/Pf
-fQ7VUTSXs12PRyrLDVDz7kPDbGNTt0vF7FYDmTTGOU3i62xZNOGuxBezAiVSV5A3
-lopwsv4OH7DRtSaPn+XCt1JDALna2WrjT0MshypMd5o2c3jmGUfAKf5gjizgIoEl
-d4e5aqEBuOQP+QCEde+8p8N1buQW+zMy9srM2O/7BFMIaQ07CWLlj3hIiF+L5rKD
-L6dWtKT7INRmRwpuZZnThEWnBSNgayrWek6G0i3y8QYTfVA1SwA+H3grJxy5NrLp
-GYXSmu2509mu0QAHhx05t1rJhwhFz/4sG7j8AggYeDXEqfQ/VIb/bvnW9bD+vrQ2
-ZnICvxnzNMYBx23BkQIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/
-BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALvTi6E44Fltu83dFLVEj0kLtusI/TTH
-Tw6upoB5pRG+7A75w0Ii8bvvd2tNpBOg+L+80xyIFqaNkXhLKTN4lgtd7WiCuyb/
-w1BEuF/+RjCXhu6wQ/63ab46d6ctaQ1zjxlU2rQLQXQFALI8ntyn/TELc01HYkr2
-x3NHlbnBNlgI2CKXPeUBzvBylTCcdYGwoa+2ZPdIsFjle2aCIBoZ+WNZlIbFwgLh
-XCHwcbviC+thjqOneJpJZmRW9AxQ638ki6iGItdrJewCN/1dcL2KKjxnC5VHbpne
-SOjEPNXihY08Brl8myhFNtRRKZ55MJIYzDtVQSkCaT91Q3XX9tSZadY=
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5554/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5554/ca.crt
deleted file mode 100644
index 0b585b3f..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5554/ca.crt
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC9TCCAd+gAwIBAgIQNS9SaFSFBN7Zvwjalrf2DDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw
-NDIzMFoXDTE5MDExMjAwNDIzMFowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu/Pf
-fQ7VUTSXs12PRyrLDVDz7kPDbGNTt0vF7FYDmTTGOU3i62xZNOGuxBezAiVSV5A3
-lopwsv4OH7DRtSaPn+XCt1JDALna2WrjT0MshypMd5o2c3jmGUfAKf5gjizgIoEl
-d4e5aqEBuOQP+QCEde+8p8N1buQW+zMy9srM2O/7BFMIaQ07CWLlj3hIiF+L5rKD
-L6dWtKT7INRmRwpuZZnThEWnBSNgayrWek6G0i3y8QYTfVA1SwA+H3grJxy5NrLp
-GYXSmu2509mu0QAHhx05t1rJhwhFz/4sG7j8AggYeDXEqfQ/VIb/bvnW9bD+vrQ2
-ZnICvxnzNMYBx23BkQIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/
-BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALvTi6E44Fltu83dFLVEj0kLtusI/TTH
-Tw6upoB5pRG+7A75w0Ii8bvvd2tNpBOg+L+80xyIFqaNkXhLKTN4lgtd7WiCuyb/
-w1BEuF/+RjCXhu6wQ/63ab46d6ctaQ1zjxlU2rQLQXQFALI8ntyn/TELc01HYkr2
-x3NHlbnBNlgI2CKXPeUBzvBylTCcdYGwoa+2ZPdIsFjle2aCIBoZ+WNZlIbFwgLh
-XCHwcbviC+thjqOneJpJZmRW9AxQ638ki6iGItdrJewCN/1dcL2KKjxnC5VHbpne
-SOjEPNXihY08Brl8myhFNtRRKZ55MJIYzDtVQSkCaT91Q3XX9tSZadY=
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5555/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5555/ca.crt
deleted file mode 100644
index 0b585b3f..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5555/ca.crt
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC9TCCAd+gAwIBAgIQNS9SaFSFBN7Zvwjalrf2DDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw
-NDIzMFoXDTE5MDExMjAwNDIzMFowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu/Pf
-fQ7VUTSXs12PRyrLDVDz7kPDbGNTt0vF7FYDmTTGOU3i62xZNOGuxBezAiVSV5A3
-lopwsv4OH7DRtSaPn+XCt1JDALna2WrjT0MshypMd5o2c3jmGUfAKf5gjizgIoEl
-d4e5aqEBuOQP+QCEde+8p8N1buQW+zMy9srM2O/7BFMIaQ07CWLlj3hIiF+L5rKD
-L6dWtKT7INRmRwpuZZnThEWnBSNgayrWek6G0i3y8QYTfVA1SwA+H3grJxy5NrLp
-GYXSmu2509mu0QAHhx05t1rJhwhFz/4sG7j8AggYeDXEqfQ/VIb/bvnW9bD+vrQ2
-ZnICvxnzNMYBx23BkQIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/
-BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALvTi6E44Fltu83dFLVEj0kLtusI/TTH
-Tw6upoB5pRG+7A75w0Ii8bvvd2tNpBOg+L+80xyIFqaNkXhLKTN4lgtd7WiCuyb/
-w1BEuF/+RjCXhu6wQ/63ab46d6ctaQ1zjxlU2rQLQXQFALI8ntyn/TELc01HYkr2
-x3NHlbnBNlgI2CKXPeUBzvBylTCcdYGwoa+2ZPdIsFjle2aCIBoZ+WNZlIbFwgLh
-XCHwcbviC+thjqOneJpJZmRW9AxQ638ki6iGItdrJewCN/1dcL2KKjxnC5VHbpne
-SOjEPNXihY08Brl8myhFNtRRKZ55MJIYzDtVQSkCaT91Q3XX9tSZadY=
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5557/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5557/ca.crt
deleted file mode 100644
index 0b585b3f..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5557/ca.crt
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC9TCCAd+gAwIBAgIQNS9SaFSFBN7Zvwjalrf2DDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw
-NDIzMFoXDTE5MDExMjAwNDIzMFowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu/Pf
-fQ7VUTSXs12PRyrLDVDz7kPDbGNTt0vF7FYDmTTGOU3i62xZNOGuxBezAiVSV5A3
-lopwsv4OH7DRtSaPn+XCt1JDALna2WrjT0MshypMd5o2c3jmGUfAKf5gjizgIoEl
-d4e5aqEBuOQP+QCEde+8p8N1buQW+zMy9srM2O/7BFMIaQ07CWLlj3hIiF+L5rKD
-L6dWtKT7INRmRwpuZZnThEWnBSNgayrWek6G0i3y8QYTfVA1SwA+H3grJxy5NrLp
-GYXSmu2509mu0QAHhx05t1rJhwhFz/4sG7j8AggYeDXEqfQ/VIb/bvnW9bD+vrQ2
-ZnICvxnzNMYBx23BkQIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/
-BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALvTi6E44Fltu83dFLVEj0kLtusI/TTH
-Tw6upoB5pRG+7A75w0Ii8bvvd2tNpBOg+L+80xyIFqaNkXhLKTN4lgtd7WiCuyb/
-w1BEuF/+RjCXhu6wQ/63ab46d6ctaQ1zjxlU2rQLQXQFALI8ntyn/TELc01HYkr2
-x3NHlbnBNlgI2CKXPeUBzvBylTCcdYGwoa+2ZPdIsFjle2aCIBoZ+WNZlIbFwgLh
-XCHwcbviC+thjqOneJpJZmRW9AxQ638ki6iGItdrJewCN/1dcL2KKjxnC5VHbpne
-SOjEPNXihY08Brl8myhFNtRRKZ55MJIYzDtVQSkCaT91Q3XX9tSZadY=
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:5558/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:5558/ca.crt
deleted file mode 100644
index 0b585b3f..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:5558/ca.crt
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC9TCCAd+gAwIBAgIQNS9SaFSFBN7Zvwjalrf2DDALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw
-NDIzMFoXDTE5MDExMjAwNDIzMFowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu/Pf
-fQ7VUTSXs12PRyrLDVDz7kPDbGNTt0vF7FYDmTTGOU3i62xZNOGuxBezAiVSV5A3
-lopwsv4OH7DRtSaPn+XCt1JDALna2WrjT0MshypMd5o2c3jmGUfAKf5gjizgIoEl
-d4e5aqEBuOQP+QCEde+8p8N1buQW+zMy9srM2O/7BFMIaQ07CWLlj3hIiF+L5rKD
-L6dWtKT7INRmRwpuZZnThEWnBSNgayrWek6G0i3y8QYTfVA1SwA+H3grJxy5NrLp
-GYXSmu2509mu0QAHhx05t1rJhwhFz/4sG7j8AggYeDXEqfQ/VIb/bvnW9bD+vrQ2
-ZnICvxnzNMYBx23BkQIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/
-BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALvTi6E44Fltu83dFLVEj0kLtusI/TTH
-Tw6upoB5pRG+7A75w0Ii8bvvd2tNpBOg+L+80xyIFqaNkXhLKTN4lgtd7WiCuyb/
-w1BEuF/+RjCXhu6wQ/63ab46d6ctaQ1zjxlU2rQLQXQFALI8ntyn/TELc01HYkr2
-x3NHlbnBNlgI2CKXPeUBzvBylTCcdYGwoa+2ZPdIsFjle2aCIBoZ+WNZlIbFwgLh
-XCHwcbviC+thjqOneJpJZmRW9AxQ638ki6iGItdrJewCN/1dcL2KKjxnC5VHbpne
-SOjEPNXihY08Brl8myhFNtRRKZ55MJIYzDtVQSkCaT91Q3XX9tSZadY=
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/generated_certs.d/localregistry:6666/ca.crt b/contrib/docker-integration/generated_certs.d/localregistry:6666/ca.crt
deleted file mode 100644
index 93325986..00000000
--- a/contrib/docker-integration/generated_certs.d/localregistry:6666/ca.crt
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC9TCCAd+gAwIBAgIQKQTGjKpSVBW78ef0fOcxRTALBgkqhkiG9w0BAQswJjER
-MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDgyMDIz
-MjE0OVoXDTE4MDgwNDIzMjE0OVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV
-BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwoPM
-xiDZK6Fwy5r3waRkfJHhyZZH828Jyj+nz5UVkMyOM/xN6MgJ2w911hTj1wSXG2n3
-AohF3gTFNrDYh4j2qRZnixDrOM5GBm2/KJbyfBIYkrR45yLfjidO7MRnhaPZ5Fov
-l+RKwNBXP4Q2mUe7q9FM457Rm8hAcqXP04AJT20m1QSYQivDgxsDxuAQte3VEy1E
-0j0CwUKoFHT6MHOnDPEZbc4r1+ba34WBM1Sc5KXyV2JlbtU07J4hACYWVsD7vQCl
-VFlZNE4E35ahMDZ+ODLal9PAT8ARLdAtjvRWrT+h8qZ4Yfwt/sGF1K4CAkTP3H5p
-uMkJG56zmqIEYeHMuwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/
-BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALpieTckiPEeb3rTAWl7waDPLPOIhS5C
-XHVfOm7cPmRn3pT2VuR8y74U7a1uOkYMgJnCWb8lSXhbqC89FatLnAhKqo4I9oD8
-2BXgYeIpP5/OWBcjzmsMnowrvokc0chAmAR0Ux6AP0eX9amC0lGMuTHdw3+is0AR
-lhoImOUPXvgMH7W2RimpSgnX0R5wKqfuGwMfbGa0xhWBZ+wekAKcU8b+pIHDyX0c
-EQcir2y8/lVjECXSAIlV6iasPQ3hm1sd0xq1hx4yrwYFvQb7yEhOXbK24HLr/20D
-RRmEOuS8gg2XtUFv66z/VOw/nUleIg9GAuWDJaiu9frmIma4/tIY4qY=
------END CERTIFICATE-----
diff --git a/contrib/docker-integration/golem.conf b/contrib/docker-integration/golem.conf
index 43deb730..99c8d600 100644
--- a/contrib/docker-integration/golem.conf
+++ b/contrib/docker-integration/golem.conf
@@ -2,6 +2,8 @@
dind=true
images=[ "nginx:1.9", "dmcgowan/token-server:simple", "dmcgowan/token-server:oauth", "dmcgowan/malevolent:0.1.0" ]
+ [[suite.pretest]]
+ command="sh ./install_certs.sh /etc/generated_certs.d"
[[suite.testrunner]]
command="bats -t ."
format="tap"
diff --git a/contrib/docker-integration/install_certs.sh b/contrib/docker-integration/install_certs.sh
index 15bf1fcb..828b7896 100644
--- a/contrib/docker-integration/install_certs.sh
+++ b/contrib/docker-integration/install_certs.sh
@@ -2,14 +2,7 @@
set -e
hostname="localregistry"
-authhostname="auth.$hostname"
-
-set_etc_hosts() {
- hostentry=$1
- IP=$(ifconfig eth0|grep "inet addr:"| cut -d: -f2 | awk '{ print $1}')
- echo "$IP $hostentry" >> /etc/hosts
- # TODO: Check if record already exists in /etc/hosts
-}
+installdir="$1"
install_ca() {
mkdir -p $1/$hostname:$2
@@ -32,18 +25,19 @@ install_test_certs() {
install_ca $1 5448
}
-set_etc_hosts $hostname
-set_etc_hosts $authhostname
+install_ca_file() {
+ mkdir -p $2
+ cp $1 $2/ca.crt
+}
-install_test_certs /etc/docker/certs.d
-install_test_certs /root/.docker/tls
+install_test_certs $installdir
# Malevolent server
-mkdir -p /etc/docker/certs.d/$hostname:6666
-cp ./malevolent-certs/ca.pem /etc/docker/certs.d/$hostname:6666/ca.crt
+install_ca_file ./malevolent-certs/ca.pem $installdir/$hostname:6666
# Token server
-install_file ./tokenserver/certs/ca.pem $1 5555
-install_file ./tokenserver/certs/ca.pem $1 5554
-install_file ./tokenserver/certs/ca.pem $1 5557
-install_file ./tokenserver/certs/ca.pem $1 5558
+install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5554
+install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5555
+install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5557
+install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5558
+
From 52eecb556c0096f0f331b52a0ee818951af576c8 Mon Sep 17 00:00:00 2001
From: Gleb Schukin
Date: Wed, 11 May 2016 15:06:08 +0300
Subject: [PATCH 046/546] Handle rare case when ceph doesn't return
Last-Modified for HEAD requests in case DLO manifest doesn't have any
segments
Signed-off-by: Gleb Schukin
---
vendor/github.com/ncw/swift/swift.go | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go
index 290f73a7..e5be4ebc 100644
--- a/vendor/github.com/ncw/swift/swift.go
+++ b/vendor/github.com/ncw/swift/swift.go
@@ -1737,9 +1737,11 @@ func (c *Connection) Object(container string, objectName string) (info Object, h
return
}
}
- info.ServerLastModified = resp.Header.Get("Last-Modified")
- if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil {
- return
+ if resp.Header.Get("Last-Modified") != "" {
+ info.ServerLastModified = resp.Header.Get("Last-Modified")
+ if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil {
+ return
+ }
}
info.Hash = resp.Header.Get("Etag")
return
From 5d7600e3cada81e26b01aa0e2713c3aef517bcdc Mon Sep 17 00:00:00 2001
From: Gleb Schukin
Date: Wed, 11 May 2016 15:06:08 +0300
Subject: [PATCH 047/546] Handle rare case when ceph doesn't return
Last-Modified for HEAD requests in case DLO manifest doesn't have any
segments
Signed-off-by: Gleb Schukin
---
vendor/github.com/ncw/swift/swift.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go
index e5be4ebc..1ad8fcc1 100644
--- a/vendor/github.com/ncw/swift/swift.go
+++ b/vendor/github.com/ncw/swift/swift.go
@@ -1737,6 +1737,9 @@ func (c *Connection) Object(container string, objectName string) (info Object, h
return
}
}
+ //HACK
+ //Currently ceph doestn't return Last-Modified header for DLO manifest without any segments
+ //Currently it affects all versions of ceph http://tracker.ceph.com/issues/15812
if resp.Header.Get("Last-Modified") != "" {
info.ServerLastModified = resp.Header.Get("Last-Modified")
if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil {
From 8854eed7ab7ea9e45638e24c67e03ea9adea5d11 Mon Sep 17 00:00:00 2001
From: cyli
Date: Wed, 11 May 2016 13:41:58 -0700
Subject: [PATCH 048/546] Update the auth spec scope grammar to reflect the
fact that hostnames are optionally supported.
Signed-off-by: cyli
---
docs/spec/auth/scope.md | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/docs/spec/auth/scope.md b/docs/spec/auth/scope.md
index 76e6f8cf..e626b6e1 100644
--- a/docs/spec/auth/scope.md
+++ b/docs/spec/auth/scope.md
@@ -56,7 +56,7 @@ it.
The resource name represent the name which identifies a resource for a resource
provider. A resource is identified by this name and the provided resource type.
An example of a resource name would be the name component of an image tag, such
-as "samalba/myapp".
+as "samalba/myapp" or "hostname/samalba/myapp".
### Resource Actions
@@ -83,16 +83,24 @@ scopes.
scope := resourcescope [ ' ' resourcescope ]*
resourcescope := resourcetype ":" resourcename ":" action [ ',' action ]*
resourcetype := /[a-z]*/
-resourcename := component [ '/' component ]*
+resourcename := [ hostname '/' ] component [ '/' component ]*
+hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
+hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+port-number := /[0-9]+/
action := /[a-z]*/
component := alpha-numeric [ separator alpha-numeric ]*
alpha-numeric := /[a-z0-9]+/
separator := /[_.]|__|[-]*/
```
Full reference grammar is defined
-(here)[https://godoc.org/github.com/docker/distribution/reference]. Currently
-the scope name grammar is a subset of the reference grammar without support
-for hostnames.
+[here](https://godoc.org/github.com/docker/distribution/reference). Currently
+the scope name grammar is a subset of the reference grammar.
+
+> **NOTE:** that the `resourcename` may contain one `:` due to a possible port
+> number in the hostname component of the `resourcename`, so a naive
+> implementation that interprets the first three `:`-delimited tokens of a
+> `scope` to be the `resourcetype`, `resourcename`, and a list of `action`
+> would be insufficient.
## Resource Provider Use
From f18da7d3a3f5343b3742302fe59fcc8cbd696a42 Mon Sep 17 00:00:00 2001
From: Nikita Tarasov
Date: Mon, 16 May 2016 16:09:26 +0300
Subject: [PATCH 049/546] update docs
Signed-off-by: Nikita Tarasov
---
docs/storage-drivers/swift.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/storage-drivers/swift.md b/docs/storage-drivers/swift.md
index 28e87d4d..4e28847d 100644
--- a/docs/storage-drivers/swift.md
+++ b/docs/storage-drivers/swift.md
@@ -304,7 +304,7 @@ An implementation of the `storagedriver.StorageDriver` interface that uses [Open
- Optionally, specify the OpenStack Auth's version,for example 3 . The driver use 0 (autodetect) by default.
+ Optionally, specify the OpenStack Auth's version,for example 3 . By default the driver will autodetect the auth's version from the AuthURL.
|
From f97eca5ad68f3acca652f6376d5120edf6fae4ee Mon Sep 17 00:00:00 2001
From: Alexey Gladkov
Date: Wed, 18 May 2016 18:54:27 +0200
Subject: [PATCH 050/546] Add support for blobAccessController middleware
Signed-off-by: Michal Minar
Signed-off-by: Alexey Gladkov
---
blobs.go | 5 ++
registry/handlers/app.go | 2 +-
registry/middleware/registry/middleware.go | 14 ++++++
registry/storage/registry.go | 54 +++++++++++++++-------
4 files changed, 57 insertions(+), 18 deletions(-)
diff --git a/blobs.go b/blobs.go
index 1765e9f7..0d9b5c13 100644
--- a/blobs.go
+++ b/blobs.go
@@ -124,6 +124,11 @@ type BlobDescriptorService interface {
Clear(ctx context.Context, dgst digest.Digest) error
}
+// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
+type BlobDescriptorServiceFactory interface {
+ BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
+}
+
// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
diff --git a/registry/handlers/app.go b/registry/handlers/app.go
index 3c3e50d0..c65441c6 100644
--- a/registry/handlers/app.go
+++ b/registry/handlers/app.go
@@ -177,7 +177,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
app.httpHost = *u
}
- options := []storage.RegistryOption{}
+ options := registrymiddleware.GetRegistryOptions()
if app.isCache {
options = append(options, storage.DisableDigestResumption)
diff --git a/registry/middleware/registry/middleware.go b/registry/middleware/registry/middleware.go
index 7535c6db..3e6e5cc7 100644
--- a/registry/middleware/registry/middleware.go
+++ b/registry/middleware/registry/middleware.go
@@ -5,6 +5,7 @@ import (
"github.com/docker/distribution"
"github.com/docker/distribution/context"
+ "github.com/docker/distribution/registry/storage"
)
// InitFunc is the type of a RegistryMiddleware factory function and is
@@ -12,6 +13,7 @@ import (
type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error)
var middlewares map[string]InitFunc
+var registryoptions []storage.RegistryOption
// Register is used to register an InitFunc for
// a RegistryMiddleware backend with the given name.
@@ -38,3 +40,15 @@ func Get(ctx context.Context, name string, options map[string]interface{}, regis
return nil, fmt.Errorf("no registry middleware registered with name: %s", name)
}
+
+// RegisterOptions adds more options to RegistryOption list. Options get applied before
+// any other configuration-based options.
+func RegisterOptions(options ...storage.RegistryOption) error {
+ registryoptions = append(registryoptions, options...)
+ return nil
+}
+
+// GetRegistryOptions returns list of RegistryOption.
+func GetRegistryOptions() []storage.RegistryOption {
+ return registryoptions
+}
diff --git a/registry/storage/registry.go b/registry/storage/registry.go
index a1128b4a..3fe4ac68 100644
--- a/registry/storage/registry.go
+++ b/registry/storage/registry.go
@@ -12,14 +12,15 @@ import (
// registry is the top-level implementation of Registry for use in the storage
// package. All instances should descend from this object.
type registry struct {
- blobStore *blobStore
- blobServer *blobServer
- statter *blobStatter // global statter service.
- blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider
- deleteEnabled bool
- resumableDigestEnabled bool
- schema1SignaturesEnabled bool
- schema1SigningKey libtrust.PrivateKey
+ blobStore *blobStore
+ blobServer *blobServer
+ statter *blobStatter // global statter service.
+ blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider
+ deleteEnabled bool
+ resumableDigestEnabled bool
+ schema1SignaturesEnabled bool
+ schema1SigningKey libtrust.PrivateKey
+ blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory
}
// RegistryOption is the type used for functional options for NewRegistry.
@@ -64,6 +65,15 @@ func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption {
}
}
+// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the
+// factory to create BlobDescriptorServiceFactory middleware.
+func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption {
+ return func(registry *registry) error {
+ registry.blobDescriptorServiceFactory = factory
+ return nil
+ }
+}
+
// BlobDescriptorCacheProvider returns a functional option for
// NewRegistry. It creates a cached blob statter for use by the
// registry.
@@ -190,16 +200,22 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M
manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()}
+ var statter distribution.BlobDescriptorService = &linkedBlobStatter{
+ blobStore: repo.blobStore,
+ repository: repo,
+ linkPathFns: manifestLinkPathFns,
+ }
+
+ if repo.registry.blobDescriptorServiceFactory != nil {
+ statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
+ }
+
blobStore := &linkedBlobStore{
- ctx: ctx,
- blobStore: repo.blobStore,
- repository: repo,
- deleteEnabled: repo.registry.deleteEnabled,
- blobAccessController: &linkedBlobStatter{
- blobStore: repo.blobStore,
- repository: repo,
- linkPathFns: manifestLinkPathFns,
- },
+ ctx: ctx,
+ blobStore: repo.blobStore,
+ repository: repo,
+ deleteEnabled: repo.registry.deleteEnabled,
+ blobAccessController: statter,
// TODO(stevvooe): linkPath limits this blob store to only
// manifests. This instance cannot be used for blob checks.
@@ -258,6 +274,10 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore {
statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter)
}
+ if repo.registry.blobDescriptorServiceFactory != nil {
+ statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
+ }
+
return &linkedBlobStore{
registry: repo.registry,
blobStore: repo.blobStore,
From f0052b84341b588017a67d87d4edbb82c76705b3 Mon Sep 17 00:00:00 2001
From: John Starks
Date: Sat, 14 May 2016 14:49:08 -0700
Subject: [PATCH 051/546] Add support for layers from foreign sources
This will be used to support downloading Windows base layers from
Microsoft URLs.
Signed-off-by: John Starks
---
blobs.go | 3 +
docs/spec/manifest-v2-2.md | 8 ++
manifest/schema2/manifest.go | 5 +-
registry/proxy/proxytagservice_test.go | 7 +-
registry/storage/blob_test.go | 11 +-
registry/storage/cache/cachecheck/suite.go | 15 +--
registry/storage/schema2manifesthandler.go | 31 ++++-
.../storage/schema2manifesthandler_test.go | 117 ++++++++++++++++++
8 files changed, 180 insertions(+), 17 deletions(-)
create mode 100644 registry/storage/schema2manifesthandler_test.go
diff --git a/blobs.go b/blobs.go
index 1765e9f7..9f572bfa 100644
--- a/blobs.go
+++ b/blobs.go
@@ -69,6 +69,9 @@ type Descriptor struct {
// against against this digest.
Digest digest.Digest `json:"digest,omitempty"`
+ // URLs contains the source URLs of this content.
+ URLs []string `json:"urls,omitempty"`
+
// NOTE: Before adding a field here, please ensure that all
// other options have been exhausted. Much of the type relationships
// depend on the simplicity of this type.
diff --git a/docs/spec/manifest-v2-2.md b/docs/spec/manifest-v2-2.md
index 31631454..c5c90648 100644
--- a/docs/spec/manifest-v2-2.md
+++ b/docs/spec/manifest-v2-2.md
@@ -216,6 +216,14 @@ image. It's the direct replacement for the schema-1 manifest.
The digest of the content, as defined by the
[Registry V2 HTTP API Specificiation](https://docs.docker.com/registry/spec/api/#digest-parameter).
+ - **`urls`** *array*
+
+ For an ordinary layer, this is empty, and the layer contents can be
+ retrieved directly from the registry. For a layer with *`mediatype`* of
+ `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip`, this
+ contains a non-empty list of URLs from which this object can be
+ downloaded.
+
## Example Image Manifest
*Example showing an image manifest:*
diff --git a/manifest/schema2/manifest.go b/manifest/schema2/manifest.go
index 8d378e99..355b5ad4 100644
--- a/manifest/schema2/manifest.go
+++ b/manifest/schema2/manifest.go
@@ -20,6 +20,10 @@ const (
// MediaTypeLayer is the mediaType used for layers referenced by the
// manifest.
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+
+ // MediaTypeForeignLayer is the mediaType used for layers that must be
+ // downloaded from foreign URLs.
+ MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
var (
@@ -63,7 +67,6 @@ type Manifest struct {
// References returnes the descriptors of this manifests references.
func (m Manifest) References() []distribution.Descriptor {
return m.Layers
-
}
// Target returns the target of this signed manifest.
diff --git a/registry/proxy/proxytagservice_test.go b/registry/proxy/proxytagservice_test.go
index a446645c..ce0fe78b 100644
--- a/registry/proxy/proxytagservice_test.go
+++ b/registry/proxy/proxytagservice_test.go
@@ -1,6 +1,7 @@
package proxy
import (
+ "reflect"
"sort"
"sync"
"testing"
@@ -92,7 +93,7 @@ func TestGet(t *testing.T) {
t.Fatalf("Expected 1 auth challenge call, got %#v", proxyTags.authChallenger)
}
- if d != remoteDesc {
+ if !reflect.DeepEqual(d, remoteDesc) {
t.Fatal("unable to get put tag")
}
@@ -101,7 +102,7 @@ func TestGet(t *testing.T) {
t.Fatal("remote tag not pulled into store")
}
- if local != remoteDesc {
+ if !reflect.DeepEqual(local, remoteDesc) {
t.Fatalf("unexpected descriptor pulled through")
}
@@ -121,7 +122,7 @@ func TestGet(t *testing.T) {
t.Fatalf("Expected 2 auth challenge calls, got %#v", proxyTags.authChallenger)
}
- if d != newRemoteDesc {
+ if !reflect.DeepEqual(d, newRemoteDesc) {
t.Fatal("unable to get put tag")
}
diff --git a/registry/storage/blob_test.go b/registry/storage/blob_test.go
index 7e1a7cd4..f7ae70f1 100644
--- a/registry/storage/blob_test.go
+++ b/registry/storage/blob_test.go
@@ -7,6 +7,8 @@ import (
"io"
"io/ioutil"
"os"
+ "path"
+ "reflect"
"testing"
"github.com/docker/distribution"
@@ -16,7 +18,6 @@ import (
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/testutil"
- "path"
)
// TestWriteSeek tests that the current file size can be
@@ -156,7 +157,7 @@ func TestSimpleBlobUpload(t *testing.T) {
t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
}
- if statDesc != desc {
+ if !reflect.DeepEqual(statDesc, desc) {
t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
}
@@ -410,7 +411,7 @@ func TestBlobMount(t *testing.T) {
t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs)
}
- if statDesc != desc {
+ if !reflect.DeepEqual(statDesc, desc) {
t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
}
@@ -436,7 +437,7 @@ func TestBlobMount(t *testing.T) {
t.Fatalf("unexpected error mounting layer: %v", err)
}
- if ebm.Descriptor != desc {
+ if !reflect.DeepEqual(ebm.Descriptor, desc) {
t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc)
}
@@ -446,7 +447,7 @@ func TestBlobMount(t *testing.T) {
t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
}
- if statDesc != desc {
+ if !reflect.DeepEqual(statDesc, desc) {
t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
}
diff --git a/registry/storage/cache/cachecheck/suite.go b/registry/storage/cache/cachecheck/suite.go
index 13e9c132..cba5addd 100644
--- a/registry/storage/cache/cachecheck/suite.go
+++ b/registry/storage/cache/cachecheck/suite.go
@@ -1,6 +1,7 @@
package cachecheck
import (
+ "reflect"
"testing"
"github.com/docker/distribution"
@@ -79,7 +80,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
t.Fatalf("unexpected error statting fake2:abc: %v", err)
}
- if expected != desc {
+ if !reflect.DeepEqual(expected, desc) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
@@ -89,7 +90,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
t.Fatalf("descriptor not returned for canonical key: %v", err)
}
- if expected != desc {
+ if !reflect.DeepEqual(expected, desc) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
@@ -99,7 +100,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc)
}
- if desc != expected {
+ if !reflect.DeepEqual(desc, expected) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
@@ -109,7 +110,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
t.Fatalf("unexpected error checking glboal descriptor: %v", err)
}
- if desc != expected {
+ if !reflect.DeepEqual(desc, expected) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
@@ -126,7 +127,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
t.Fatalf("unexpected error getting descriptor: %v", err)
}
- if desc != expected {
+ if !reflect.DeepEqual(desc, expected) {
t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected)
}
@@ -137,7 +138,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
expected.MediaType = "application/octet-stream" // expect original mediatype in global
- if desc != expected {
+ if !reflect.DeepEqual(desc, expected) {
t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected)
}
}
@@ -163,7 +164,7 @@ func checkBlobDescriptorCacheClear(t *testing.T, ctx context.Context, provider c
t.Fatalf("unexpected error statting fake2:abc: %v", err)
}
- if expected != desc {
+ if !reflect.DeepEqual(expected, desc) {
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
}
diff --git a/registry/storage/schema2manifesthandler.go b/registry/storage/schema2manifesthandler.go
index 115786e2..6456efa4 100644
--- a/registry/storage/schema2manifesthandler.go
+++ b/registry/storage/schema2manifesthandler.go
@@ -1,15 +1,24 @@
package storage
import (
+ "errors"
"fmt"
+ "net/url"
"encoding/json"
+
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema2"
)
+var (
+ errUnexpectedURL = errors.New("unexpected URL on layer")
+ errMissingURL = errors.New("missing URL on layer")
+ errInvalidURL = errors.New("invalid URL on layer")
+)
+
//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
type schema2ManifestHandler struct {
repository *repository
@@ -80,7 +89,27 @@ func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst sche
}
for _, fsLayer := range mnfst.References() {
- _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest)
+ var err error
+ if fsLayer.MediaType != schema2.MediaTypeForeignLayer {
+ if len(fsLayer.URLs) == 0 {
+ _, err = ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest)
+ } else {
+ err = errUnexpectedURL
+ }
+ } else {
+ // Clients download this layer from an external URL, so do not check for
+ // its presence.
+ if len(fsLayer.URLs) == 0 {
+ err = errMissingURL
+ }
+ for _, u := range fsLayer.URLs {
+ var pu *url.URL
+ pu, err = url.Parse(u)
+ if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" {
+ err = errInvalidURL
+ }
+ }
+ }
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
diff --git a/registry/storage/schema2manifesthandler_test.go b/registry/storage/schema2manifesthandler_test.go
new file mode 100644
index 00000000..c2f61edf
--- /dev/null
+++ b/registry/storage/schema2manifesthandler_test.go
@@ -0,0 +1,117 @@
+package storage
+
+import (
+ "testing"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/manifest"
+ "github.com/docker/distribution/manifest/schema2"
+ "github.com/docker/distribution/registry/storage/driver/inmemory"
+)
+
+func TestVerifyManifestForeignLayer(t *testing.T) {
+ ctx := context.Background()
+ inmemoryDriver := inmemory.New()
+ registry := createRegistry(t, inmemoryDriver)
+ repo := makeRepository(t, registry, "test")
+ manifestService := makeManifestService(t, repo)
+
+ config, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeConfig, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ layer, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeLayer, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ foreignLayer := distribution.Descriptor{
+ Digest: "sha256:463435349086340864309863409683460843608348608934092322395278926a",
+ Size: 6323,
+ MediaType: schema2.MediaTypeForeignLayer,
+ }
+
+ template := schema2.Manifest{
+ Versioned: manifest.Versioned{
+ SchemaVersion: 2,
+ MediaType: schema2.MediaTypeManifest,
+ },
+ Config: config,
+ }
+
+ type testcase struct {
+ BaseLayer distribution.Descriptor
+ URLs []string
+ Err error
+ }
+
+ cases := []testcase{
+ {
+ foreignLayer,
+ nil,
+ errMissingURL,
+ },
+ {
+ layer,
+ []string{"http://foo/bar"},
+ errUnexpectedURL,
+ },
+ {
+ foreignLayer,
+ []string{"file:///local/file"},
+ errInvalidURL,
+ },
+ {
+ foreignLayer,
+ []string{"http://foo/bar#baz"},
+ errInvalidURL,
+ },
+ {
+ foreignLayer,
+ []string{""},
+ errInvalidURL,
+ },
+ {
+ foreignLayer,
+ []string{"https://foo/bar", ""},
+ errInvalidURL,
+ },
+ {
+ foreignLayer,
+ []string{"http://foo/bar"},
+ nil,
+ },
+ {
+ foreignLayer,
+ []string{"https://foo/bar"},
+ nil,
+ },
+ }
+
+ for _, c := range cases {
+ m := template
+ l := c.BaseLayer
+ l.URLs = c.URLs
+ m.Layers = []distribution.Descriptor{l}
+ dm, err := schema2.FromStruct(m)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ _, err = manifestService.Put(ctx, dm)
+ if verr, ok := err.(distribution.ErrManifestVerification); ok {
+ // Extract the first error
+ if len(verr) == 2 {
+ if _, ok = verr[1].(distribution.ErrManifestBlobUnknown); ok {
+ err = verr[0]
+ }
+ }
+ }
+ if err != c.Err {
+ t.Errorf("%#v: expected %v, got %v", l, c.Err, err)
+ }
+ }
+}
From 79d6008a54ccaac7f0e75c223c9ee592c412e7fd Mon Sep 17 00:00:00 2001
From: Ke Xu
Date: Tue, 24 May 2016 16:33:30 +0900
Subject: [PATCH 052/546] fix broken markdown
Signed-off-by: Ke Xu
---
docs/spec/auth/oauth.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/spec/auth/oauth.md b/docs/spec/auth/oauth.md
index b0f9a13f..1b4c0d9c 100644
--- a/docs/spec/auth/oauth.md
+++ b/docs/spec/auth/oauth.md
@@ -171,11 +171,11 @@ HTTP/1.1 200 OK
Content-Type: application/json
{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":"900","scope":""}
-````
+```
#### Example refreshing an Access Token
-````
+```
POST /token HTTP/1.1
Host: auth.docker.io
Content-Type: application/x-www-form-urlencoded
@@ -186,5 +186,5 @@ HTTP/1.1 200 OK
Content-Type: application/json
{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5":"expires_in":"900","scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"}
-````
+```
From 588692f6c4037616518a5d61be19600627e2a610 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Tue, 24 May 2016 10:42:29 -0700
Subject: [PATCH 053/546] Correct yaml key names in configuration
Signed-off-by: Richard Scothern
---
configuration/configuration.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/configuration/configuration.go b/configuration/configuration.go
index 90fd422c..eee83f8a 100644
--- a/configuration/configuration.go
+++ b/configuration/configuration.go
@@ -221,7 +221,7 @@ type FileChecker struct {
// HTTPChecker is a type of entry in the health section for checking HTTP URIs.
type HTTPChecker struct {
// Timeout is the duration to wait before timing out the HTTP request
- Timeout time.Duration `yaml:"interval,omitempty"`
+ Timeout time.Duration `yaml:"timeout,omitempty"`
// StatusCode is the expected status code
StatusCode int
// Interval is the duration in between checks
@@ -238,7 +238,7 @@ type HTTPChecker struct {
// TCPChecker is a type of entry in the health section for checking TCP servers.
type TCPChecker struct {
// Timeout is the duration to wait before timing out the TCP connection
- Timeout time.Duration `yaml:"interval,omitempty"`
+ Timeout time.Duration `yaml:"timeout,omitempty"`
// Interval is the duration in between checks
Interval time.Duration `yaml:"interval,omitempty"`
// Addr is the TCP address to check
From 166c4a957fc1f07f14992246cbcb627e976bc74f Mon Sep 17 00:00:00 2001
From: Tony Holdstock-Brown
Date: Tue, 24 May 2016 11:07:55 -0700
Subject: [PATCH 054/546] Pass in `app` as context to apply{N}Middleware
This lets us access registry config within middleware for additional
configuration of whatever it is that you're overriding.
Signed-off-by: Tony Holdstock-Brown
---
registry/handlers/app.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/registry/handlers/app.go b/registry/handlers/app.go
index 3c3e50d0..bf6727af 100644
--- a/registry/handlers/app.go
+++ b/registry/handlers/app.go
@@ -258,7 +258,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
}
}
- app.registry, err = applyRegistryMiddleware(app.Context, app.registry, config.Middleware["registry"])
+ app.registry, err = applyRegistryMiddleware(app, app.registry, config.Middleware["registry"])
if err != nil {
panic(err)
}
@@ -647,7 +647,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
repository,
app.eventBridge(context, r))
- context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"])
+ context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"])
if err != nil {
ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err)
context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
From 3207e0c1123a808d936a5de4a3df36a8e62fefc7 Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Wed, 25 May 2016 11:43:36 -0700
Subject: [PATCH 055/546] Update auth documentation examples to show "expires
in" as int
Go will fail to parse the examples since an int is expected rather than a string for the "expires in" value
Signed-off-by: Derek McGowan (github: dmcgowan)
---
docs/spec/auth/oauth.md | 4 ++--
docs/spec/auth/token.md | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/spec/auth/oauth.md b/docs/spec/auth/oauth.md
index 1b4c0d9c..1311df5d 100644
--- a/docs/spec/auth/oauth.md
+++ b/docs/spec/auth/oauth.md
@@ -170,7 +170,7 @@ grant_type=password&username=johndoe&password=A3ddj3w&service=hub.docker.io&clie
HTTP/1.1 200 OK
Content-Type: application/json
-{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":"900","scope":""}
+{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":""}
```
#### Example refreshing an Access Token
@@ -185,6 +185,6 @@ grant_type=refresh_token&refresh_token=kas9Da81Dfa8&service=registry-1.docker.io
HTTP/1.1 200 OK
Content-Type: application/json
-{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5":"expires_in":"900","scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"}
+{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"}
```
diff --git a/docs/spec/auth/token.md b/docs/spec/auth/token.md
index 9f3167be..65482699 100644
--- a/docs/spec/auth/token.md
+++ b/docs/spec/auth/token.md
@@ -238,7 +238,7 @@ authenticate to the audience service (within the indicated window of time):
HTTP/1.1 200 OK
Content-Type: application/json
-{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": "3600","issued_at": "2009-11-10T23:00:00Z"}
+{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": 3600,"issued_at": "2009-11-10T23:00:00Z"}
```
From 0c15ab69528059d21395c166b3ec3c72ec297c69 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Wed, 6 Apr 2016 17:01:30 -0700
Subject: [PATCH 056/546] Remove signature store from registry. Return a
generated signature for manifest pull.
Signed-off-by: Richard Scothern
---
configuration/configuration.go | 5 -
docs/configuration.md | 19 ----
manifests.go | 6 -
notifications/listener_test.go | 7 +-
registry/handlers/api_test.go | 4 +-
registry/handlers/app.go | 10 +-
registry/proxy/proxymanifeststore_test.go | 17 ++-
registry/root.go | 2 +-
registry/storage/blobstore.go | 1 -
registry/storage/garbagecollect.go | 17 ---
registry/storage/garbagecollect_test.go | 24 ++--
registry/storage/manifeststore.go | 47 --------
registry/storage/manifeststore_test.go | 43 +------
registry/storage/paths.go | 54 +--------
registry/storage/paths_test.go | 17 +--
registry/storage/registry.go | 22 +---
registry/storage/signaturestore.go | 131 ----------------------
registry/storage/signedmanifesthandler.go | 22 ----
18 files changed, 39 insertions(+), 409 deletions(-)
delete mode 100644 registry/storage/signaturestore.go
diff --git a/configuration/configuration.go b/configuration/configuration.go
index eee83f8a..59c90fde 100644
--- a/configuration/configuration.go
+++ b/configuration/configuration.go
@@ -157,11 +157,6 @@ type Configuration struct {
// TrustKey is the signing key to use for adding the signature to
// schema1 manifests.
TrustKey string `yaml:"signingkeyfile,omitempty"`
-
- // DisableSignatureStore will cause all signatures attached to schema1 manifests
- // to be ignored. Signatures will be generated on all schema1 manifest requests
- // rather than only requests which converted schema2 to schema1.
- DisableSignatureStore bool `yaml:"disablesignaturestore,omitempty"`
} `yaml:"schema1,omitempty"`
} `yaml:"compatibility,omitempty"`
}
diff --git a/docs/configuration.md b/docs/configuration.md
index 6a1afb79..c215647d 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -243,7 +243,6 @@ information about each option that appears later in this page.
compatibility:
schema1:
signingkeyfile: /etc/registry/key.json
- disablesignaturestore: true
In some instances a configuration option is **optional** but it contains child
options marked as **required**. This indicates that you can omit the parent with
@@ -1730,7 +1729,6 @@ To enable pulling private repositories (e.g. `batman/robin`) a username and pass
compatibility:
schema1:
signingkeyfile: /etc/registry/key.json
- disablesignaturestore: true
Configure handling of older and deprecated features. Each subsection
defines a such a feature with configurable behavior.
@@ -1756,23 +1754,6 @@ defines a such a feature with configurable behavior.
startup.
-
-
- disablesignaturestore
- |
-
- no
- |
-
- Disables storage of signatures attached to schema1 manifests. By default
- signatures are detached from schema1 manifests, stored, and reattached
- when the manifest is requested. When this is true, the storage is disabled
- and a new signature is always generated for schema1 manifests using the
- schema1 signing key. Disabling signature storage will cause all newly
- uploaded signatures to be discarded. Existing stored signatures will not
- be removed but they will not be re-attached to the corresponding manifest.
- |
-
## Example: Development configuration
diff --git a/manifests.go b/manifests.go
index 3bf912a6..2ac7c8f2 100644
--- a/manifests.go
+++ b/manifests.go
@@ -61,12 +61,6 @@ type ManifestEnumerator interface {
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}
-// SignaturesGetter provides an interface for getting the signatures of a schema1 manifest. If the digest
-// referred to is not a schema1 manifest, an error should be returned.
-type SignaturesGetter interface {
- GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error)
-}
-
// Describable is an interface for descriptors
type Describable interface {
Descriptor() Descriptor
diff --git a/notifications/listener_test.go b/notifications/listener_test.go
index d16a4560..c7db5944 100644
--- a/notifications/listener_test.go
+++ b/notifications/listener_test.go
@@ -20,7 +20,12 @@ import (
func TestListener(t *testing.T) {
ctx := context.Background()
- registry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect)
+ k, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ registry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect, storage.Schema1SigningKey(k))
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go
index 523ecca2..01fd4f4c 100644
--- a/registry/handlers/api_test.go
+++ b/registry/handlers/api_test.go
@@ -1067,13 +1067,13 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
t.Fatalf("error decoding fetched manifest: %v", err)
}
- // check two signatures were roundtripped
+ // check only 1 signature is returned
signatures, err = fetchedManifestByDigest.Signatures()
if err != nil {
t.Fatal(err)
}
- if len(signatures) != 2 {
+ if len(signatures) != 1 {
t.Fatalf("expected 2 signature from manifest, got: %d", len(signatures))
}
diff --git a/registry/handlers/app.go b/registry/handlers/app.go
index 4bda082b..384a61d6 100644
--- a/registry/handlers/app.go
+++ b/registry/handlers/app.go
@@ -155,6 +155,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
app.configureRedis(config)
app.configureLogHook(config)
+ options := registrymiddleware.GetRegistryOptions()
if config.Compatibility.Schema1.TrustKey != "" {
app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey)
if err != nil {
@@ -169,6 +170,8 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
}
}
+ options = append(options, storage.Schema1SigningKey(app.trustKey))
+
if config.HTTP.Host != "" {
u, err := url.Parse(config.HTTP.Host)
if err != nil {
@@ -177,17 +180,10 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
app.httpHost = *u
}
- options := registrymiddleware.GetRegistryOptions()
-
if app.isCache {
options = append(options, storage.DisableDigestResumption)
}
- if config.Compatibility.Schema1.DisableSignatureStore {
- options = append(options, storage.DisableSchema1Signatures)
- options = append(options, storage.Schema1SigningKey(app.trustKey))
- }
-
// configure deletion
if d, ok := config.Storage["delete"]; ok {
e, ok := d["enabled"]
diff --git a/registry/proxy/proxymanifeststore_test.go b/registry/proxy/proxymanifeststore_test.go
index 1069d66c..0d6b7171 100644
--- a/registry/proxy/proxymanifeststore_test.go
+++ b/registry/proxy/proxymanifeststore_test.go
@@ -60,12 +60,6 @@ func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest,
return sm.manifests.Put(ctx, manifest)
}
-/*func (sm statsManifest) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
- sm.stats["enumerate"]++
- return sm.manifests.Enumerate(ctx, manifests, last)
-}
-*/
-
type mockChallenger struct {
sync.Mutex
count int
@@ -75,7 +69,6 @@ type mockChallenger struct {
func (m *mockChallenger) tryEstablishChallenges(context.Context) error {
m.Lock()
defer m.Unlock()
-
m.count++
return nil
}
@@ -93,9 +86,15 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE
if err != nil {
t.Fatalf("unable to parse reference: %s", err)
}
+ k, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
ctx := context.Background()
- truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()))
+ truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(),
+ storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()),
+ storage.Schema1SigningKey(k))
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
@@ -117,7 +116,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE
t.Fatalf(err.Error())
}
- localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption)
+ localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k))
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
diff --git a/registry/root.go b/registry/root.go
index 7a7d44cb..5d3005c2 100644
--- a/registry/root.go
+++ b/registry/root.go
@@ -69,7 +69,7 @@ var GCCmd = &cobra.Command{
os.Exit(1)
}
- registry, err := storage.NewRegistry(ctx, driver, storage.DisableSchema1Signatures, storage.Schema1SigningKey(k))
+ registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k))
if err != nil {
fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err)
os.Exit(1)
diff --git a/registry/storage/blobstore.go b/registry/storage/blobstore.go
index 9034cb68..84f6660f 100644
--- a/registry/storage/blobstore.go
+++ b/registry/storage/blobstore.go
@@ -75,7 +75,6 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr
}
// TODO(stevvooe): Write out mediatype here, as well.
-
return distribution.Descriptor{
Size: int64(len(p)),
diff --git a/registry/storage/garbagecollect.go b/registry/storage/garbagecollect.go
index be64b847..bc340416 100644
--- a/registry/storage/garbagecollect.go
+++ b/registry/storage/garbagecollect.go
@@ -6,7 +6,6 @@ import (
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
- "github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/driver"
@@ -71,22 +70,6 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
}
switch manifest.(type) {
- case *schema1.SignedManifest:
- signaturesGetter, ok := manifestService.(distribution.SignaturesGetter)
- if !ok {
- return fmt.Errorf("unable to convert ManifestService into SignaturesGetter")
- }
- signatures, err := signaturesGetter.GetSignatures(ctx, dgst)
- if err != nil {
- return fmt.Errorf("failed to get signatures for signed manifest: %v", err)
- }
- for _, signatureDigest := range signatures {
- if dryRun {
- emit("%s: marking signature %s", repoName, signatureDigest)
- }
- markSet[signatureDigest] = struct{}{}
- }
- break
case *schema2.DeserializedManifest:
config := manifest.(*schema2.DeserializedManifest).Config
if dryRun {
diff --git a/registry/storage/garbagecollect_test.go b/registry/storage/garbagecollect_test.go
index a0ba154b..86fc175a 100644
--- a/registry/storage/garbagecollect_test.go
+++ b/registry/storage/garbagecollect_test.go
@@ -12,6 +12,7 @@ import (
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/testutil"
+ "github.com/docker/libtrust"
)
type image struct {
@@ -22,7 +23,11 @@ type image struct {
func createRegistry(t *testing.T, driver driver.StorageDriver) distribution.Namespace {
ctx := context.Background()
- registry, err := NewRegistry(ctx, driver, EnableDelete)
+ k, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+ registry, err := NewRegistry(ctx, driver, EnableDelete, Schema1SigningKey(k))
if err != nil {
t.Fatalf("Failed to construct namespace")
}
@@ -139,13 +144,13 @@ func TestNoDeletionNoEffect(t *testing.T) {
ctx := context.Background()
inmemoryDriver := inmemory.New()
- registry := createRegistry(t, inmemoryDriver)
+ registry := createRegistry(t, inmemory.New())
repo := makeRepository(t, registry, "palailogos")
manifestService, err := repo.Manifests(ctx)
image1 := uploadRandomSchema1Image(t, repo)
image2 := uploadRandomSchema1Image(t, repo)
- image3 := uploadRandomSchema2Image(t, repo)
+ uploadRandomSchema2Image(t, repo)
// construct manifestlist for fun.
blobstatter := registry.BlobStatter()
@@ -160,20 +165,17 @@ func TestNoDeletionNoEffect(t *testing.T) {
t.Fatalf("Failed to add manifest list: %v", err)
}
+ before := allBlobs(t, registry)
+
// Run GC
err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false)
if err != nil {
t.Fatalf("Failed mark and sweep: %v", err)
}
- blobs := allBlobs(t, registry)
-
- // the +1 at the end is for the manifestList
- // the first +3 at the end for each manifest's blob
- // the second +3 at the end for each manifest's signature/config layer
- totalBlobCount := len(image1.layers) + len(image2.layers) + len(image3.layers) + 1 + 3 + 3
- if len(blobs) != totalBlobCount {
- t.Fatalf("Garbage collection affected storage")
+ after := allBlobs(t, registry)
+ if len(before) != len(after) {
+ t.Fatalf("Garbage collection affected storage: %d != %d", len(before), len(after))
}
}
diff --git a/registry/storage/manifeststore.go b/registry/storage/manifeststore.go
index 5a9165f9..68483c95 100644
--- a/registry/storage/manifeststore.go
+++ b/registry/storage/manifeststore.go
@@ -2,7 +2,6 @@ package storage
import (
"fmt"
- "path"
"encoding/json"
"github.com/docker/distribution"
@@ -12,7 +11,6 @@ import (
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
- "github.com/docker/distribution/registry/storage/driver"
)
// A ManifestHandler gets and puts manifests of a particular type.
@@ -141,48 +139,3 @@ func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Dig
})
return err
}
-
-// Only valid for schema1 signed manifests
-func (ms *manifestStore) GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error) {
- // sanity check that digest refers to a schema1 digest
- manifest, err := ms.Get(ctx, manifestDigest)
- if err != nil {
- return nil, err
- }
-
- if _, ok := manifest.(*schema1.SignedManifest); !ok {
- return nil, fmt.Errorf("digest %v is not for schema1 manifest", manifestDigest)
- }
-
- signaturesPath, err := pathFor(manifestSignaturesPathSpec{
- name: ms.repository.Named().Name(),
- revision: manifestDigest,
- })
- if err != nil {
- return nil, err
- }
-
- var digests []digest.Digest
- alg := string(digest.SHA256)
- signaturePaths, err := ms.blobStore.driver.List(ctx, path.Join(signaturesPath, alg))
-
- switch err.(type) {
- case nil:
- break
- case driver.PathNotFoundError:
- // Manifest may have been pushed with signature store disabled
- return digests, nil
- default:
- return nil, err
- }
-
- for _, sigPath := range signaturePaths {
- sigdigest, err := digest.ParseDigest(alg + ":" + path.Base(sigPath))
- if err != nil {
- // merely found not a digest
- continue
- }
- digests = append(digests, sigdigest)
- }
- return digests, nil
-}
diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go
index fcb5adf9..cbd30c04 100644
--- a/registry/storage/manifeststore_test.go
+++ b/registry/storage/manifeststore_test.go
@@ -52,15 +52,11 @@ func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string, opt
}
func TestManifestStorage(t *testing.T) {
- testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
-}
-
-func TestManifestStorageDisabledSignatures(t *testing.T) {
k, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
t.Fatal(err)
}
- testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, DisableSchema1Signatures, Schema1SigningKey(k))
+ testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, Schema1SigningKey(k))
}
func testManifestStorage(t *testing.T, options ...RegistryOption) {
@@ -71,7 +67,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) {
if err != nil {
t.Fatal(err)
}
- equalSignatures := env.registry.(*registry).schema1SignaturesEnabled
m := schema1.Manifest{
Versioned: manifest.Versioned{
@@ -175,12 +170,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) {
t.Fatalf("fetched payload does not match original payload: %q != %q", fetchedManifest.Canonical, sm.Canonical)
}
- if equalSignatures {
- if !reflect.DeepEqual(fetchedManifest, sm) {
- t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest.Manifest, sm.Manifest)
- }
- }
-
_, pl, err := fetchedManifest.Payload()
if err != nil {
t.Fatalf("error getting payload %#v", err)
@@ -223,12 +212,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) {
t.Fatalf("fetched manifest not equal: %q != %q", byDigestManifest.Canonical, fetchedManifest.Canonical)
}
- if equalSignatures {
- if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) {
- t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest)
- }
- }
-
sigs, err := fetchedJWS.Signatures()
if err != nil {
t.Fatalf("unable to extract signatures: %v", err)
@@ -285,17 +268,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) {
t.Fatalf("unexpected error verifying manifest: %v", err)
}
- // Assemble our payload and two signatures to get what we expect!
- expectedJWS, err := libtrust.NewJSONSignature(payload, sigs[0], sigs2[0])
- if err != nil {
- t.Fatalf("unexpected error merging jws: %v", err)
- }
-
- expectedSigs, err := expectedJWS.Signatures()
- if err != nil {
- t.Fatalf("unexpected error getting expected signatures: %v", err)
- }
-
_, pl, err = fetched.Payload()
if err != nil {
t.Fatalf("error getting payload %#v", err)
@@ -315,19 +287,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) {
t.Fatalf("payloads are not equal")
}
- if equalSignatures {
- receivedSigs, err := receivedJWS.Signatures()
- if err != nil {
- t.Fatalf("error getting signatures: %v", err)
- }
-
- for i, sig := range receivedSigs {
- if !bytes.Equal(sig, expectedSigs[i]) {
- t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i]))
- }
- }
- }
-
// Test deleting manifests
err = ms.Delete(ctx, dgst)
if err != nil {
diff --git a/registry/storage/paths.go b/registry/storage/paths.go
index 8985f043..1b142b88 100644
--- a/registry/storage/paths.go
+++ b/registry/storage/paths.go
@@ -30,8 +30,6 @@ const (
// revisions
// ->
// -> link
-// -> signatures
-// //link
// tags/
// -> current/link
// -> index
@@ -62,8 +60,7 @@ const (
//
// The third component of the repository directory is the manifests store,
// which is made up of a revision store and tag store. Manifests are stored in
-// the blob store and linked into the revision store. Signatures are separated
-// from the manifest payload data and linked into the blob store, as well.
+// the blob store and linked into the revision store.
// While the registry can save all revisions of a manifest, no relationship is
// implied as to the ordering of changes to a manifest. The tag store provides
// support for name, tag lookups of manifests, using "current/link" under a
@@ -77,8 +74,6 @@ const (
// manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/
// manifestRevisionPathSpec: /v2/repositories//_manifests/revisions///
// manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link
-// manifestSignaturesPathSpec: /v2/repositories//_manifests/revisions///signatures/
-// manifestSignatureLinkPathSpec: /v2/repositories//_manifests/revisions///signatures///link
//
// Tags:
//
@@ -148,33 +143,6 @@ func pathFor(spec pathSpec) (string, error) {
}
return path.Join(root, "link"), nil
- case manifestSignaturesPathSpec:
- root, err := pathFor(manifestRevisionPathSpec{
- name: v.name,
- revision: v.revision,
- })
-
- if err != nil {
- return "", err
- }
-
- return path.Join(root, "signatures"), nil
- case manifestSignatureLinkPathSpec:
- root, err := pathFor(manifestSignaturesPathSpec{
- name: v.name,
- revision: v.revision,
- })
-
- if err != nil {
- return "", err
- }
-
- signatureComponents, err := digestPathComponents(v.signature, false)
- if err != nil {
- return "", err
- }
-
- return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil
case manifestTagsPathSpec:
return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil
case manifestTagPathSpec:
@@ -325,26 +293,6 @@ type manifestRevisionLinkPathSpec struct {
func (manifestRevisionLinkPathSpec) pathSpec() {}
-// manifestSignaturesPathSpec describes the path components for the directory
-// containing all the signatures for the target blob. Entries are named with
-// the underlying key id.
-type manifestSignaturesPathSpec struct {
- name string
- revision digest.Digest
-}
-
-func (manifestSignaturesPathSpec) pathSpec() {}
-
-// manifestSignatureLinkPathSpec describes the path components used to look up
-// a signature file by the hash of its blob.
-type manifestSignatureLinkPathSpec struct {
- name string
- revision digest.Digest
- signature digest.Digest
-}
-
-func (manifestSignatureLinkPathSpec) pathSpec() {}
-
// manifestTagsPathSpec describes the path elements required to point to the
// manifest tags directory.
type manifestTagsPathSpec struct {
diff --git a/registry/storage/paths_test.go b/registry/storage/paths_test.go
index 91004bd4..f739552a 100644
--- a/registry/storage/paths_test.go
+++ b/registry/storage/paths_test.go
@@ -26,21 +26,6 @@ func TestPathMapper(t *testing.T) {
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
},
- {
- spec: manifestSignatureLinkPathSpec{
- name: "foo/bar",
- revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
- signature: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
- },
- expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
- },
- {
- spec: manifestSignaturesPathSpec{
- name: "foo/bar",
- revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
- },
- expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures",
- },
{
spec: manifestTagsPathSpec{
name: "foo/bar",
@@ -113,7 +98,7 @@ func TestPathMapper(t *testing.T) {
// Add a few test cases to ensure we cover some errors
// Specify a path that requires a revision and get a digest validation error.
- badpath, err := pathFor(manifestSignaturesPathSpec{
+ badpath, err := pathFor(manifestRevisionPathSpec{
name: "foo/bar",
})
diff --git a/registry/storage/registry.go b/registry/storage/registry.go
index 3fe4ac68..94034b26 100644
--- a/registry/storage/registry.go
+++ b/registry/storage/registry.go
@@ -18,7 +18,6 @@ type registry struct {
blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider
deleteEnabled bool
resumableDigestEnabled bool
- schema1SignaturesEnabled bool
schema1SigningKey libtrust.PrivateKey
blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory
}
@@ -47,17 +46,8 @@ func DisableDigestResumption(registry *registry) error {
return nil
}
-// DisableSchema1Signatures is a functional option for NewRegistry. It disables
-// signature storage and ensures all schema1 manifests will only be returned
-// with a signature from a provided signing key.
-func DisableSchema1Signatures(registry *registry) error {
- registry.schema1SignaturesEnabled = false
- return nil
-}
-
// Schema1SigningKey returns a functional option for NewRegistry. It sets the
-// signing key for adding a signature to all schema1 manifests. This should be
-// used in conjunction with disabling signature store.
+// key for signing all schema1 manifests.
func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption {
return func(registry *registry) error {
registry.schema1SigningKey = key
@@ -116,9 +106,8 @@ func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, option
statter: statter,
pathFn: bs.path,
},
- statter: statter,
- resumableDigestEnabled: true,
- schema1SignaturesEnabled: true,
+ statter: statter,
+ resumableDigestEnabled: true,
}
for _, option := range options {
@@ -231,11 +220,6 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M
ctx: ctx,
repository: repo,
blobStore: blobStore,
- signatures: &signatureStore{
- ctx: ctx,
- repository: repo,
- blobStore: repo.blobStore,
- },
},
schema2Handler: &schema2ManifestHandler{
ctx: ctx,
diff --git a/registry/storage/signaturestore.go b/registry/storage/signaturestore.go
deleted file mode 100644
index 2940e041..00000000
--- a/registry/storage/signaturestore.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package storage
-
-import (
- "path"
- "sync"
-
- "github.com/docker/distribution/context"
- "github.com/docker/distribution/digest"
-)
-
-type signatureStore struct {
- repository *repository
- blobStore *blobStore
- ctx context.Context
-}
-
-func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) {
- signaturesPath, err := pathFor(manifestSignaturesPathSpec{
- name: s.repository.Named().Name(),
- revision: dgst,
- })
-
- if err != nil {
- return nil, err
- }
-
- // Need to append signature digest algorithm to path to get all items.
- // Perhaps, this should be in the pathMapper but it feels awkward. This
- // can be eliminated by implementing listAll on drivers.
- signaturesPath = path.Join(signaturesPath, "sha256")
-
- signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath)
- if err != nil {
- return nil, err
- }
-
- var wg sync.WaitGroup
- type result struct {
- index int
- signature []byte
- err error
- }
- ch := make(chan result)
-
- bs := s.linkedBlobStore(s.ctx, dgst)
- for i, sigPath := range signaturePaths {
- sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath))
- if err != nil {
- context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath)
- continue
- }
-
- wg.Add(1)
- go func(idx int, sigdgst digest.Digest) {
- defer wg.Done()
- context.GetLogger(s.ctx).
- Debugf("fetching signature %q", sigdgst)
-
- r := result{index: idx}
-
- if p, err := bs.Get(s.ctx, sigdgst); err != nil {
- context.GetLogger(s.ctx).
- Errorf("error fetching signature %q: %v", sigdgst, err)
- r.err = err
- } else {
- r.signature = p
- }
-
- ch <- r
- }(i, sigdgst)
- }
- done := make(chan struct{})
- go func() {
- wg.Wait()
- close(done)
- }()
-
- // aggregrate the results
- signatures := make([][]byte, len(signaturePaths))
-loop:
- for {
- select {
- case result := <-ch:
- signatures[result.index] = result.signature
- if result.err != nil && err == nil {
- // only set the first one.
- err = result.err
- }
- case <-done:
- break loop
- }
- }
-
- return signatures, err
-}
-
-func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error {
- bs := s.linkedBlobStore(s.ctx, dgst)
- for _, signature := range signatures {
- if _, err := bs.Put(s.ctx, "application/json", signature); err != nil {
- return err
- }
- }
- return nil
-}
-
-// linkedBlobStore returns the namedBlobStore of the signatures for the
-// manifest with the given digest. Effectively, each signature link path
-// layout is a unique linked blob store.
-func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore {
- linkpath := func(name string, dgst digest.Digest) (string, error) {
- return pathFor(manifestSignatureLinkPathSpec{
- name: name,
- revision: revision,
- signature: dgst,
- })
-
- }
-
- return &linkedBlobStore{
- ctx: ctx,
- repository: s.repository,
- blobStore: s.blobStore,
- blobAccessController: &linkedBlobStatter{
- blobStore: s.blobStore,
- repository: s.repository,
- linkPathFns: []linkPathFunc{linkpath},
- },
- linkPathFns: []linkPathFunc{linkpath},
- }
-}
diff --git a/registry/storage/signedmanifesthandler.go b/registry/storage/signedmanifesthandler.go
index 8e13dd93..df6369f3 100644
--- a/registry/storage/signedmanifesthandler.go
+++ b/registry/storage/signedmanifesthandler.go
@@ -18,7 +18,6 @@ type signedManifestHandler struct {
repository *repository
blobStore *linkedBlobStore
ctx context.Context
- signatures *signatureStore
}
var _ ManifestHandler = &signedManifestHandler{}
@@ -30,13 +29,6 @@ func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Dige
signatures [][]byte
err error
)
- if ms.repository.schema1SignaturesEnabled {
- // Fetch the signatures for the manifest
- signatures, err = ms.signatures.Get(dgst)
- if err != nil {
- return nil, err
- }
- }
jsig, err := libtrust.NewJSONSignature(content, signatures...)
if err != nil {
@@ -47,8 +39,6 @@ func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Dige
if err := jsig.Sign(ms.repository.schema1SigningKey); err != nil {
return nil, err
}
- } else if !ms.repository.schema1SignaturesEnabled {
- return nil, fmt.Errorf("missing signing key with signature store disabled")
}
// Extract the pretty JWS
@@ -90,18 +80,6 @@ func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.
return "", err
}
- if ms.repository.schema1SignaturesEnabled {
- // Grab each json signature and store them.
- signatures, err := sm.Signatures()
- if err != nil {
- return "", err
- }
-
- if err := ms.signatures.Put(revision.Digest, signatures...); err != nil {
- return "", err
- }
- }
-
return revision.Digest, nil
}
From febcee65645cf56a1f359485f738ac0a573b9a81 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Fri, 27 May 2016 11:30:42 -0700
Subject: [PATCH 057/546] Add a deprecation document detailing signature store
removal
Signed-off-by: Richard Scothern
---
cmd/registry/config-dev.yml | 2 +-
docs/deprecated.md | 24 ++++++++++++++++++++++++
2 files changed, 25 insertions(+), 1 deletion(-)
create mode 100644 docs/deprecated.md
diff --git a/cmd/registry/config-dev.yml b/cmd/registry/config-dev.yml
index b6438be5..42c9dc16 100644
--- a/cmd/registry/config-dev.yml
+++ b/cmd/registry/config-dev.yml
@@ -24,7 +24,7 @@ storage:
cache:
blobdescriptor: redis
filesystem:
- rootdirectory: /var/lib/registry
+ rootdirectory: /var/lib/registry
maintenance:
uploadpurging:
enabled: false
diff --git a/docs/deprecated.md b/docs/deprecated.md
new file mode 100644
index 00000000..796d3fa5
--- /dev/null
+++ b/docs/deprecated.md
@@ -0,0 +1,24 @@
+
+
+# Docker Registry Deprecation
+
+This document details functionality or components which are deprecated within
+the registry.
+
+### v2.5.0
+
+The signature store has been removed from the registry. Since `v2.4.0` it has
+been possible to configure the registry to generate manifest signatures rather
+than load them from storage. In this version of the registry this becomes
+the default behavior. Signatures which are attached to manifests on put are
+not stored in the registry. This does not alter the functional behavior of
+the registry.
+
+Old signature blobs can be removed from the registry storage by running the
+garbage-collect subcommand.
From 3069a04b8ceb0b1d5dcb475cad797052bbfc8c12 Mon Sep 17 00:00:00 2001
From: Sven Dowideit
Date: Thu, 19 May 2016 12:52:58 +0000
Subject: [PATCH 058/546] Make the index.html files the visible overview menu
entry
And move menu entry definition into a page that the user has no reason to navigate to
Signed-off-by: Sven Dowideit
---
docs/Dockerfile | 4 +-
docs/Makefile | 29 ++-------
docs/configuration.md | 2 +-
docs/deploying.md | 12 ++--
docs/help.md | 2 +-
docs/index.md | 6 +-
docs/introduction.md | 4 +-
docs/{overview.md => menu.md} | 8 ++-
docs/{ => recipes}/apache.md | 6 +-
docs/{ => recipes}/building.md | 2 +
docs/{recipes.md => recipes/index.md} | 8 +--
docs/recipes/menu.md | 22 +++++++
docs/{ => recipes}/mirror.md | 2 +
docs/{ => recipes}/nginx.md | 6 +-
docs/{ => recipes}/osx-setup-guide.md | 4 +-
.../osx/com.docker.registry.plist | 0
docs/{ => recipes}/osx/config.yml | 0
docs/spec/auth/index.md | 3 +
docs/spec/auth/jwt.md | 1 +
docs/spec/auth/oauth.md | 1 +
docs/spec/auth/scope.md | 1 +
docs/spec/auth/token.md | 1 +
docs/spec/index.md | 9 ++-
docs/spec/menu.md | 13 ++++
docs/storage-drivers/azure.md | 2 +
docs/storage-drivers/filesystem.md | 2 +
docs/storage-drivers/gcs.md | 2 +
docs/storage-drivers/index.md | 65 ++++++++++++++++++-
docs/storage-drivers/inmemory.md | 2 +
docs/storage-drivers/menu.md | 13 ++++
docs/storage-drivers/oss.md | 2 +
docs/storage-drivers/s3.md | 2 +
docs/storage-drivers/swift.md | 2 +
docs/storagedrivers.md | 64 ------------------
34 files changed, 180 insertions(+), 122 deletions(-)
rename docs/{overview.md => menu.md} (82%)
rename docs/{ => recipes}/apache.md (97%)
rename docs/{ => recipes}/building.md (99%)
rename docs/{recipes.md => recipes/index.md} (89%)
create mode 100644 docs/recipes/menu.md
rename docs/{ => recipes}/mirror.md (99%)
rename docs/{ => recipes}/nginx.md (97%)
rename docs/{ => recipes}/osx-setup-guide.md (94%)
rename docs/{ => recipes}/osx/com.docker.registry.plist (100%)
rename docs/{ => recipes}/osx/config.yml (100%)
create mode 100644 docs/spec/menu.md
create mode 100644 docs/storage-drivers/menu.md
delete mode 100644 docs/storagedrivers.md
diff --git a/docs/Dockerfile b/docs/Dockerfile
index 8fa63a33..fcc63422 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -1,9 +1,9 @@
FROM docs/base:oss
-MAINTAINER Mary Anthony (@moxiegirl)
+MAINTAINER Docker Docs
ENV PROJECT=registry
# To get the git info for this repo
COPY . /src
-RUN rm -r /docs/content/$PROJECT/
+RUN rm -rf /docs/content/$PROJECT/
COPY . /docs/content/$PROJECT/
diff --git a/docs/Makefile b/docs/Makefile
index 021e8f6e..585bc871 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -1,17 +1,4 @@
-.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
-
-# env vars passed through directly to Docker's build scripts
-# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
-# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these
-DOCKER_ENVS := \
- -e BUILDFLAGS \
- -e DOCKER_CLIENTONLY \
- -e DOCKER_EXECDRIVER \
- -e DOCKER_GRAPHDRIVER \
- -e TESTDIRS \
- -e TESTFLAGS \
- -e TIMEOUT
-# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
+.PHONY: all default docs docs-build docs-shell shell test
# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
@@ -25,9 +12,8 @@ HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER
HUGO_BIND_IP=0.0.0.0
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
-DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH))
-
+GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
+DOCKER_DOCS_IMAGE := registry-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
@@ -42,14 +28,11 @@ docs: docs-build
docs-draft: docs-build
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
-
docs-shell: docs-build
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
-
docs-build:
-# ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files
-# echo "$(GIT_BRANCH)" > GIT_BRANCH
-# echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET
-# echo "$(GITCOMMIT)" > GITCOMMIT
docker build -t "$(DOCKER_DOCS_IMAGE)" .
+
+test: docs-build
+ $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)"
diff --git a/docs/configuration.md b/docs/configuration.md
index 6a1afb79..89dafda3 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -1680,7 +1680,7 @@ The TCP address to connect to, including a port number.
username: [username]
password: [password]
-Proxy enables a registry to be configured as a pull through cache to the official Docker Hub. See [mirror](mirror.md) for more information. Pushing to a registry configured as a pull through cache is currently unsupported.
+Proxy enables a registry to be configured as a pull through cache to the official Docker Hub. See [mirror](recipes/mirror.md) for more information. Pushing to a registry configured as a pull through cache is currently unsupported.
diff --git a/docs/deploying.md b/docs/deploying.md
index 1e7f5dd6..b67d5396 100644
--- a/docs/deploying.md
+++ b/docs/deploying.md
@@ -11,7 +11,7 @@ weight=3
# Deploying a registry server
-You need to [install Docker version 1.6.0 or newer](https://docs.docker.com/installation/).
+You need to [install Docker version 1.6.0 or newer](/engine/installation/index.md).
## Running on localhost
@@ -39,7 +39,7 @@ To stop your registry, you would:
## Storage
-By default, your registry data is persisted as a [docker volume](https://docs.docker.com/userguide/dockervolumes/) on the host filesystem. Properly understanding volumes is essential if you want to stick with a local filesystem storage.
+By default, your registry data is persisted as a [docker volume](/engine/userguide/containers/dockervolumes.md) on the host filesystem. Properly understanding volumes is essential if you want to stick with a local filesystem storage.
Specifically, you might want to point your volume location to a specific place in order to more easily access your registry data. To do so you can:
@@ -180,7 +180,7 @@ Seeing X509 errors is usually a sign you are trying to use self-signed certifica
### Alternatives
-1. You may want to leverage more advanced basic auth implementations through a proxy design, in front of the registry. You will find examples of such patterns in the [recipes list](recipes.md).
+1. You may want to leverage more advanced basic auth implementations through a proxy design, in front of the registry. You will find examples of such patterns in the [recipes list](recipes/index.md).
2. Alternatively, the Registry also supports delegated authentication, redirecting users to a specific, trusted token server. That approach requires significantly more investment, and only makes sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems.
@@ -192,7 +192,7 @@ Beware that you will have to implement your own authentication service for this
As your registry configuration grows more complex, dealing with it can quickly become tedious.
-It's highly recommended to use [Docker Compose](https://docs.docker.com/compose/) to facilitate operating your registry.
+It's highly recommended to use [Docker Compose](/compose/index.md) to facilitate operating your registry.
Here is a simple `docker-compose.yml` example that condenses everything explained so far:
@@ -226,7 +226,7 @@ You will find more specific and advanced informations in the following sections:
- [Configuration reference](configuration.md)
- [Working with notifications](notifications.md)
- - [Advanced "recipes"](recipes.md)
+ - [Advanced "recipes"](recipes/index.md)
- [Registry API](spec/api.md)
- - [Storage driver model](storagedrivers.md)
+ - [Storage driver model](storage-drivers/index.md)
- [Token authentication](spec/auth/token.md)
diff --git a/docs/help.md b/docs/help.md
index c6ac7ad9..77ec378f 100644
--- a/docs/help.md
+++ b/docs/help.md
@@ -21,4 +21,4 @@ If you want to report a bug:
- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md)
- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues)
-You can also find out more about the Docker's project [Getting Help resources](https://docs.docker.com/opensource/get-help/).
+You can also find out more about the Docker's project [Getting Help resources](/opensource/get-help.md).
diff --git a/docs/index.md b/docs/index.md
index 0252fcb5..2eb5f767 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,11 +1,11 @@
diff --git a/docs/introduction.md b/docs/introduction.md
index e8b05d8c..eceb5ffc 100644
--- a/docs/introduction.md
+++ b/docs/introduction.md
@@ -19,7 +19,7 @@ Users interact with a registry by using docker push and pull commands.
> Example: `docker pull registry-1.docker.io/distribution/registry:2.1`.
-Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, OpenStack Swift and Aliyun OSS are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storagedrivers.md).
+Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, OpenStack Swift and Aliyun OSS are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storage-drivers/index.md).
Since securing access to your hosted images is paramount, the Registry natively supports TLS and basic authentication.
@@ -34,7 +34,7 @@ Image names as used in typical docker commands reflect their origin:
* `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub. This is simply a shortcut for the longer `docker pull docker.io/library/ubuntu` command
* `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find the image `foo/bar`
-You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](https://docs.docker.com/reference/commandline/cli/).
+You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](/engine/reference/commandline/cli.md).
## Use cases
diff --git a/docs/overview.md b/docs/menu.md
similarity index 82%
rename from docs/overview.md
rename to docs/menu.md
index d3532b62..7e24a690 100644
--- a/docs/overview.md
+++ b/docs/menu.md
@@ -1,10 +1,12 @@
@@ -17,5 +19,5 @@ The Docker Registry documentation includes the following topics:
* [Deploying a registry server](deploying.md)
* [Registry Configuration Reference](configuration.md)
* [Notifications](notifications.md)
-* [Recipes](recipes.md)
+* [Recipes](recipes/index.md)
* [Getting help](help.md)
diff --git a/docs/apache.md b/docs/recipes/apache.md
similarity index 97%
rename from docs/apache.md
rename to docs/recipes/apache.md
index f694ca3e..ac24113b 100644
--- a/docs/apache.md
+++ b/docs/recipes/apache.md
@@ -3,6 +3,8 @@
title = "Authenticating proxy with apache"
description = "Restricting access to your registry using an apache proxy"
keywords = ["registry, on-prem, images, tags, repository, distribution, authentication, proxy, apache, httpd, TLS, recipe, advanced"]
+[menu.main]
+parent="smn_recipes"
+++
@@ -16,7 +18,7 @@ Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO
### Alternatives
-If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth).
+If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](../deploying.md#native-basic-auth).
### Solution
@@ -34,7 +36,7 @@ Furthermore, introducing an extra http layer in your communication pipeline will
## Setting things up
-Read again [the requirements](recipes.md#requirements).
+Read again [the requirements](index.md#requirements).
Ready?
diff --git a/docs/building.md b/docs/recipes/building.md
similarity index 99%
rename from docs/building.md
rename to docs/recipes/building.md
index 126a455a..4d67593a 100644
--- a/docs/building.md
+++ b/docs/recipes/building.md
@@ -3,6 +3,8 @@
title = "Build instructions"
description = "Explains how to build & hack on the registry"
keywords = ["registry, on-prem, images, tags, repository, distribution, build, recipe, advanced"]
+[menu.main]
+parent="smn_recipes"
+++
diff --git a/docs/recipes.md b/docs/recipes/index.md
similarity index 89%
rename from docs/recipes.md
rename to docs/recipes/index.md
index c9f64d53..df74ca2b 100644
--- a/docs/recipes.md
+++ b/docs/recipes/index.md
@@ -1,11 +1,11 @@
@@ -17,7 +17,7 @@ Most users are not expected to have a use for these.
## Requirements
-You should have followed entirely the basic [deployment guide](deploying.md).
+You should have followed entirely the basic [deployment guide](../deploying.md).
If you have not, please take the time to do so.
diff --git a/docs/recipes/menu.md b/docs/recipes/menu.md
new file mode 100644
index 00000000..5fdbec5b
--- /dev/null
+++ b/docs/recipes/menu.md
@@ -0,0 +1,22 @@
+
+
+# Recipes
+
+## The List
+
+ * [using Apache as an authenticating proxy](apache.md)
+ * [using Nginx as an authenticating proxy](nginx.md)
+ * [running a Registry on OS X](osx-setup-guide.md)
+ * [hacking the registry: build instructions](building.md)
+ * [mirror the Docker Hub](mirror.md)
diff --git a/docs/mirror.md b/docs/recipes/mirror.md
similarity index 99%
rename from docs/mirror.md
rename to docs/recipes/mirror.md
index feb2630c..ff437ba4 100644
--- a/docs/mirror.md
+++ b/docs/recipes/mirror.md
@@ -3,6 +3,8 @@
title = "Mirroring Docker Hub"
description = "Setting-up a local mirror for Docker Hub images"
keywords = ["registry, on-prem, images, tags, repository, distribution, mirror, Hub, recipe, advanced"]
+[menu.main]
+parent="smn_recipes"
+++
diff --git a/docs/nginx.md b/docs/recipes/nginx.md
similarity index 97%
rename from docs/nginx.md
rename to docs/recipes/nginx.md
index 361a1063..f4a67679 100644
--- a/docs/nginx.md
+++ b/docs/recipes/nginx.md
@@ -3,6 +3,8 @@
title = "Authenticating proxy with nginx"
description = "Restricting access to your registry using a nginx proxy"
keywords = ["registry, on-prem, images, tags, repository, distribution, nginx, proxy, authentication, TLS, recipe, advanced"]
+[menu.main]
+parent="smn_recipes"
+++
@@ -17,7 +19,7 @@ Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO
### Alternatives
-If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth).
+If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](../deploying.md#native-basic-auth).
### Solution
@@ -53,7 +55,7 @@ Otherwise nginx will reset the ELB's values, and the requests will not be routed
## Setting things up
-Read again [the requirements](recipes.md#requirements).
+Read again [the requirements](index.md#requirements).
Ready?
diff --git a/docs/osx-setup-guide.md b/docs/recipes/osx-setup-guide.md
similarity index 94%
rename from docs/osx-setup-guide.md
rename to docs/recipes/osx-setup-guide.md
index 15a26ff4..d47d31c1 100644
--- a/docs/osx-setup-guide.md
+++ b/docs/recipes/osx-setup-guide.md
@@ -3,6 +3,8 @@
title = "Running on OS X"
description = "Explains how to run a registry on OS X"
keywords = ["registry, on-prem, images, tags, repository, distribution, OS X, recipe, advanced"]
+[menu.main]
+parent="smn_recipes"
+++
@@ -16,7 +18,7 @@ This is useful if you intend to run a registry server natively on OS X.
You can start a VM on OS X, and deploy your registry normally as a container using Docker inside that VM.
-The simplest road to get there is traditionally to use the [docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](https://docs.docker.com/machine/), which usually relies on the [boot2docker](http://boot2docker.io/) iso inside a VirtualBox VM.
+The simplest road to get there is traditionally to use the [docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](/machine/index.md), which usually relies on the [boot2docker](http://boot2docker.io/) iso inside a VirtualBox VM.
### Solution
diff --git a/docs/osx/com.docker.registry.plist b/docs/recipes/osx/com.docker.registry.plist
similarity index 100%
rename from docs/osx/com.docker.registry.plist
rename to docs/recipes/osx/com.docker.registry.plist
diff --git a/docs/osx/config.yml b/docs/recipes/osx/config.yml
similarity index 100%
rename from docs/osx/config.yml
rename to docs/recipes/osx/config.yml
diff --git a/docs/spec/auth/index.md b/docs/spec/auth/index.md
index b123af1a..f6ee8e1f 100644
--- a/docs/spec/auth/index.md
+++ b/docs/spec/auth/index.md
@@ -3,6 +3,9 @@
title = "Docker Registry Token Authentication"
description = "Docker Registry v2 authentication schema"
keywords = ["registry, on-prem, images, tags, repository, distribution, authentication, advanced"]
+[menu.main]
+parent="smn_registry_ref"
+weight=100
+++
diff --git a/docs/spec/auth/jwt.md b/docs/spec/auth/jwt.md
index 87de62af..c90bd6e8 100644
--- a/docs/spec/auth/jwt.md
+++ b/docs/spec/auth/jwt.md
@@ -5,6 +5,7 @@ description = "Describe the reference implementation of the Docker Registry v2 a
keywords = ["registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced"]
[menu.main]
parent="smn_registry_ref"
+weight=101
+++
diff --git a/docs/spec/auth/oauth.md b/docs/spec/auth/oauth.md
index 1311df5d..3d1ae0aa 100644
--- a/docs/spec/auth/oauth.md
+++ b/docs/spec/auth/oauth.md
@@ -5,6 +5,7 @@ description = "Specifies the Docker Registry v2 authentication"
keywords = ["registry, on-prem, images, tags, repository, distribution, oauth2, advanced"]
[menu.main]
parent="smn_registry_ref"
+weight=102
+++
diff --git a/docs/spec/auth/scope.md b/docs/spec/auth/scope.md
index e626b6e1..a8f6c062 100644
--- a/docs/spec/auth/scope.md
+++ b/docs/spec/auth/scope.md
@@ -5,6 +5,7 @@ description = "Describes the scope and access fields used for registry authoriza
keywords = ["registry, on-prem, images, tags, repository, distribution, advanced, access, scope"]
[menu.main]
parent="smn_registry_ref"
+weight=103
+++
diff --git a/docs/spec/auth/token.md b/docs/spec/auth/token.md
index 65482699..12dfc685 100644
--- a/docs/spec/auth/token.md
+++ b/docs/spec/auth/token.md
@@ -5,6 +5,7 @@ description = "Specifies the Docker Registry v2 authentication"
keywords = ["registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced"]
[menu.main]
parent="smn_registry_ref"
+weight=104
+++
diff --git a/docs/spec/index.md b/docs/spec/index.md
index dddaa528..474bd455 100644
--- a/docs/spec/index.md
+++ b/docs/spec/index.md
@@ -1,18 +1,17 @@
# Docker Registry Reference
* [HTTP API V2](api.md)
-* [Storage Driver](../storagedrivers.md)
+* [Storage Driver](../storage-drivers/index.md)
* [Token Authentication Specification](auth/token.md)
* [Token Authentication Implementation](auth/jwt.md)
diff --git a/docs/spec/menu.md b/docs/spec/menu.md
new file mode 100644
index 00000000..ebc52327
--- /dev/null
+++ b/docs/spec/menu.md
@@ -0,0 +1,13 @@
+
+
diff --git a/docs/storage-drivers/azure.md b/docs/storage-drivers/azure.md
index 2783c427..a84888de 100644
--- a/docs/storage-drivers/azure.md
+++ b/docs/storage-drivers/azure.md
@@ -3,6 +3,8 @@
title = "Microsoft Azure storage driver"
description = "Explains how to use the Azure storage drivers"
keywords = ["registry, service, driver, images, storage, azure"]
+[menu.main]
+parent = "smn_storagedrivers"
+++
diff --git a/docs/storage-drivers/filesystem.md b/docs/storage-drivers/filesystem.md
index fea7ce4a..8e269cdb 100644
--- a/docs/storage-drivers/filesystem.md
+++ b/docs/storage-drivers/filesystem.md
@@ -3,6 +3,8 @@
title = "Filesystem storage driver"
description = "Explains how to use the filesystem storage drivers"
keywords = ["registry, service, driver, images, storage, filesystem"]
+[menu.main]
+parent="smn_storagedrivers"
+++
diff --git a/docs/storage-drivers/gcs.md b/docs/storage-drivers/gcs.md
index 0aa9b30d..1bc67f9e 100644
--- a/docs/storage-drivers/gcs.md
+++ b/docs/storage-drivers/gcs.md
@@ -3,6 +3,8 @@
title = "GCS storage driver"
description = "Explains how to use the Google Cloud Storage drivers"
keywords = ["registry, service, driver, images, storage, gcs, google, cloud"]
+[menu.main]
+parent="smn_storagedrivers"
+++
diff --git a/docs/storage-drivers/index.md b/docs/storage-drivers/index.md
index 2de729ad..89635bd3 100644
--- a/docs/storage-drivers/index.md
+++ b/docs/storage-drivers/index.md
@@ -1,7 +1,66 @@
+
+
+# Docker Registry Storage Driver
+
+This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers.
+
+## Provided Drivers
+
+This storage driver package comes bundled with several drivers:
+
+- [inmemory](inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
+- [filesystem](filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
+- [s3](s3.md): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket.
+- [azure](azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/).
+- [swift](swift.md): A driver storing objects in [Openstack Swift](http://docs.openstack.org/developer/swift/).
+- [oss](oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss).
+- [gcs](gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket.
+
+## Storage Driver API
+
+The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
+
+Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
+
+Storage drivers are intended to be written in Go, providing compile-time
+validation of the `storagedriver.StorageDriver` interface.
+
+## Driver Selection and Configuration
+
+The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.
+
+Storage driver factories may be registered by name using the
+`factory.Register` method, and then later invoked by calling `factory.Create`
+with a driver name and parameters map. If no such storage driver can be found,
+`factory.Create` will return an `InvalidStorageDriverError`.
+
+## Driver Contribution
+
+### Writing new storage drivers
+
+To create a valid storage driver, one must implement the
+`storagedriver.StorageDriver` interface and make sure to expose this driver
+via the factory system.
+
+#### Registering
+
+Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase.
+
+## Testing
+
+Storage driver test suites are provided in
+`storagedriver/testsuites/testsuites.go` and may be used for any storage
+driver written in Go. Tests can be registered using the `RegisterSuite`
+function, which run the same set of tests for any registered drivers.
diff --git a/docs/storage-drivers/inmemory.md b/docs/storage-drivers/inmemory.md
index 3109891e..1a14e77a 100644
--- a/docs/storage-drivers/inmemory.md
+++ b/docs/storage-drivers/inmemory.md
@@ -3,6 +3,8 @@
title = "In-memory storage driver"
description = "Explains how to use the in-memory storage drivers"
keywords = ["registry, service, driver, images, storage, in-memory"]
+[menu.main]
+parent="smn_storagedrivers"
+++
diff --git a/docs/storage-drivers/menu.md b/docs/storage-drivers/menu.md
new file mode 100644
index 00000000..3638649f
--- /dev/null
+++ b/docs/storage-drivers/menu.md
@@ -0,0 +1,13 @@
+
+
diff --git a/docs/storage-drivers/oss.md b/docs/storage-drivers/oss.md
index 2087c98d..a85e315e 100644
--- a/docs/storage-drivers/oss.md
+++ b/docs/storage-drivers/oss.md
@@ -3,6 +3,8 @@
title = "Aliyun OSS storage driver"
description = "Explains how to use the Aliyun OSS storage driver"
keywords = ["registry, service, driver, images, storage, OSS, aliyun"]
+[menu.main]
+parent="smn_storagedrivers"
+++
diff --git a/docs/storage-drivers/s3.md b/docs/storage-drivers/s3.md
index d78fc988..97cfbfc1 100644
--- a/docs/storage-drivers/s3.md
+++ b/docs/storage-drivers/s3.md
@@ -3,6 +3,8 @@
title = "S3 storage driver"
description = "Explains how to use the S3 storage drivers"
keywords = ["registry, service, driver, images, storage, S3"]
+[menu.main]
+parent="smn_storagedrivers"
+++
diff --git a/docs/storage-drivers/swift.md b/docs/storage-drivers/swift.md
index cab0bbd2..0cf582d1 100644
--- a/docs/storage-drivers/swift.md
+++ b/docs/storage-drivers/swift.md
@@ -3,6 +3,8 @@
title = "Swift storage driver"
description = "Explains how to use the OpenStack swift storage driver"
keywords = ["registry, service, driver, images, storage, swift"]
+[menu.main]
+parent="smn_storagedrivers"
+++
diff --git a/docs/storagedrivers.md b/docs/storagedrivers.md
deleted file mode 100644
index ab475c32..00000000
--- a/docs/storagedrivers.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-
-
-# Docker Registry Storage Driver
-
-This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers.
-
-## Provided Drivers
-
-This storage driver package comes bundled with several drivers:
-
-- [inmemory](storage-drivers/inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
-- [filesystem](storage-drivers/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
-- [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket.
-- [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/).
-- [swift](storage-drivers/swift.md): A driver storing objects in [Openstack Swift](http://docs.openstack.org/developer/swift/).
-- [oss](storage-drivers/oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss).
-- [gcs](storage-drivers/gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket.
-
-## Storage Driver API
-
-The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
-
-Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
-
-Storage drivers are intended to be written in Go, providing compile-time
-validation of the `storagedriver.StorageDriver` interface.
-
-## Driver Selection and Configuration
-
-The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.
-
-Storage driver factories may be registered by name using the
-`factory.Register` method, and then later invoked by calling `factory.Create`
-with a driver name and parameters map. If no such storage driver can be found,
-`factory.Create` will return an `InvalidStorageDriverError`.
-
-## Driver Contribution
-
-### Writing new storage drivers
-
-To create a valid storage driver, one must implement the
-`storagedriver.StorageDriver` interface and make sure to expose this driver
-via the factory system.
-
-#### Registering
-
-Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase.
-
-## Testing
-
-Storage driver test suites are provided in
-`storagedriver/testsuites/testsuites.go` and may be used for any storage
-driver written in Go. Tests can be registered using the `RegisterSuite`
-function, which run the same set of tests for any registered drivers.
From d153bfffbdfb7a8b957ea4f57bafadcc1993e0c4 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Tue, 31 May 2016 11:14:25 -0700
Subject: [PATCH 059/546] Revert inadvertant filepath change
Signed-off-by: Richard Scothern
---
cmd/registry/config-dev.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/cmd/registry/config-dev.yml b/cmd/registry/config-dev.yml
index 42c9dc16..b6438be5 100644
--- a/cmd/registry/config-dev.yml
+++ b/cmd/registry/config-dev.yml
@@ -24,7 +24,7 @@ storage:
cache:
blobdescriptor: redis
filesystem:
- rootdirectory: /var/lib/registry-hello-world-clean
+ rootdirectory: /var/lib/registry
maintenance:
uploadpurging:
enabled: false
From bf4eb92f56026c5074466b61dcc4e0885e751d11 Mon Sep 17 00:00:00 2001
From: Sven Dowideit
Date: Tue, 31 May 2016 17:19:47 +0000
Subject: [PATCH 060/546] Add topics to the menu
Signed-off-by: Sven Dowideit
---
docs/compatibility.md | 7 +++++--
docs/deprecated.md | 5 ++++-
docs/garbage-collection.md | 3 +++
docs/index.md | 1 +
docs/insecure.md | 3 +++
5 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/docs/compatibility.md b/docs/compatibility.md
index 9a7bdd42..6ff4c741 100644
--- a/docs/compatibility.md
+++ b/docs/compatibility.md
@@ -1,8 +1,11 @@
@@ -78,4 +81,4 @@ constraints of CAS.*
For this reason if a manifest is pulled by _digest_ from a registry 2.3 with Docker
Engine 1.9 and older, and the manifest was pushed with Docker Engine 1.10, a
security check will cause the Engine to receive a manifest it cannot use and the
-pull will fail.
\ No newline at end of file
+pull will fail.
diff --git a/docs/deprecated.md b/docs/deprecated.md
index 796d3fa5..73bde497 100644
--- a/docs/deprecated.md
+++ b/docs/deprecated.md
@@ -1,8 +1,11 @@
diff --git a/docs/garbage-collection.md b/docs/garbage-collection.md
index 688bfb25..2d03e787 100644
--- a/docs/garbage-collection.md
+++ b/docs/garbage-collection.md
@@ -3,6 +3,9 @@
title = "Garbage Collection"
description = "High level discussion of garbage collection"
keywords = ["registry, garbage, images, tags, repository, distribution"]
+[menu.main]
+parent="smn_registry_ref"
+weight=4
+++
diff --git a/docs/index.md b/docs/index.md
index 2eb5f767..e8be74e9 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -6,6 +6,7 @@ keywords = ["registry, on-prem, images, tags, repository, distribution"]
aliases = ["/registry/overview/"]
[menu.main]
parent="smn_registry"
+weight=1
+++
diff --git a/docs/insecure.md b/docs/insecure.md
index 88055d51..86d85930 100644
--- a/docs/insecure.md
+++ b/docs/insecure.md
@@ -3,6 +3,9 @@
title = "Testing an insecure registry"
description = "Deploying a Registry in an insecure fashion"
keywords = ["registry, on-prem, images, tags, repository, distribution, insecure"]
+[menu.main]
+parent="smn_registry_ref"
+weight=5
+++
From e4acec1806a4609e9e5568704e3b9721ba98e103 Mon Sep 17 00:00:00 2001
From: Sven Dowideit
Date: Wed, 1 Jun 2016 17:59:13 +0000
Subject: [PATCH 061/546] Next load of docs validation fixes
Signed-off-by: Sven Dowideit
---
docs/compatibility.md | 2 +-
docs/deploying.md | 2 +-
docs/index.md | 2 +-
docs/insecure.md | 2 +-
docs/spec/manifest-v2-2.md | 6 +++---
5 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/docs/compatibility.md b/docs/compatibility.md
index 6ff4c741..cba7e378 100644
--- a/docs/compatibility.md
+++ b/docs/compatibility.md
@@ -18,7 +18,7 @@ will cause the Engine to receive a manifest it cannot use and the pull will fail
## Registry Manifest Support
-Historically, the registry has supported a [single manifest type](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md)
+Historically, the registry has supported a [single manifest type](./spec/manifest-v2-1.md)
known as _Schema 1_.
With the move toward multiple architecture images the distribution project
diff --git a/docs/deploying.md b/docs/deploying.md
index b67d5396..ceed44a6 100644
--- a/docs/deploying.md
+++ b/docs/deploying.md
@@ -49,7 +49,7 @@ Specifically, you might want to point your volume location to a specific place i
### Alternatives
-You should usually consider using [another storage backend](https://github.com/docker/distribution/blob/master/docs/storagedrivers.md) instead of the local filesystem. Use the [storage configuration options](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) to configure an alternate storage backend.
+You should usually consider using [another storage backend](./storage-drivers/index.md) instead of the local filesystem. Use the [storage configuration options](./configuration.md#storage) to configure an alternate storage backend.
Using one of these will allow you to more easily scale your registry, and leverage your storage redundancy and availability features.
diff --git a/docs/index.md b/docs/index.md
index e8be74e9..95922284 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -29,7 +29,7 @@ You should use the Registry if you want to:
Users looking for a zero maintenance, ready-to-go solution are encouraged to head-over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more).
-Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/).
+Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](/docker-trusted-registry/overview.md).
## Requirements
diff --git a/docs/insecure.md b/docs/insecure.md
index 86d85930..38b3a355 100644
--- a/docs/insecure.md
+++ b/docs/insecure.md
@@ -63,7 +63,7 @@ This is more secure than the insecure registry solution. You must configure eve
2. Be sure to use the name `myregistrydomain.com` as a CN.
-3. Use the result to [start your registry with TLS enabled](https://github.com/docker/distribution/blob/master/docs/deploying.md#get-a-certificate)
+3. Use the result to [start your registry with TLS enabled](./deploying.md#get-a-certificate)
4. Instruct every docker daemon to trust that certificate.
diff --git a/docs/spec/manifest-v2-2.md b/docs/spec/manifest-v2-2.md
index c5c90648..56e974e2 100644
--- a/docs/spec/manifest-v2-2.md
+++ b/docs/spec/manifest-v2-2.md
@@ -76,7 +76,7 @@ image manifest based on the Content-Type returned in the HTTP response.
- **`digest`** *string*
The digest of the content, as defined by the
- [Registry V2 HTTP API Specificiation](https://docs.docker.com/registry/spec/api/#digest-parameter).
+ [Registry V2 HTTP API Specification](api.md#digest-parameter).
- **`platform`** *object*
@@ -191,7 +191,7 @@ image. It's the direct replacement for the schema-1 manifest.
- **`digest`** *string*
The digest of the content, as defined by the
- [Registry V2 HTTP API Specificiation](https://docs.docker.com/registry/spec/api/#digest-parameter).
+ [Registry V2 HTTP API Specification](api.md#digest-parameter).
- **`layers`** *array*
@@ -214,7 +214,7 @@ image. It's the direct replacement for the schema-1 manifest.
- **`digest`** *string*
The digest of the content, as defined by the
- [Registry V2 HTTP API Specificiation](https://docs.docker.com/registry/spec/api/#digest-parameter).
+ [Registry V2 HTTP API Specification](api.md#digest-parameter).
- **`urls`** *array*
From db90724ab05add8cf882e62f2bed5338b82331f0 Mon Sep 17 00:00:00 2001
From: allencloud
Date: Thu, 2 Jun 2016 13:31:13 +0800
Subject: [PATCH 062/546] fix typos
Signed-off-by: allencloud
---
ROADMAP.md | 2 +-
docs/configuration.md | 2 +-
docs/spec/api.md | 2 +-
docs/spec/api.md.tmpl | 2 +-
docs/spec/manifest-v2-2.md | 2 +-
health/doc.go | 2 +-
registry/auth/htpasswd/htpasswd.go | 2 +-
registry/client/errors.go | 2 +-
registry/handlers/api_test.go | 2 +-
registry/handlers/helpers.go | 2 +-
registry/registry.go | 2 +-
registry/storage/driver/middleware/cloudfront/middleware.go | 2 +-
registry/storage/filereader_test.go | 2 +-
registry/storage/linkedblobstore.go | 2 +-
14 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/ROADMAP.md b/ROADMAP.md
index 9cdfa36c..701127af 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -156,7 +156,7 @@ full and understand the problems behind deletes.
While, at first glance, implementing deleting seems simple, there are a number
mitigating factors that make many solutions not ideal or even pathological in
the context of a registry. The following paragraph discuss the background and
-approaches that could be applied to a arrive at a solution.
+approaches that could be applied to arrive at a solution.
The goal of deletes in any system is to remove unused or unneeded data. Only
data requested for deletion should be removed and no other data. Removing
diff --git a/docs/configuration.md b/docs/configuration.md
index cd8bae16..96df2f0d 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -1032,7 +1032,7 @@ and proxy connections to the registry server.
no
- An array of absolute paths to a x509 CA file
+ An array of absolute paths to an x509 CA file
|
diff --git a/docs/spec/api.md b/docs/spec/api.md
index 56e9242c..aeb6bee9 100644
--- a/docs/spec/api.md
+++ b/docs/spec/api.md
@@ -706,7 +706,7 @@ the uploaded blob data.
###### Digest Parameter
The "digest" parameter is designed as an opaque parameter to support
-verification of a successful transfer. For example, a HTTP URI parameter
+verification of a successful transfer. For example, an HTTP URI parameter
might be as follows:
```
diff --git a/docs/spec/api.md.tmpl b/docs/spec/api.md.tmpl
index 1a9e9f89..13a1fc2e 100644
--- a/docs/spec/api.md.tmpl
+++ b/docs/spec/api.md.tmpl
@@ -706,7 +706,7 @@ the uploaded blob data.
###### Digest Parameter
The "digest" parameter is designed as an opaque parameter to support
-verification of a successful transfer. For example, a HTTP URI parameter
+verification of a successful transfer. For example, an HTTP URI parameter
might be as follows:
```
diff --git a/docs/spec/manifest-v2-2.md b/docs/spec/manifest-v2-2.md
index 56e974e2..fc705639 100644
--- a/docs/spec/manifest-v2-2.md
+++ b/docs/spec/manifest-v2-2.md
@@ -57,7 +57,7 @@ image manifest based on the Content-Type returned in the HTTP response.
The manifests field contains a list of manifests for specific platforms.
- Fields of a object in the manifests list are:
+ Fields of an object in the manifests list are:
- **`mediaType`** *string*
diff --git a/health/doc.go b/health/doc.go
index 194b8a56..8c106b42 100644
--- a/health/doc.go
+++ b/health/doc.go
@@ -2,7 +2,7 @@
// The health package works expvar style. By importing the package the debug
// server is getting a "/debug/health" endpoint that returns the current
// status of the application.
-// If there are no errors, "/debug/health" will return a HTTP 200 status,
+// If there are no errors, "/debug/health" will return an HTTP 200 status,
// together with an empty JSON reply "{}". If there are any checks
// with errors, the JSON reply will include all the failed checks, and the
// response will be have an HTTP 503 status.
diff --git a/registry/auth/htpasswd/htpasswd.go b/registry/auth/htpasswd/htpasswd.go
index 494ad0a7..8e4f6167 100644
--- a/registry/auth/htpasswd/htpasswd.go
+++ b/registry/auth/htpasswd/htpasswd.go
@@ -46,7 +46,7 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err
// parseHTPasswd parses the contents of htpasswd. This will read all the
// entries in the file, whether or not they are needed. An error is returned
-// if an syntax errors are encountered or if the reader fails.
+// if any syntax errors are encountered or if the reader fails.
func parseHTPasswd(rd io.Reader) (map[string][]byte, error) {
entries := map[string][]byte{}
scanner := bufio.NewScanner(rd)
diff --git a/registry/client/errors.go b/registry/client/errors.go
index adbaacf4..7606d0c9 100644
--- a/registry/client/errors.go
+++ b/registry/client/errors.go
@@ -11,7 +11,7 @@ import (
"github.com/docker/distribution/registry/api/errcode"
)
-// ErrNoErrorsInBody is returned when a HTTP response body parses to an empty
+// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
// errcode.Errors slice.
var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go
index 01fd4f4c..076207ed 100644
--- a/registry/handlers/api_test.go
+++ b/registry/handlers/api_test.go
@@ -926,7 +926,7 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
}
// TODO(stevvooe): Add a test case where we take a mostly valid registry,
- // tamper with the content and ensure that we get a unverified manifest
+ // tamper with the content and ensure that we get an unverified manifest
// error.
// Push 2 random layers
diff --git a/registry/handlers/helpers.go b/registry/handlers/helpers.go
index b56c1566..dac4f7a8 100644
--- a/registry/handlers/helpers.go
+++ b/registry/handlers/helpers.go
@@ -20,7 +20,7 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
})
}
-// copyFullPayload copies the payload of a HTTP request to destWriter. If it
+// copyFullPayload copies the payload of an HTTP request to destWriter. If it
// receives less content than expected, and the client disconnected during the
// upload, it avoids sending a 400 error to keep the logs cleaner.
func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error {
diff --git a/registry/registry.go b/registry/registry.go
index a1ba3b1a..aec6a030 100644
--- a/registry/registry.go
+++ b/registry/registry.go
@@ -267,7 +267,7 @@ func logLevel(level configuration.Loglevel) log.Level {
return l
}
-// panicHandler add a HTTP handler to web app. The handler recover the happening
+// panicHandler add an HTTP handler to web app. The handler recover the happening
// panic. logrus.Panic transmits panic message to pre-config log hooks, which is
// defined in config.yml.
func panicHandler(handler http.Handler) http.Handler {
diff --git a/registry/storage/driver/middleware/cloudfront/middleware.go b/registry/storage/driver/middleware/cloudfront/middleware.go
index 9162c09d..b0618d1a 100644
--- a/registry/storage/driver/middleware/cloudfront/middleware.go
+++ b/registry/storage/driver/middleware/cloudfront/middleware.go
@@ -18,7 +18,7 @@ import (
storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
)
-// cloudFrontStorageMiddleware provides an simple implementation of layerHandler that
+// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that
// constructs temporary signed CloudFront URLs from the storagedriver layer URL,
// then issues HTTP Temporary Redirects to this CloudFront content URL.
type cloudFrontStorageMiddleware struct {
diff --git a/registry/storage/filereader_test.go b/registry/storage/filereader_test.go
index 774a864b..f43873b3 100644
--- a/registry/storage/filereader_test.go
+++ b/registry/storage/filereader_test.go
@@ -183,7 +183,7 @@ func TestFileReaderNonExistentFile(t *testing.T) {
// conditions that can arise when reading a layer.
func TestFileReaderErrors(t *testing.T) {
// TODO(stevvooe): We need to cover error return types, driven by the
- // errors returned via the HTTP API. For now, here is a incomplete list:
+ // errors returned via the HTTP API. For now, here is an incomplete list:
//
// 1. Layer Not Found: returned when layer is not found or access is
// denied.
diff --git a/registry/storage/linkedblobstore.go b/registry/storage/linkedblobstore.go
index 68a347b4..d254bbb8 100644
--- a/registry/storage/linkedblobstore.go
+++ b/registry/storage/linkedblobstore.go
@@ -35,7 +35,7 @@ type linkedBlobStore struct {
// control the repository blob link set to which the blob store
// dispatches. This is required because manifest and layer blobs have not
// yet been fully merged. At some point, this functionality should be
- // removed an the blob links folder should be merged. The first entry is
+ // removed and the blob links folder should be merged. The first entry is
// treated as the "canonical" link location and will be used for writes.
linkPathFns []linkPathFunc
From b4484550117b8a05d06333e0fd11b84572549c11 Mon Sep 17 00:00:00 2001
From: Sven Dowideit
Date: Thu, 2 Jun 2016 18:22:02 +0000
Subject: [PATCH 063/546] Revert to using the full URL, as the OSS checker
doesn't have access to the non-OSS docs
Signed-off-by: Sven Dowideit
---
docs/index.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/index.md b/docs/index.md
index 95922284..21ec7a9a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -29,7 +29,7 @@ You should use the Registry if you want to:
Users looking for a zero maintenance, ready-to-go solution are encouraged to head-over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more).
-Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](/docker-trusted-registry/overview.md).
+Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/overview/).
## Requirements
From a3965b93bcaf6e6a0e7c4246250d429a04408cd7 Mon Sep 17 00:00:00 2001
From: Eric Yang
Date: Tue, 7 Jun 2016 11:00:37 +0800
Subject: [PATCH 064/546] Fixing link in readme
Fixing link in readme
Signed-off-by: Qizhao Yang
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index c21d4724..d35bcb68 100644
--- a/README.md
+++ b/README.md
@@ -83,7 +83,7 @@ created. For more information see [docker/migrator]
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project. If you are contributing code, see
-the instructions for [building a development environment](docs/building.md).
+the instructions for [building a development environment](docs/recipes/building.md).
## Support
From 4901f1b2781c5de0d06a18a3990bf091bcd4f081 Mon Sep 17 00:00:00 2001
From: Antonio Murdaca
Date: Wed, 8 Jun 2016 10:19:15 +0200
Subject: [PATCH 065/546] registry: use const for status code 429
Signed-off-by: Antonio Murdaca
---
registry/api/errcode/register.go | 5 +----
registry/client/errors.go | 5 +----
2 files changed, 2 insertions(+), 8 deletions(-)
diff --git a/registry/api/errcode/register.go b/registry/api/errcode/register.go
index 7489e84f..71cf6f7a 100644
--- a/registry/api/errcode/register.go
+++ b/registry/api/errcode/register.go
@@ -71,10 +71,7 @@ var (
Message: "too many requests",
Description: `Returned when a client attempts to contact a
service too many times`,
- // FIXME: go1.5 doesn't export http.StatusTooManyRequests while
- // go1.6 does. Update the hardcoded value to the constant once
- // Docker updates golang version to 1.6.
- HTTPStatusCode: 429,
+ HTTPStatusCode: http.StatusTooManyRequests,
})
)
diff --git a/registry/client/errors.go b/registry/client/errors.go
index adbaacf4..804e69e0 100644
--- a/registry/client/errors.go
+++ b/registry/client/errors.go
@@ -54,10 +54,7 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
switch statusCode {
case http.StatusUnauthorized:
return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
- // FIXME: go1.5 doesn't export http.StatusTooManyRequests while
- // go1.6 does. Update the hardcoded value to the constant once
- // Docker updates golang version to 1.6.
- case 429:
+ case http.StatusTooManyRequests:
return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
default:
return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
From fc9aa2f351c9f780b77ebada9d87f098d1ebd3ba Mon Sep 17 00:00:00 2001
From: zhouhaibing089
Date: Wed, 8 Jun 2016 22:43:28 +0800
Subject: [PATCH 066/546] incorrect comment about auth type
Signed-off-by: haibzhou
---
configuration/configuration.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/configuration/configuration.go b/configuration/configuration.go
index 59c90fde..d38975f2 100644
--- a/configuration/configuration.go
+++ b/configuration/configuration.go
@@ -414,7 +414,7 @@ func (storage Storage) MarshalYAML() (interface{}, error) {
// Auth defines the configuration for registry authorization.
type Auth map[string]Parameters
-// Type returns the storage driver type, such as filesystem or s3
+// Type returns the auth type, such as htpasswd or token
func (auth Auth) Type() string {
// Return only key in this map
for k := range auth {
From 352924cd854d83024a71d42c7893bc873a156485 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Wed, 8 Jun 2016 10:39:17 -0700
Subject: [PATCH 067/546] Clarify API documentation around catalog fetch
behavior
Signed-off-by: Richard Scothern
---
docs/spec/api.md | 4 ++--
registry/api/v2/descriptors.go | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/spec/api.md b/docs/spec/api.md
index aeb6bee9..39041146 100644
--- a/docs/spec/api.md
+++ b/docs/spec/api.md
@@ -4765,13 +4765,13 @@ List a set of available repositories in the local registry cluster. Does not pro
Retrieve a sorted, json list of repositories available in the registry.
-##### Catalog Fetch Complete
+##### Catalog Fetch
```
GET /v2/_catalog
```
-Request an unabridged list of repositories available.
+Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.
diff --git a/registry/api/v2/descriptors.go b/registry/api/v2/descriptors.go
index 58279994..fc42c1c4 100644
--- a/registry/api/v2/descriptors.go
+++ b/registry/api/v2/descriptors.go
@@ -1497,8 +1497,8 @@ var routeDescriptors = []RouteDescriptor{
Description: "Retrieve a sorted, json list of repositories available in the registry.",
Requests: []RequestDescriptor{
{
- Name: "Catalog Fetch Complete",
- Description: "Request an unabridged list of repositories available.",
+ Name: "Catalog Fetch",
+ Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.",
Successes: []ResponseDescriptor{
{
Description: "Returns the unabridged list of repositories as a json response.",
From 125f4ff7d7fbb5298e4ddf18db177d6583571cfa Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Wed, 8 Jun 2016 17:02:29 -0700
Subject: [PATCH 068/546] Add option to get content digest from manifest get
The client may need the content digest to delete a manifest using the digest used by the registry.
Signed-off-by: Derek McGowan (github: dmcgowan)
---
registry/client/repository.go | 23 +++++++++++++++++++++++
registry/client/repository_test.go | 29 ++++++++++++++++++++++-------
2 files changed, 45 insertions(+), 7 deletions(-)
diff --git a/registry/client/repository.go b/registry/client/repository.go
index 8cc5f7f9..323ab508 100644
--- a/registry/client/repository.go
+++ b/registry/client/repository.go
@@ -394,11 +394,26 @@ func (o etagOption) Apply(ms distribution.ManifestService) error {
return fmt.Errorf("etag options is a client-only option")
}
+// ReturnContentDigest allows a client to set the content digest on
+// a successful request from the 'Docker-Content-Digest' header. This
+// returned digest represents the digest which the registry uses
+// to refer to the content and can be used to delete the content.
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
+ return contentDigestOption{dgst}
+}
+
+type contentDigestOption struct{ digest *digest.Digest }
+
+func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
+ return nil
+}
+
func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
var (
digestOrTag string
ref reference.Named
err error
+ contentDgst *digest.Digest
)
for _, option := range options {
@@ -408,6 +423,8 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
if err != nil {
return nil, err
}
+ } else if opt, ok := option.(contentDigestOption); ok {
+ contentDgst = opt.digest
} else {
err := option.Apply(ms)
if err != nil {
@@ -450,6 +467,12 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
if resp.StatusCode == http.StatusNotModified {
return nil, distribution.ErrManifestNotModified
} else if SuccessStatus(resp.StatusCode) {
+ if contentDgst != nil {
+ dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest"))
+ if err == nil {
+ *contentDgst = dgst
+ }
+ }
mt := resp.Header.Get("Content-Type")
body, err := ioutil.ReadAll(resp.Body)
diff --git a/registry/client/repository_test.go b/registry/client/repository_test.go
index 2faeb276..19b6ca2c 100644
--- a/registry/client/repository_test.go
+++ b/registry/client/repository_test.go
@@ -605,6 +605,14 @@ func addTestManifestWithEtag(repo reference.Named, reference string, content []b
*m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag})
}
+func contentDigestString(mediatype string, content []byte) string {
+ if mediatype == schema1.MediaTypeSignedManifest {
+ m, _, _ := distribution.UnmarshalManifest(mediatype, content)
+ content = m.(*schema1.SignedManifest).Canonical
+ }
+ return digest.Canonical.FromBytes(content).String()
+}
+
func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) {
*m = append(*m, testutil.RequestResponseMapping{
Request: testutil.Request{
@@ -615,9 +623,10 @@ func addTestManifest(repo reference.Named, reference string, mediatype string, c
StatusCode: http.StatusOK,
Body: content,
Headers: http.Header(map[string][]string{
- "Content-Length": {fmt.Sprint(len(content))},
- "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
- "Content-Type": {mediatype},
+ "Content-Length": {fmt.Sprint(len(content))},
+ "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
+ "Content-Type": {mediatype},
+ "Docker-Content-Digest": {contentDigestString(mediatype, content)},
}),
},
})
@@ -629,9 +638,10 @@ func addTestManifest(repo reference.Named, reference string, mediatype string, c
Response: testutil.Response{
StatusCode: http.StatusOK,
Headers: http.Header(map[string][]string{
- "Content-Length": {fmt.Sprint(len(content))},
- "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
- "Content-Type": {mediatype},
+ "Content-Length": {fmt.Sprint(len(content))},
+ "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
+ "Content-Type": {mediatype},
+ "Docker-Content-Digest": {digest.Canonical.FromBytes(content).String()},
}),
},
})
@@ -710,7 +720,8 @@ func TestV1ManifestFetch(t *testing.T) {
t.Fatal(err)
}
- manifest, err = ms.Get(ctx, dgst, distribution.WithTag("latest"))
+ var contentDigest digest.Digest
+ manifest, err = ms.Get(ctx, dgst, distribution.WithTag("latest"), ReturnContentDigest(&contentDigest))
if err != nil {
t.Fatal(err)
}
@@ -723,6 +734,10 @@ func TestV1ManifestFetch(t *testing.T) {
t.Fatal(err)
}
+ if contentDigest != dgst {
+ t.Fatalf("Unexpected returned content digest %v, expected %v", contentDigest, dgst)
+ }
+
manifest, err = ms.Get(ctx, dgst, distribution.WithTag("badcontenttype"))
if err != nil {
t.Fatal(err)
From 1c999392214614d7f97b2cc450d26ea3b3342970 Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Thu, 9 Jun 2016 17:35:14 -0700
Subject: [PATCH 069/546] Vendor letsencrypt packages
Signed-off-by: Derek McGowan (github: dmcgowan)
---
Godeps/Godeps.json | 12 +
vendor/golang.org/x/crypto/ocsp/ocsp.go | 592 +++++++
vendor/golang.org/x/time/LICENSE | 27 +
vendor/golang.org/x/time/PATENTS | 22 +
vendor/golang.org/x/time/rate/rate.go | 368 ++++
vendor/rsc.io/letsencrypt/LICENSE | 27 +
vendor/rsc.io/letsencrypt/README | 152 ++
vendor/rsc.io/letsencrypt/lets.go | 753 +++++++++
.../vendor/github.com/xenolf/lego/LICENSE | 21 +
.../github.com/xenolf/lego/acme/challenges.go | 16 +
.../github.com/xenolf/lego/acme/client.go | 638 +++++++
.../xenolf/lego/acme/client_test.go | 198 +++
.../github.com/xenolf/lego/acme/crypto.go | 323 ++++
.../xenolf/lego/acme/crypto_test.go | 93 ++
.../github.com/xenolf/lego/acme/error.go | 73 +
.../github.com/xenolf/lego/acme/http.go | 117 ++
.../xenolf/lego/acme/http_challenge.go | 41 +
.../xenolf/lego/acme/http_challenge_server.go | 79 +
.../xenolf/lego/acme/http_challenge_test.go | 57 +
.../github.com/xenolf/lego/acme/http_test.go | 100 ++
.../vendor/github.com/xenolf/lego/acme/jws.go | 107 ++
.../github.com/xenolf/lego/acme/messages.go | 115 ++
.../github.com/xenolf/lego/acme/provider.go | 28 +
.../xenolf/lego/acme/tls_sni_challenge.go | 73 +
.../lego/acme/tls_sni_challenge_server.go | 62 +
.../lego/acme/tls_sni_challenge_test.go | 65 +
.../github.com/xenolf/lego/acme/utils.go | 29 +
.../github.com/xenolf/lego/acme/utils_test.go | 26 +
.../gopkg.in/square/go-jose.v1/BUG-BOUNTY.md | 10 +
.../square/go-jose.v1/CONTRIBUTING.md | 14 +
.../vendor/gopkg.in/square/go-jose.v1/LICENSE | 202 +++
.../gopkg.in/square/go-jose.v1/README.md | 209 +++
.../gopkg.in/square/go-jose.v1/asymmetric.go | 498 ++++++
.../square/go-jose.v1/asymmetric_test.go | 431 +++++
.../square/go-jose.v1/cipher/cbc_hmac.go | 196 +++
.../square/go-jose.v1/cipher/cbc_hmac_test.go | 498 ++++++
.../square/go-jose.v1/cipher/concat_kdf.go | 75 +
.../go-jose.v1/cipher/concat_kdf_test.go | 150 ++
.../square/go-jose.v1/cipher/ecdh_es.go | 51 +
.../square/go-jose.v1/cipher/ecdh_es_test.go | 98 ++
.../square/go-jose.v1/cipher/key_wrap.go | 109 ++
.../square/go-jose.v1/cipher/key_wrap_test.go | 133 ++
.../gopkg.in/square/go-jose.v1/crypter.go | 349 ++++
.../square/go-jose.v1/crypter_test.go | 784 +++++++++
.../vendor/gopkg.in/square/go-jose.v1/doc.go | 26 +
.../gopkg.in/square/go-jose.v1/doc_test.go | 226 +++
.../gopkg.in/square/go-jose.v1/encoding.go | 191 +++
.../square/go-jose.v1/encoding_test.go | 173 ++
.../gopkg.in/square/go-jose.v1/json/LICENSE | 27 +
.../gopkg.in/square/go-jose.v1/json/README.md | 13 +
.../square/go-jose.v1/json/bench_test.go | 223 +++
.../gopkg.in/square/go-jose.v1/json/decode.go | 1183 +++++++++++++
.../square/go-jose.v1/json/decode_test.go | 1474 +++++++++++++++++
.../gopkg.in/square/go-jose.v1/json/encode.go | 1197 +++++++++++++
.../square/go-jose.v1/json/encode_test.go | 538 ++++++
.../gopkg.in/square/go-jose.v1/json/indent.go | 141 ++
.../square/go-jose.v1/json/number_test.go | 133 ++
.../square/go-jose.v1/json/scanner.go | 623 +++++++
.../square/go-jose.v1/json/scanner_test.go | 316 ++++
.../gopkg.in/square/go-jose.v1/json/stream.go | 480 ++++++
.../square/go-jose.v1/json/stream_test.go | 354 ++++
.../square/go-jose.v1/json/tagkey_test.go | 115 ++
.../gopkg.in/square/go-jose.v1/json/tags.go | 44 +
.../square/go-jose.v1/json/tags_test.go | 28 +
.../go-jose.v1/json/testdata/code.json.gz | Bin 0 -> 120432 bytes
.../gopkg.in/square/go-jose.v1/json_fork.go | 31 +
.../square/go-jose.v1/json_fork_test.go | 76 +
.../gopkg.in/square/go-jose.v1/json_std.go | 31 +
.../square/go-jose.v1/json_std_test.go | 106 ++
.../vendor/gopkg.in/square/go-jose.v1/jwe.go | 278 ++++
.../gopkg.in/square/go-jose.v1/jwe_test.go | 537 ++++++
.../vendor/gopkg.in/square/go-jose.v1/jwk.go | 380 +++++
.../gopkg.in/square/go-jose.v1/jwk_test.go | 525 ++++++
.../vendor/gopkg.in/square/go-jose.v1/jws.go | 252 +++
.../gopkg.in/square/go-jose.v1/jws_test.go | 302 ++++
.../gopkg.in/square/go-jose.v1/shared.go | 224 +++
.../gopkg.in/square/go-jose.v1/signing.go | 218 +++
.../square/go-jose.v1/signing_test.go | 447 +++++
.../gopkg.in/square/go-jose.v1/symmetric.go | 349 ++++
.../square/go-jose.v1/symmetric_test.go | 131 ++
.../gopkg.in/square/go-jose.v1/utils.go | 74 +
.../gopkg.in/square/go-jose.v1/utils_test.go | 225 +++
vendor/rsc.io/letsencrypt/vendor/vendor.json | 31 +
83 files changed, 19743 insertions(+)
create mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go
create mode 100644 vendor/golang.org/x/time/LICENSE
create mode 100644 vendor/golang.org/x/time/PATENTS
create mode 100644 vendor/golang.org/x/time/rate/rate.go
create mode 100644 vendor/rsc.io/letsencrypt/LICENSE
create mode 100644 vendor/rsc.io/letsencrypt/README
create mode 100644 vendor/rsc.io/letsencrypt/lets.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/indent.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/number_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/scanner.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/stream.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/stream_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tags.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tags_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gz
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_fork.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_fork_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_std.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_std_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwe.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwe_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwk.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwk_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jws.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jws_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/shared.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/signing.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/signing_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/symmetric.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/symmetric_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/utils.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/utils_test.go
create mode 100644 vendor/rsc.io/letsencrypt/vendor/vendor.json
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 56cd2dd9..8597ea43 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -270,6 +270,10 @@
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b"
},
+ {
+ "ImportPath": "golang.org/x/crypto/ocsp",
+ "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b"
+ },
{
"ImportPath": "golang.org/x/net/context",
"Rev": "4876518f9e71663000c348837735820161a42df7"
@@ -314,6 +318,10 @@
"ImportPath": "golang.org/x/oauth2/jwt",
"Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf"
},
+ {
+ "ImportPath": "golang.org/x/time/rate",
+ "Rev": "a4bde12657593d5e90d0533a3e4fd95e635124cb"
+ },
{
"ImportPath": "google.golang.org/api/gensupport",
"Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54"
@@ -425,6 +433,10 @@
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420"
+ },
+ {
+ "ImportPath": "rsc.io/letsencrypt",
+ "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
}
]
}
diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
new file mode 100644
index 00000000..f6a1bd4d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go
@@ -0,0 +1,592 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses
+// are signed messages attesting to the validity of a certificate for a small
+// period of time. This is used to manage revocation for X.509 certificates.
+package ocsp
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "math/big"
+ "time"
+)
+
+var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1})
+
+// These are internal structures that reflect the ASN.1 structure of an OCSP
+// response. See RFC 2560, section 4.2.
+
+const (
+ ocspSuccess = 0
+ ocspMalformed = 1
+ ocspInternalError = 2
+ ocspTryLater = 3
+ ocspSigRequired = 4
+ ocspUnauthorized = 5
+)
+
+type certID struct {
+ HashAlgorithm pkix.AlgorithmIdentifier
+ NameHash []byte
+ IssuerKeyHash []byte
+ SerialNumber *big.Int
+}
+
+// https://tools.ietf.org/html/rfc2560#section-4.1.1
+type ocspRequest struct {
+ TBSRequest tbsRequest
+}
+
+type tbsRequest struct {
+ Version int `asn1:"explicit,tag:0,default:0,optional"`
+ RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"`
+ RequestList []request
+}
+
+type request struct {
+ Cert certID
+}
+
+type responseASN1 struct {
+ Status asn1.Enumerated
+ Response responseBytes `asn1:"explicit,tag:0"`
+}
+
+type responseBytes struct {
+ ResponseType asn1.ObjectIdentifier
+ Response []byte
+}
+
+type basicResponse struct {
+ TBSResponseData responseData
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ Signature asn1.BitString
+ Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"`
+}
+
+type responseData struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,default:1,explicit,tag:0"`
+ RawResponderName asn1.RawValue `asn1:"optional,explicit,tag:1"`
+ KeyHash []byte `asn1:"optional,explicit,tag:2"`
+ ProducedAt time.Time `asn1:"generalized"`
+ Responses []singleResponse
+}
+
+type singleResponse struct {
+ CertID certID
+ Good asn1.Flag `asn1:"tag:0,optional"`
+ Revoked revokedInfo `asn1:"explicit,tag:1,optional"`
+ Unknown asn1.Flag `asn1:"tag:2,optional"`
+ ThisUpdate time.Time `asn1:"generalized"`
+ NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"`
+}
+
+type revokedInfo struct {
+ RevocationTime time.Time `asn1:"generalized"`
+ Reason int `asn1:"explicit,tag:0,optional"`
+}
+
+var (
+ oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+ oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+ oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+ oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+ oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2}
+ oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+ oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+ oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+ oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+)
+
+var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{
+ crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}),
+ crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}),
+ crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}),
+ crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}),
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+var signatureAlgorithmDetails = []struct {
+ algo x509.SignatureAlgorithm
+ oid asn1.ObjectIdentifier
+ pubKeyAlgo x509.PublicKeyAlgorithm
+ hash crypto.Hash
+}{
+ {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
+ {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
+ {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
+ {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
+ {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
+ {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
+ {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
+ {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
+ {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
+ {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
+ {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
+ {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
+ var pubType x509.PublicKeyAlgorithm
+
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ pubType = x509.RSA
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureSHA256WithRSA
+ sigAlgo.Parameters = asn1.RawValue{
+ Tag: 5,
+ }
+
+ case *ecdsa.PublicKey:
+ pubType = x509.ECDSA
+
+ switch pub.Curve {
+ case elliptic.P224(), elliptic.P256():
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
+ case elliptic.P384():
+ hashFunc = crypto.SHA384
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
+ case elliptic.P521():
+ hashFunc = crypto.SHA512
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
+ default:
+ err = errors.New("x509: unknown elliptic curve")
+ }
+
+ default:
+ err = errors.New("x509: only RSA and ECDSA keys supported")
+ }
+
+ if err != nil {
+ return
+ }
+
+ if requestedSigAlgo == 0 {
+ return
+ }
+
+ found := false
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == requestedSigAlgo {
+ if details.pubKeyAlgo != pubType {
+ err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
+ return
+ }
+ sigAlgo.Algorithm, hashFunc = details.oid, details.hash
+ if hashFunc == 0 {
+ err = errors.New("x509: cannot sign with hash function requested")
+ return
+ }
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ err = errors.New("x509: unknown SignatureAlgorithm")
+ }
+
+ return
+}
+
+// TODO(agl): this is taken from crypto/x509 and so should probably be exported
+// from crypto/x509 or crypto/x509/pkix.
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm {
+ for _, details := range signatureAlgorithmDetails {
+ if oid.Equal(details.oid) {
+ return details.algo
+ }
+ }
+ return x509.UnknownSignatureAlgorithm
+}
+
+// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form.
+func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash {
+ for hash, oid := range hashOIDs {
+ if oid.Equal(target) {
+ return hash
+ }
+ }
+ return crypto.Hash(0)
+}
+
+// This is the exposed reflection of the internal OCSP structures.
+
+const (
+ // Good means that the certificate is valid.
+ Good = iota
+ // Revoked means that the certificate has been deliberately revoked.
+ Revoked = iota
+ // Unknown means that the OCSP responder doesn't know about the certificate.
+ Unknown = iota
+ // ServerFailed means that the OCSP responder failed to process the request.
+ ServerFailed = iota
+)
+
+// Request represents an OCSP request. See RFC 2560.
+type Request struct {
+ HashAlgorithm crypto.Hash
+ IssuerNameHash []byte
+ IssuerKeyHash []byte
+ SerialNumber *big.Int
+}
+
+// Response represents an OCSP response. See RFC 2560.
+type Response struct {
+ // Status is one of {Good, Revoked, Unknown, ServerFailed}
+ Status int
+ SerialNumber *big.Int
+ ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time
+ RevocationReason int
+ Certificate *x509.Certificate
+ // TBSResponseData contains the raw bytes of the signed response. If
+ // Certificate is nil then this can be used to verify Signature.
+ TBSResponseData []byte
+ Signature []byte
+ SignatureAlgorithm x509.SignatureAlgorithm
+}
+
+// These are pre-serialized error responses for the various non-success codes
+// defined by OCSP. The Unauthorized code in particular can be used by an OCSP
+// responder that supports only pre-signed responses as a response to requests
+// for certificates with unknown status. See RFC 5019.
+var (
+ MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}
+ InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}
+ TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}
+ SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05}
+ UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}
+)
+
+// CheckSignatureFrom checks that the signature in resp is a valid signature
+// from issuer. This should only be used if resp.Certificate is nil. Otherwise,
+// the OCSP response contained an intermediate certificate that created the
+// signature. That signature is checked by ParseResponse and only
+// resp.Certificate remains to be validated.
+func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error {
+ return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature)
+}
+
+// ParseError results from an invalid OCSP response.
+type ParseError string
+
+func (p ParseError) Error() string {
+ return string(p)
+}
+
+// ParseRequest parses an OCSP request in DER form. It only supports
+// requests for a single certificate. Signed requests are not supported.
+// If a request includes a signature, it will result in a ParseError.
+func ParseRequest(bytes []byte) (*Request, error) {
+ var req ocspRequest
+ rest, err := asn1.Unmarshal(bytes, &req)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP request")
+ }
+
+ if len(req.TBSRequest.RequestList) == 0 {
+ return nil, ParseError("OCSP request contains no request body")
+ }
+ innerRequest := req.TBSRequest.RequestList[0]
+
+ hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm)
+ if hashFunc == crypto.Hash(0) {
+ return nil, ParseError("OCSP request uses unknown hash function")
+ }
+
+ return &Request{
+ HashAlgorithm: hashFunc,
+ IssuerNameHash: innerRequest.Cert.NameHash,
+ IssuerKeyHash: innerRequest.Cert.IssuerKeyHash,
+ SerialNumber: innerRequest.Cert.SerialNumber,
+ }, nil
+}
+
+// ParseResponse parses an OCSP response in DER form. It only supports
+// responses for a single certificate. If the response contains a certificate
+// then the signature over the response is checked. If issuer is not nil then
+// it will be used to validate the signature or embedded certificate. Invalid
+// signatures or parse failures will result in a ParseError.
+func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) {
+ var resp responseASN1
+ rest, err := asn1.Unmarshal(bytes, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP response")
+ }
+
+ ret := new(Response)
+ if resp.Status != ocspSuccess {
+ ret.Status = ServerFailed
+ return ret, nil
+ }
+
+ if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) {
+ return nil, ParseError("bad OCSP response type")
+ }
+
+ var basicResp basicResponse
+ rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(basicResp.Certificates) > 1 {
+ return nil, ParseError("OCSP response contains bad number of certificates")
+ }
+
+ if len(basicResp.TBSResponseData.Responses) != 1 {
+ return nil, ParseError("OCSP response contains bad number of responses")
+ }
+
+ ret.TBSResponseData = basicResp.TBSResponseData.Raw
+ ret.Signature = basicResp.Signature.RightAlign()
+ ret.SignatureAlgorithm = getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm)
+
+ if len(basicResp.Certificates) > 0 {
+ ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := ret.CheckSignatureFrom(ret.Certificate); err != nil {
+ return nil, ParseError("bad OCSP signature")
+ }
+
+ if issuer != nil {
+ if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil {
+ return nil, ParseError("bad signature on embedded certificate")
+ }
+ }
+ } else if issuer != nil {
+ if err := ret.CheckSignatureFrom(issuer); err != nil {
+ return nil, ParseError("bad OCSP signature")
+ }
+ }
+
+ r := basicResp.TBSResponseData.Responses[0]
+
+ ret.SerialNumber = r.CertID.SerialNumber
+
+ switch {
+ case bool(r.Good):
+ ret.Status = Good
+ case bool(r.Unknown):
+ ret.Status = Unknown
+ default:
+ ret.Status = Revoked
+ ret.RevokedAt = r.Revoked.RevocationTime
+ ret.RevocationReason = r.Revoked.Reason
+ }
+
+ ret.ProducedAt = basicResp.TBSResponseData.ProducedAt
+ ret.ThisUpdate = r.ThisUpdate
+ ret.NextUpdate = r.NextUpdate
+
+ return ret, nil
+}
+
+// RequestOptions contains options for constructing OCSP requests.
+type RequestOptions struct {
+ // Hash contains the hash function that should be used when
+ // constructing the OCSP request. If zero, SHA-1 will be used.
+ Hash crypto.Hash
+}
+
+func (opts *RequestOptions) hash() crypto.Hash {
+ if opts == nil || opts.Hash == 0 {
+ // SHA-1 is nearly universally used in OCSP.
+ return crypto.SHA1
+ }
+ return opts.Hash
+}
+
+// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If
+// opts is nil then sensible defaults are used.
+func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) {
+ hashFunc := opts.hash()
+
+ // OCSP seems to be the only place where these raw hash identifiers are
+ // used. I took the following from
+ // http://msdn.microsoft.com/en-us/library/ff635603.aspx
+ var hashOID asn1.ObjectIdentifier
+ hashOID, ok := hashOIDs[hashFunc]
+ if !ok {
+ return nil, x509.ErrUnsupportedAlgorithm
+ }
+
+ if !hashFunc.Available() {
+ return nil, x509.ErrUnsupportedAlgorithm
+ }
+ h := opts.hash().New()
+
+ var publicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+ return nil, err
+ }
+
+ h.Write(publicKeyInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(issuer.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ return asn1.Marshal(ocspRequest{
+ tbsRequest{
+ Version: 0,
+ RequestList: []request{
+ {
+ Cert: certID{
+ pkix.AlgorithmIdentifier{
+ Algorithm: hashOID,
+ Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+ },
+ issuerNameHash,
+ issuerKeyHash,
+ cert.SerialNumber,
+ },
+ },
+ },
+ },
+ })
+}
+
+// CreateResponse returns a DER-encoded OCSP response with the specified contents.
+// The fields in the response are populated as follows:
+//
+// The responder cert is used to populate the ResponderName field, and the certificate
+// itself is provided alongside the OCSP response signature.
+//
+// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
+// (SHA-1 is used for the hash function; this is not configurable.)
+//
+// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt,
+// RevocationReason, ThisUpdate, and NextUpdate fields.
+//
+// The ProducedAt date is automatically set to the current date, to the nearest minute.
+func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) {
+ var publicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+ return nil, err
+ }
+
+ h := sha1.New()
+ h.Write(publicKeyInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(issuer.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ innerResponse := singleResponse{
+ CertID: certID{
+ HashAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: hashOIDs[crypto.SHA1],
+ Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+ },
+ NameHash: issuerNameHash,
+ IssuerKeyHash: issuerKeyHash,
+ SerialNumber: template.SerialNumber,
+ },
+ ThisUpdate: template.ThisUpdate.UTC(),
+ NextUpdate: template.NextUpdate.UTC(),
+ }
+
+ switch template.Status {
+ case Good:
+ innerResponse.Good = true
+ case Unknown:
+ innerResponse.Unknown = true
+ case Revoked:
+ innerResponse.Revoked = revokedInfo{
+ RevocationTime: template.RevokedAt.UTC(),
+ Reason: template.RevocationReason,
+ }
+ }
+
+ responderName := asn1.RawValue{
+ Class: 2, // context-specific
+ Tag: 1, // explicit tag
+ IsCompound: true,
+ Bytes: responderCert.RawSubject,
+ }
+ tbsResponseData := responseData{
+ Version: 0,
+ RawResponderName: responderName,
+ ProducedAt: time.Now().Truncate(time.Minute).UTC(),
+ Responses: []singleResponse{innerResponse},
+ }
+
+ tbsResponseDataDER, err := asn1.Marshal(tbsResponseData)
+ if err != nil {
+ return nil, err
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ responseHash := hashFunc.New()
+ responseHash.Write(tbsResponseDataDER)
+ signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc)
+ if err != nil {
+ return nil, err
+ }
+
+ response := basicResponse{
+ TBSResponseData: tbsResponseData,
+ SignatureAlgorithm: signatureAlgorithm,
+ Signature: asn1.BitString{
+ Bytes: signature,
+ BitLength: 8 * len(signature),
+ },
+ }
+ if template.Certificate != nil {
+ response.Certificates = []asn1.RawValue{
+ asn1.RawValue{FullBytes: template.Certificate.Raw},
+ }
+ }
+ responseDER, err := asn1.Marshal(response)
+ if err != nil {
+ return nil, err
+ }
+
+ return asn1.Marshal(responseASN1{
+ Status: ocspSuccess,
+ Response: responseBytes{
+ ResponseType: idPKIXOCSPBasic,
+ Response: responseDER,
+ },
+ })
+}
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/golang.org/x/time/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS
new file mode 100644
index 00000000..73309904
--- /dev/null
+++ b/vendor/golang.org/x/time/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
new file mode 100644
index 00000000..2131b921
--- /dev/null
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -0,0 +1,368 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rate provides a rate limiter.
+package rate
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Limit defines the maximum frequency of some events.
+// Limit is represented as number of events per second.
+// A zero Limit allows no events.
+type Limit float64
+
+// Inf is the infinite rate limit; it allows all events (even if burst is zero).
+const Inf = Limit(math.MaxFloat64)
+
+// Every converts a minimum time interval between events to a Limit.
+func Every(interval time.Duration) Limit {
+ if interval <= 0 {
+ return Inf
+ }
+ return 1 / Limit(interval.Seconds())
+}
+
+// A Limiter controls how frequently events are allowed to happen.
+// It implements a "token bucket" of size b, initially full and refilled
+// at rate r tokens per second.
+// Informally, in any large enough time interval, the Limiter limits the
+// rate to r tokens per second, with a maximum burst size of b events.
+// As a special case, if r == Inf (the infinite rate), b is ignored.
+// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets.
+//
+// The zero value is a valid Limiter, but it will reject all events.
+// Use NewLimiter to create non-zero Limiters.
+//
+// Limiter has three main methods, Allow, Reserve, and Wait.
+// Most callers should use Wait.
+//
+// Each of the three methods consumes a single token.
+// They differ in their behavior when no token is available.
+// If no token is available, Allow returns false.
+// If no token is available, Reserve returns a reservation for a future token
+// and the amount of time the caller must wait before using it.
+// If no token is available, Wait blocks until one can be obtained
+// or its associated context.Context is canceled.
+//
+// The methods AllowN, ReserveN, and WaitN consume n tokens.
+type Limiter struct {
+ limit Limit
+ burst int
+
+ mu sync.Mutex
+ tokens float64
+ // last is the last time the limiter's tokens field was updated
+ last time.Time
+ // lastEvent is the latest time of a rate-limited event (past or future)
+ lastEvent time.Time
+}
+
+// Limit returns the maximum overall event rate.
+func (lim *Limiter) Limit() Limit {
+ lim.mu.Lock()
+ defer lim.mu.Unlock()
+ return lim.limit
+}
+
+// Burst returns the maximum burst size. Burst is the maximum number of tokens
+// that can be consumed in a single call to Allow, Reserve, or Wait, so higher
+// Burst values allow more events to happen at once.
+// A zero Burst allows no events, unless limit == Inf.
+func (lim *Limiter) Burst() int {
+ return lim.burst
+}
+
+// NewLimiter returns a new Limiter that allows events up to rate r and permits
+// bursts of at most b tokens.
+func NewLimiter(r Limit, b int) *Limiter {
+ return &Limiter{
+ limit: r,
+ burst: b,
+ }
+}
+
+// Allow is shorthand for AllowN(time.Now(), 1).
+func (lim *Limiter) Allow() bool {
+ return lim.AllowN(time.Now(), 1)
+}
+
+// AllowN reports whether n events may happen at time now.
+// Use this method if you intend to drop / skip events that exceed the rate limit.
+// Otherwise use Reserve or Wait.
+func (lim *Limiter) AllowN(now time.Time, n int) bool {
+ return lim.reserveN(now, n, 0).ok
+}
+
+// A Reservation holds information about events that are permitted by a Limiter to happen after a delay.
+// A Reservation may be canceled, which may enable the Limiter to permit additional events.
+type Reservation struct {
+ ok bool
+ lim *Limiter
+ tokens int
+ timeToAct time.Time
+ // This is the Limit at reservation time, it can change later.
+ limit Limit
+}
+
+// OK returns whether the limiter can provide the requested number of tokens
+// within the maximum wait time. If OK is false, Delay returns InfDuration, and
+// Cancel does nothing.
+func (r *Reservation) OK() bool {
+ return r.ok
+}
+
+// Delay is shorthand for DelayFrom(time.Now()).
+func (r *Reservation) Delay() time.Duration {
+ return r.DelayFrom(time.Now())
+}
+
+// InfDuration is the duration returned by Delay when a Reservation is not OK.
+const InfDuration = time.Duration(1<<63 - 1)
+
+// DelayFrom returns the duration for which the reservation holder must wait
+// before taking the reserved action. Zero duration means act immediately.
+// InfDuration means the limiter cannot grant the tokens requested in this
+// Reservation within the maximum wait time.
+func (r *Reservation) DelayFrom(now time.Time) time.Duration {
+ if !r.ok {
+ return InfDuration
+ }
+ delay := r.timeToAct.Sub(now)
+ if delay < 0 {
+ return 0
+ }
+ return delay
+}
+
+// Cancel is shorthand for CancelAt(time.Now()).
+func (r *Reservation) Cancel() {
+ r.CancelAt(time.Now())
+ return
+}
+
+// CancelAt indicates that the reservation holder will not perform the reserved action
+// and reverses the effects of this Reservation on the rate limit as much as possible,
+// considering that other reservations may have already been made.
+func (r *Reservation) CancelAt(now time.Time) {
+ if !r.ok {
+ return
+ }
+
+ r.lim.mu.Lock()
+ defer r.lim.mu.Unlock()
+
+ if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) {
+ return
+ }
+
+ // calculate tokens to restore
+ // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved
+ // after r was obtained. These tokens should not be restored.
+ restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct))
+ if restoreTokens <= 0 {
+ return
+ }
+ // advance time to now
+ now, _, tokens := r.lim.advance(now)
+ // calculate new number of tokens
+ tokens += restoreTokens
+ if burst := float64(r.lim.burst); tokens > burst {
+ tokens = burst
+ }
+ // update state
+ r.lim.last = now
+ r.lim.tokens = tokens
+ if r.timeToAct == r.lim.lastEvent {
+ prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens)))
+ if !prevEvent.Before(now) {
+ r.lim.lastEvent = prevEvent
+ }
+ }
+
+ return
+}
+
+// Reserve is shorthand for ReserveN(time.Now(), 1).
+func (lim *Limiter) Reserve() *Reservation {
+ return lim.ReserveN(time.Now(), 1)
+}
+
+// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen.
+// The Limiter takes this Reservation into account when allowing future events.
+// The returned Reservation's OK method returns false if n exceeds the Limiter's burst size.
+// Usage example:
+// r := lim.ReserveN(time.Now(), 1)
+// if !r.OK() {
+// // Not allowed to act! Did you remember to set lim.burst to be > 0 ?
+// }
+// time.Sleep(r.Delay())
+// Act()
+// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events.
+// If you need to respect a deadline or cancel the delay, use Wait instead.
+// To drop or skip events exceeding rate limit, use Allow instead.
+func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation {
+ r := lim.reserveN(now, n, InfDuration)
+ return &r
+}
+
+// Wait is shorthand for WaitN(ctx, 1).
+func (lim *Limiter) Wait(ctx context.Context) (err error) {
+ return lim.WaitN(ctx, 1)
+}
+
+// WaitN blocks until lim permits n events to happen.
+// It returns an error if n exceeds the Limiter's burst size, the Context is
+// canceled, or the expected wait time exceeds the Context's Deadline.
+func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
+ if n > lim.burst {
+ return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
+ }
+ // Check if ctx is already cancelled
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ // Determine wait limit
+ now := time.Now()
+ waitLimit := InfDuration
+ if deadline, ok := ctx.Deadline(); ok {
+ waitLimit = deadline.Sub(now)
+ }
+ // Reserve
+ r := lim.reserveN(now, n, waitLimit)
+ if !r.ok {
+ return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n)
+ }
+ // Wait
+ t := time.NewTimer(r.DelayFrom(now))
+ defer t.Stop()
+ select {
+ case <-t.C:
+ // We can proceed.
+ return nil
+ case <-ctx.Done():
+ // Context was canceled before we could proceed. Cancel the
+ // reservation, which may permit other events to proceed sooner.
+ r.Cancel()
+ return ctx.Err()
+ }
+}
+
+// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit).
+func (lim *Limiter) SetLimit(newLimit Limit) {
+ lim.SetLimitAt(time.Now(), newLimit)
+}
+
+// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated
+// or underutilized by those which reserved (using Reserve or Wait) but did not yet act
+// before SetLimitAt was called.
+func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) {
+ lim.mu.Lock()
+ defer lim.mu.Unlock()
+
+ now, _, tokens := lim.advance(now)
+
+ lim.last = now
+ lim.tokens = tokens
+ lim.limit = newLimit
+}
+
+// reserveN is a helper method for AllowN, ReserveN, and WaitN.
+// maxFutureReserve specifies the maximum reservation wait duration allowed.
+// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN.
+func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation {
+ lim.mu.Lock()
+ defer lim.mu.Unlock()
+
+ if lim.limit == Inf {
+ return Reservation{
+ ok: true,
+ lim: lim,
+ tokens: n,
+ timeToAct: now,
+ }
+ }
+
+ now, last, tokens := lim.advance(now)
+
+ // Calculate the remaining number of tokens resulting from the request.
+ tokens -= float64(n)
+
+ // Calculate the wait duration
+ var waitDuration time.Duration
+ if tokens < 0 {
+ waitDuration = lim.limit.durationFromTokens(-tokens)
+ }
+
+ // Decide result
+ ok := n <= lim.burst && waitDuration <= maxFutureReserve
+
+ // Prepare reservation
+ r := Reservation{
+ ok: ok,
+ lim: lim,
+ limit: lim.limit,
+ }
+ if ok {
+ r.tokens = n
+ r.timeToAct = now.Add(waitDuration)
+ }
+
+ // Update state
+ if ok {
+ lim.last = now
+ lim.tokens = tokens
+ lim.lastEvent = r.timeToAct
+ } else {
+ lim.last = last
+ }
+
+ return r
+}
+
+// advance calculates and returns an updated state for lim resulting from the passage of time.
+// lim is not changed.
+func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) {
+ last := lim.last
+ if now.Before(last) {
+ last = now
+ }
+
+ // Avoid making delta overflow below when last is very old.
+ maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens)
+ elapsed := now.Sub(last)
+ if elapsed > maxElapsed {
+ elapsed = maxElapsed
+ }
+
+ // Calculate the new number of tokens, due to time that passed.
+ delta := lim.limit.tokensFromDuration(elapsed)
+ tokens := lim.tokens + delta
+ if burst := float64(lim.burst); tokens > burst {
+ tokens = burst
+ }
+
+ return now, last, tokens
+}
+
+// durationFromTokens is a unit conversion function from the number of tokens to the duration
+// of time it takes to accumulate them at a rate of limit tokens per second.
+func (limit Limit) durationFromTokens(tokens float64) time.Duration {
+ seconds := tokens / float64(limit)
+ return time.Nanosecond * time.Duration(1e9*seconds)
+}
+
+// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
+// which could be accumulated during that duration at a rate of limit tokens per second.
+func (limit Limit) tokensFromDuration(d time.Duration) float64 {
+ return d.Seconds() * float64(limit)
+}
diff --git a/vendor/rsc.io/letsencrypt/LICENSE b/vendor/rsc.io/letsencrypt/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/rsc.io/letsencrypt/README b/vendor/rsc.io/letsencrypt/README
new file mode 100644
index 00000000..98a875f3
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/README
@@ -0,0 +1,152 @@
+package letsencrypt // import "rsc.io/letsencrypt"
+
+Package letsencrypt obtains TLS certificates from LetsEncrypt.org.
+
+LetsEncrypt.org is a service that issues free SSL/TLS certificates to
+servers that can prove control over the given domain's DNS records or the
+servers pointed at by those records.
+
+
+Quick Start
+
+A complete HTTP/HTTPS web server using TLS certificates from
+LetsEncrypt.org, redirecting all HTTP access to HTTPS, and maintaining TLS
+certificates in a file letsencrypt.cache across server restarts.
+
+ package main
+
+ import (
+ "fmt"
+ "log"
+ "net/http"
+ "rsc.io/letsencrypt"
+ )
+
+ func main() {
+ http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "Hello, TLS!\n")
+ })
+ var m letsencrypt.Manager
+ if err := m.CacheFile("letsencrypt.cache"); err != nil {
+ log.Fatal(err)
+ }
+ log.Fatal(m.Serve())
+ }
+
+
+Overview
+
+The fundamental type in this package is the Manager, which manages obtaining
+and refreshing a collection of TLS certificates, typically for use by an
+HTTPS server. The example above shows the most basic use of a Manager. The
+use can be customized by calling additional methods of the Manager.
+
+
+Registration
+
+A Manager m registers anonymously with LetsEncrypt.org, including agreeing
+to the letsencrypt.org terms of service, the first time it needs to obtain a
+certificate. To register with a particular email address and with the option
+of a prompt for agreement with the terms of service, call m.Register.
+
+
+GetCertificate
+
+The Manager's GetCertificate method returns certificates from the Manager's
+cache, filling the cache by requesting certificates from LetsEncrypt.org. In
+this way, a server with a tls.Config.GetCertificate set to m.GetCertificate
+will demand load a certificate for any host name it serves. To force loading
+of certificates ahead of time, install m.GetCertificate as before but then
+call m.Cert for each host name.
+
+A Manager can only obtain a certificate for a given host name if it can
+prove control of that host name to LetsEncrypt.org. By default it proves
+control by answering an HTTPS-based challenge: when the LetsEncrypt.org
+servers connect to the named host on port 443 (HTTPS), the TLS SNI handshake
+must use m.GetCertificate to obtain a per-host certificate. The most common
+way to satisfy this requirement is for the host name to resolve to the IP
+address of a (single) computer running m.ServeHTTPS, or at least running a
+Go TLS server with tls.Config.GetCertificate set to m.GetCertificate.
+However, other configurations are possible. For example, a group of machines
+could use an implementation of tls.Config.GetCertificate that cached
+certificates but handled cache misses by making RPCs to a Manager m on an
+elected leader machine.
+
+In typical usage, then, the setting of tls.Config.GetCertificate to
+m.GetCertificate serves two purposes: it provides certificates to the TLS
+server for ordinary serving, and it also answers challenges to prove
+ownership of the domains in order to obtain those certificates.
+
+To force the loading of a certificate for a given host into the Manager's
+cache, use m.Cert.
+
+
+Persistent Storage
+
+If a server always starts with a zero Manager m, the server effectively
+fetches a new certificate for each of its host names from LetsEncrypt.org on
+each restart. This is unfortunate both because the server cannot start if
+LetsEncrypt.org is unavailable and because LetsEncrypt.org limits how often
+it will issue a certificate for a given host name (at time of writing, the
+limit is 5 per week for a given host name). To save server state proactively
+to a cache file and to reload the server state from that same file when
+creating a new manager, call m.CacheFile with the name of the file to use.
+
+For alternate storage uses, m.Marshal returns the current state of the
+Manager as an opaque string, m.Unmarshal sets the state of the Manager using
+a string previously returned by m.Marshal (usually a different m), and
+m.Watch returns a channel that receives notifications about state changes.
+
+
+Limits
+
+To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager
+limits all its interactions to at most one request every minute, with an
+initial allowed burst of 20 requests.
+
+By default, if GetCertificate is asked for a certificate it does not have,
+it will in turn ask LetsEncrypt.org for that certificate. This opens a
+potential attack where attackers connect to a server by IP address and
+pretend to be asking for an incorrect host name. Then GetCertificate will
+attempt to obtain a certificate for that host, incorrectly, eventually
+hitting LetsEncrypt.org's rate limit for certificate requests and making it
+impossible to obtain actual certificates. Because servers hold certificates
+for months at a time, however, an attack would need to be sustained over a
+time period of at least a month in order to cause real problems.
+
+To mitigate this kind of attack, a given Manager limits itself to an average
+of one certificate request for a new host every three hours, with an initial
+allowed burst of up to 20 requests. Long-running servers will therefore stay
+within the LetsEncrypt.org limit of 300 failed requests per month.
+Certificate refreshes are not subject to this limit.
+
+To eliminate the attack entirely, call m.SetHosts to enumerate the exact set
+of hosts that are allowed in certificate requests.
+
+
+Web Servers
+
+The basic requirement for use of a Manager is that there be an HTTPS server
+running on port 443 and calling m.GetCertificate to obtain TLS certificates.
+Using standard primitives, the way to do this is:
+
+ srv := &http.Server{
+ Addr: ":https",
+ TLSConfig: &tls.Config{
+ GetCertificate: m.GetCertificate,
+ },
+ }
+ srv.ListenAndServeTLS("", "")
+
+However, this pattern of serving HTTPS with demand-loaded TLS certificates
+comes up enough to wrap into a single method m.ServeHTTPS.
+
+Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS
+URLs. That functionality is provided by RedirectHTTP.
+
+The combination of serving HTTPS with demand-loaded TLS certificates and
+serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in
+the original example above.
+
+func RedirectHTTP(w http.ResponseWriter, r *http.Request)
+type Manager struct { ... }
diff --git a/vendor/rsc.io/letsencrypt/lets.go b/vendor/rsc.io/letsencrypt/lets.go
new file mode 100644
index 00000000..3a845363
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/lets.go
@@ -0,0 +1,753 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package letsencrypt obtains TLS certificates from LetsEncrypt.org.
+//
+// LetsEncrypt.org is a service that issues free SSL/TLS certificates to servers
+// that can prove control over the given domain's DNS records or
+// the servers pointed at by those records.
+//
+// Quick Start
+//
+// A complete HTTP/HTTPS web server using TLS certificates from LetsEncrypt.org,
+// redirecting all HTTP access to HTTPS, and maintaining TLS certificates in a file
+// letsencrypt.cache across server restarts.
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+// "net/http"
+// "rsc.io/letsencrypt"
+// )
+//
+// func main() {
+// http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// fmt.Fprintf(w, "Hello, TLS!\n")
+// })
+// var m letsencrypt.Manager
+// if err := m.CacheFile("letsencrypt.cache"); err != nil {
+// log.Fatal(err)
+// }
+// log.Fatal(m.Serve())
+// }
+//
+// Overview
+//
+// The fundamental type in this package is the Manager, which
+// manages obtaining and refreshing a collection of TLS certificates,
+// typically for use by an HTTPS server.
+// The example above shows the most basic use of a Manager.
+// The use can be customized by calling additional methods of the Manager.
+//
+// Registration
+//
+// A Manager m registers anonymously with LetsEncrypt.org, including agreeing to
+// the letsencrypt.org terms of service, the first time it needs to obtain a certificate.
+// To register with a particular email address and with the option of a
+// prompt for agreement with the terms of service, call m.Register.
+//
+// GetCertificate
+//
+// The Manager's GetCertificate method returns certificates
+// from the Manager's cache, filling the cache by requesting certificates
+// from LetsEncrypt.org. In this way, a server with a tls.Config.GetCertificate
+// set to m.GetCertificate will demand load a certificate for any host name
+// it serves. To force loading of certificates ahead of time, install m.GetCertificate
+// as before but then call m.Cert for each host name.
+//
+// A Manager can only obtain a certificate for a given host name if it can prove
+// control of that host name to LetsEncrypt.org. By default it proves control by
+// answering an HTTPS-based challenge: when
+// the LetsEncrypt.org servers connect to the named host on port 443 (HTTPS),
+// the TLS SNI handshake must use m.GetCertificate to obtain a per-host certificate.
+// The most common way to satisfy this requirement is for the host name to
+// resolve to the IP address of a (single) computer running m.ServeHTTPS,
+// or at least running a Go TLS server with tls.Config.GetCertificate set to m.GetCertificate.
+// However, other configurations are possible. For example, a group of machines
+// could use an implementation of tls.Config.GetCertificate that cached
+// certificates but handled cache misses by making RPCs to a Manager m
+// on an elected leader machine.
+//
+// In typical usage, then, the setting of tls.Config.GetCertificate to m.GetCertificate
+// serves two purposes: it provides certificates to the TLS server for ordinary serving,
+// and it also answers challenges to prove ownership of the domains in order to
+// obtain those certificates.
+//
+// To force the loading of a certificate for a given host into the Manager's cache,
+// use m.Cert.
+//
+// Persistent Storage
+//
+// If a server always starts with a zero Manager m, the server effectively fetches
+// a new certificate for each of its host names from LetsEncrypt.org on each restart.
+// This is unfortunate both because the server cannot start if LetsEncrypt.org is
+// unavailable and because LetsEncrypt.org limits how often it will issue a certificate
+// for a given host name (at time of writing, the limit is 5 per week for a given host name).
+// To save server state proactively to a cache file and to reload the server state from
+// that same file when creating a new manager, call m.CacheFile with the name of
+// the file to use.
+//
+// For alternate storage uses, m.Marshal returns the current state of the Manager
+// as an opaque string, m.Unmarshal sets the state of the Manager using a string
+// previously returned by m.Marshal (usually a different m), and m.Watch returns
+// a channel that receives notifications about state changes.
+//
+// Limits
+//
+// To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager limits all its
+// interactions to at most one request every minute, with an initial allowed burst of
+// 20 requests.
+//
+// By default, if GetCertificate is asked for a certificate it does not have, it will in turn
+// ask LetsEncrypt.org for that certificate. This opens a potential attack where attackers
+// connect to a server by IP address and pretend to be asking for an incorrect host name.
+// Then GetCertificate will attempt to obtain a certificate for that host, incorrectly,
+// eventually hitting LetsEncrypt.org's rate limit for certificate requests and making it
+// impossible to obtain actual certificates. Because servers hold certificates for months
+// at a time, however, an attack would need to be sustained over a time period
+// of at least a month in order to cause real problems.
+//
+// To mitigate this kind of attack, a given Manager limits
+// itself to an average of one certificate request for a new host every three hours,
+// with an initial allowed burst of up to 20 requests.
+// Long-running servers will therefore stay
+// within the LetsEncrypt.org limit of 300 failed requests per month.
+// Certificate refreshes are not subject to this limit.
+//
+// To eliminate the attack entirely, call m.SetHosts to enumerate the exact set
+// of hosts that are allowed in certificate requests.
+//
+// Web Servers
+//
+// The basic requirement for use of a Manager is that there be an HTTPS server
+// running on port 443 and calling m.GetCertificate to obtain TLS certificates.
+// Using standard primitives, the way to do this is:
+//
+// srv := &http.Server{
+// Addr: ":https",
+// TLSConfig: &tls.Config{
+// GetCertificate: m.GetCertificate,
+// },
+// }
+// srv.ListenAndServeTLS("", "")
+//
+// However, this pattern of serving HTTPS with demand-loaded TLS certificates
+// comes up enough to wrap into a single method m.ServeHTTPS.
+//
+// Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS URLs.
+// That functionality is provided by RedirectHTTP.
+//
+// The combination of serving HTTPS with demand-loaded TLS certificates and
+// serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in
+// the original example above.
+//
+package letsencrypt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/time/rate"
+
+ "github.com/xenolf/lego/acme"
+)
+
+const letsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory"
+const debug = false
+
+// A Manager m takes care of obtaining and refreshing a collection of TLS certificates
+// obtained by LetsEncrypt.org.
+// The zero Manager is not yet registered with LetsEncrypt.org and has no TLS certificates
+// but is nonetheless ready for use.
+// See the package comment for an overview of how to use a Manager.
+type Manager struct {
+	mu           sync.Mutex                  // guards every field below
+	state        state                       // serializable state: registration, host list, certificates
+	rateLimit    *rate.Limiter               // global ACME request limit (1/minute, burst 20; see init)
+	newHostLimit *rate.Limiter               // limit on certificates for new hosts (1 per 3h, burst 20)
+	certCache    map[string]*cacheEntry      // per-host certificate and fetch state
+	certTokens   map[string]*tls.Certificate // TLS-SNI challenge certs, keyed by *.acme.invalid name
+	watchChan    chan struct{}               // 1-buffered change-notification channel; see Watch
+}
+
+// Serve runs an HTTP/HTTPS web server using TLS certificates obtained by the manager.
+// The HTTP server redirects all requests to the HTTPS server.
+// The HTTPS server obtains TLS certificates as needed and responds to requests
+// by invoking http.DefaultServeMux.
+//
+// Serve does not return until the HTTPS server fails to start or else stops.
+// Either way, Serve can only return a non-nil error, never nil.
+func (m *Manager) Serve() error {
+	l, err := net.Listen("tcp", ":http")
+	if err != nil {
+		return err
+	}
+	defer l.Close()
+	// The redirect server runs until l is closed when ServeHTTPS returns.
+	go http.Serve(l, http.HandlerFunc(RedirectHTTP))
+
+	return m.ServeHTTPS()
+}
+
+// ServeHTTPS runs an HTTPS web server using TLS certificates obtained by the manager.
+// The HTTPS server obtains TLS certificates as needed and responds to requests
+// by invoking http.DefaultServeMux.
+// ServeHTTPS does not return until the HTTPS server fails to start or else stops.
+// Either way, ServeHTTPS can only return a non-nil error, never nil.
+func (m *Manager) ServeHTTPS() error {
+	srv := &http.Server{
+		Addr: ":https",
+		TLSConfig: &tls.Config{
+			// GetCertificate both serves cached certificates and answers
+			// TLS-SNI challenges from LetsEncrypt.org.
+			GetCertificate: m.GetCertificate,
+		},
+	}
+	return srv.ListenAndServeTLS("", "")
+}
+
+// RedirectHTTP is an HTTP handler (suitable for use with http.HandleFunc)
+// that responds to all requests by redirecting to the same URL served over HTTPS.
+// It should only be invoked for requests received over HTTP.
+func RedirectHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.TLS != nil || r.Host == "" {
+ http.Error(w, "not found", 404)
+ }
+
+ u := r.URL
+ u.Host = r.Host
+ u.Scheme = "https"
+ http.Redirect(w, r, u.String(), 302)
+}
+
+// state is the serializable state for the Manager.
+// It also implements acme.User.
+type state struct {
+	Email string                     // account email address; may be empty
+	Reg   *acme.RegistrationResource // ACME registration, including TOS agreement
+	Key   string                     // account private key, PEM-encoded ("EC PRIVATE KEY")
+	key   *ecdsa.PrivateKey          // parsed form of Key; rebuilt in Unmarshal, never serialized
+	Hosts []string                   // allowed hosts from SetHosts; nil means unrestricted
+	Certs map[string]stateCert       // obtained certificates, keyed by host name
+}
+
+// GetEmail, GetRegistration, and GetPrivateKey implement acme.User.
+func (s *state) GetEmail() string                            { return s.Email }
+func (s *state) GetRegistration() *acme.RegistrationResource { return s.Reg }
+func (s *state) GetPrivateKey() crypto.PrivateKey            { return s.key }
+
+type stateCert struct {
+ Cert string
+ Key string
+}
+
+func (cert stateCert) toTLS() (*tls.Certificate, error) {
+ c, err := tls.X509KeyPair([]byte(cert.Cert), []byte(cert.Key))
+ if err != nil {
+ return nil, err
+ }
+ return &c, err
+}
+
+// cacheEntry tracks the certificate (or the most recent fetch error)
+// for a single host.
+type cacheEntry struct {
+	host string   // host name this entry covers
+	m    *Manager // owning manager, used for rate limits and state updates
+
+	mu         sync.Mutex       // guards the fields below
+	cert       *tls.Certificate // current certificate, or nil if none obtained yet
+	timeout    time.Time        // refresh time for cert, or expiry of a cached error
+	refreshing bool             // a background refresh goroutine is in flight
+	err        error            // last fetch error, cached until timeout
+}
+
+// init lazily allocates the manager's rate limiters, caches, and watch
+// channel on first use. It is safe to call repeatedly.
+func (m *Manager) init() {
+	m.mu.Lock()
+	if m.certCache == nil {
+		m.rateLimit = rate.NewLimiter(rate.Every(1*time.Minute), 20)
+		m.newHostLimit = rate.NewLimiter(rate.Every(3*time.Hour), 20)
+		m.certCache = map[string]*cacheEntry{}
+		m.certTokens = map[string]*tls.Certificate{}
+		// Seed the 1-buffered channel so the first Watch receive fires immediately.
+		m.watchChan = make(chan struct{}, 1)
+		m.watchChan <- struct{}{}
+	}
+	m.mu.Unlock()
+}
+
+// Watch returns the manager's watch channel,
+// which delivers a notification after every time the
+// manager's state (as exposed by Marshal and Unmarshal) changes.
+// All calls to Watch return the same watch channel.
+//
+// The watch channel includes notifications about changes
+// before the first call to Watch, so that in the pattern below,
+// the range loop executes once immediately, saving
+// the result of setup (along with any background updates that
+// may have raced in quickly).
+//
+//	m := new(letsencrypt.Manager)
+//	setup(m)
+//	go backgroundUpdates(m)
+//	for range m.Watch() {
+//		save(m.Marshal())
+//	}
+//
+func (m *Manager) Watch() <-chan struct{} {
+	m.init()
+	m.updated()
+	return m.watchChan
+}
+
+// updated delivers a non-blocking notification on the watch channel.
+// If a notification is already pending it is coalesced with this one.
+func (m *Manager) updated() {
+	select {
+	case m.watchChan <- struct{}{}:
+	default:
+	}
+}
+
+// CacheFile arranges to cache the manager's state in the named file,
+// first loading any state already stored there and then rewriting the
+// file after every subsequent state change.
+func (m *Manager) CacheFile(name string) error {
+	// Create the file if needed (and verify it is writable) before reading.
+	f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		return err
+	}
+	f.Close()
+	data, err := ioutil.ReadFile(name)
+	if err != nil {
+		return err
+	}
+	if len(data) > 0 {
+		if err := m.Unmarshal(string(data)); err != nil {
+			return err
+		}
+	}
+	// NOTE(review): this writer goroutine runs for the life of the process;
+	// there is no way to detach the cache file once installed — confirm
+	// that is acceptable for callers.
+	go func() {
+		for range m.Watch() {
+			err := ioutil.WriteFile(name, []byte(m.Marshal()), 0600)
+			if err != nil {
+				log.Printf("writing letsencrypt cache: %v", err)
+			}
+		}
+	}()
+	return nil
+}
+
+// Registered reports whether the manager has registered with letsencrypt.org yet.
+func (m *Manager) Registered() bool {
+	m.init()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.registered()
+}
+
+// registered reports whether registration, including agreement to the
+// terms of service, has completed. The caller must hold m.mu.
+func (m *Manager) registered() bool {
+	return m.state.Reg != nil && m.state.Reg.Body.Agreement != ""
+}
+
+// Register registers the manager with letsencrypt.org, using the given email address.
+// Registration may require agreeing to the letsencrypt.org terms of service.
+// If so, Register calls prompt(url) where url is the URL of the terms of service.
+// Prompt should report whether the caller agrees to the terms.
+// A nil prompt func is taken to mean that the user always agrees.
+// The email address is sent to LetsEncrypt.org but otherwise unchecked;
+// it can be omitted by passing the empty string.
+//
+// Calling Register is only required to make sure registration uses a
+// particular email address or to insert an explicit prompt into the
+// registration sequence. If the manager is not registered, it will
+// automatically register with no email address and automatic
+// agreement to the terms of service at the first call to Cert or GetCertificate.
+func (m *Manager) Register(email string, prompt func(string) bool) error {
+ m.init()
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ return m.register(email, prompt)
+}
+
+func (m *Manager) register(email string, prompt func(string) bool) error {
+ if m.registered() {
+ return fmt.Errorf("already registered")
+ }
+ m.state.Email = email
+ if m.state.key == nil {
+ key, err := newKey()
+ if err != nil {
+ return fmt.Errorf("generating key: %v", err)
+ }
+ Key, err := marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("generating key: %v", err)
+ }
+ m.state.key = key
+ m.state.Key = string(Key)
+ }
+
+ c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256)
+ if err != nil {
+ return fmt.Errorf("create client: %v", err)
+ }
+ reg, err := c.Register()
+ if err != nil {
+ return fmt.Errorf("register: %v", err)
+ }
+
+ m.state.Reg = reg
+ if reg.Body.Agreement == "" {
+ if prompt != nil && !prompt(reg.TosURL) {
+ return fmt.Errorf("did not agree to TOS")
+ }
+ if err := c.AgreeToTOS(); err != nil {
+ return fmt.Errorf("agreeing to TOS: %v", err)
+ }
+ }
+
+ m.updated()
+
+ return nil
+}
+
+// Marshal returns an encoding of the manager's state,
+// suitable for writing to disk and reloading by calling Unmarshal.
+// The state includes registration status, the configured host list
+// from SetHosts, and all known certificates, including their private
+// cryptographic keys.
+// Consequently, the state should be kept private.
+func (m *Manager) Marshal() string {
+	m.init()
+	js, err := json.MarshalIndent(&m.state, "", "\t")
+	if err != nil {
+		// Marshaling a plain struct of strings/maps can only fail on a program bug.
+		panic("unexpected json.Marshal failure")
+	}
+	return string(js)
+}
+
+// Unmarshal restores the state encoded by a previous call to Marshal
+// (perhaps on a different Manager in a different program).
+func (m *Manager) Unmarshal(enc string) error {
+	m.init()
+	var st state
+	if err := json.Unmarshal([]byte(enc), &st); err != nil {
+		return err
+	}
+	if st.Key != "" {
+		// Reconstruct the parsed account key from its PEM form.
+		key, err := unmarshalKey(st.Key)
+		if err != nil {
+			return err
+		}
+		st.key = key
+	}
+	// NOTE(review): m.state and m.certCache are written here without holding
+	// m.mu; this looks safe only if Unmarshal runs before the Manager is
+	// shared across goroutines — confirm with callers.
+	m.state = st
+	for host, cert := range m.state.Certs {
+		c, err := cert.toTLS()
+		if err != nil {
+			log.Printf("letsencrypt: ignoring entry for %s: %v", host, err)
+			continue
+		}
+		m.certCache[host] = &cacheEntry{host: host, m: m, cert: c}
+	}
+	m.updated()
+	return nil
+}
+
+// SetHosts sets the manager's list of known host names.
+// If the list is non-nil, the manager will only ever attempt to acquire
+// certificates for host names on the list.
+// If the list is nil, the manager does not restrict the hosts it will
+// ask for certificates for.
+func (m *Manager) SetHosts(hosts []string) {
+	m.init()
+	m.mu.Lock()
+	// Store a copy so later mutation of the caller's slice has no effect.
+	m.state.Hosts = append(m.state.Hosts[:0], hosts...)
+	m.mu.Unlock()
+	m.updated()
+}
+
+// GetCertificate can be placed a tls.Config's GetCertificate field to make
+// the TLS server use Let's Encrypt certificates.
+// Each time a client connects to the TLS server expecting a new host name,
+// the TLS server's call to GetCertificate will trigger an exchange with the
+// Let's Encrypt servers to obtain that certificate, subject to the manager rate limits.
+//
+// As noted in the Manager's documentation comment,
+// to obtain a certificate for a given host name, that name
+// must resolve to a computer running a TLS server on port 443
+// that obtains TLS SNI certificates by calling m.GetCertificate.
+// In the standard usage, then, installing m.GetCertificate in the tls.Config
+// both automatically provisions the TLS certificates needed for
+// ordinary HTTPS service and answers the challenges from LetsEncrypt.org.
+func (m *Manager) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+	m.init()
+
+	host := clientHello.ServerName
+
+	if debug {
+		log.Printf("GetCertificate %s", host)
+	}
+
+	// Names ending in .acme.invalid are TLS-SNI challenge probes from
+	// LetsEncrypt.org; answer them from the challenge-certificate table
+	// maintained by tlsProvider.Present/CleanUp.
+	if strings.HasSuffix(host, ".acme.invalid") {
+		m.mu.Lock()
+		cert := m.certTokens[host]
+		m.mu.Unlock()
+		if cert == nil {
+			return nil, fmt.Errorf("unknown host")
+		}
+		return cert, nil
+	}
+
+	return m.Cert(host)
+}
+
+// Cert returns the certificate for the given host name, obtaining a new one if necessary.
+//
+// As noted in the documentation for Manager and for the GetCertificate method,
+// obtaining a certificate requires that m.GetCertificate be associated with host.
+// In most servers, simply starting a TLS server with a configuration referring
+// to m.GetCertificate is sufficient, and Cert need not be called.
+//
+// The main use of Cert is to force the manager to obtain a certificate
+// for a particular host name ahead of time.
+func (m *Manager) Cert(host string) (*tls.Certificate, error) {
+ host = strings.ToLower(host)
+ if debug {
+ log.Printf("Cert %s", host)
+ }
+
+ m.init()
+ m.mu.Lock()
+ if !m.registered() {
+ m.register("", nil)
+ }
+
+ ok := false
+ if m.state.Hosts == nil {
+ ok = true
+ } else {
+ for _, h := range m.state.Hosts {
+ if host == h {
+ ok = true
+ break
+ }
+ }
+ }
+ if !ok {
+ m.mu.Unlock()
+ return nil, fmt.Errorf("unknown host")
+ }
+
+ // Otherwise look in our cert cache.
+ entry, ok := m.certCache[host]
+ if !ok {
+ r := m.rateLimit.Reserve()
+ ok := r.OK()
+ if ok {
+ ok = m.newHostLimit.Allow()
+ if !ok {
+ r.Cancel()
+ }
+ }
+ if !ok {
+ m.mu.Unlock()
+ return nil, fmt.Errorf("rate limited")
+ }
+ entry = &cacheEntry{host: host, m: m}
+ m.certCache[host] = entry
+ }
+ m.mu.Unlock()
+
+ entry.mu.Lock()
+ defer entry.mu.Unlock()
+ entry.init()
+ if entry.err != nil {
+ return nil, entry.err
+ }
+ return entry.cert, nil
+}
+
+// init ensures the entry has either a usable certificate or a recently
+// cached error, fetching or refreshing as needed. Caller must hold e.mu.
+func (e *cacheEntry) init() {
+	// A recent failure is cached until its timeout (one minute) to avoid
+	// hammering LetsEncrypt.org with retries.
+	if e.err != nil && time.Now().Before(e.timeout) {
+		return
+	}
+	if e.cert != nil {
+		if e.timeout.IsZero() {
+			// Lazily compute when this certificate should be refreshed.
+			t, err := certRefreshTime(e.cert)
+			if err != nil {
+				e.err = err
+				e.timeout = time.Now().Add(1 * time.Minute)
+				e.cert = nil
+				return
+			}
+			e.timeout = t
+		}
+		// Past the refresh time: start a single background refresh while
+		// continuing to serve the old certificate.
+		if time.Now().After(e.timeout) && !e.refreshing {
+			e.refreshing = true
+			go e.refresh()
+		}
+		return
+	}
+
+	// No certificate at all: fetch synchronously.
+	cert, refreshTime, err := e.m.verify(e.host)
+	e.m.mu.Lock()
+	e.m.certCache[e.host] = e
+	e.m.mu.Unlock()
+	e.install(cert, refreshTime, err)
+}
+
+// install records the result of a certificate fetch on the entry.
+// On failure the error is cached for one minute. Caller must hold e.mu.
+func (e *cacheEntry) install(cert *tls.Certificate, refreshTime time.Time, err error) {
+	e.cert = nil
+	e.timeout = time.Time{}
+	e.err = nil
+
+	if err != nil {
+		e.err = err
+		e.timeout = time.Now().Add(1 * time.Minute)
+		return
+	}
+
+	e.cert = cert
+	e.timeout = refreshTime
+}
+
+// refresh re-obtains the certificate for the entry's host in the background,
+// keeping the old certificate if the attempt fails.
+func (e *cacheEntry) refresh() {
+	e.m.rateLimit.Wait(context.Background())
+	cert, refreshTime, err := e.m.verify(e.host)
+
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	e.refreshing = false
+	if err == nil {
+		e.install(cert, refreshTime, nil)
+	}
+}
+
+func (m *Manager) verify(host string) (cert *tls.Certificate, refreshTime time.Time, err error) {
+ c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256)
+ if err != nil {
+ return
+ }
+ if err = c.SetChallengeProvider(acme.TLSSNI01, tlsProvider{m}); err != nil {
+ return
+ }
+ c.SetChallengeProvider(acme.TLSSNI01, tlsProvider{m})
+ c.ExcludeChallenges([]acme.Challenge{acme.HTTP01})
+ acmeCert, errmap := c.ObtainCertificate([]string{host}, true, nil)
+ if len(errmap) > 0 {
+ if debug {
+ log.Printf("ObtainCertificate %v => %v", host, errmap)
+ }
+ err = fmt.Errorf("%v", errmap)
+ return
+ }
+ entryCert := stateCert{
+ Cert: string(acmeCert.Certificate),
+ Key: string(acmeCert.PrivateKey),
+ }
+ cert, err = entryCert.toTLS()
+ if err != nil {
+ if debug {
+ log.Printf("ObtainCertificate %v toTLS failure: %v", host, err)
+ }
+ err = err
+ return
+ }
+ if refreshTime, err = certRefreshTime(cert); err != nil {
+ return
+ }
+
+ m.mu.Lock()
+ if m.state.Certs == nil {
+ m.state.Certs = make(map[string]stateCert)
+ }
+ m.state.Certs[host] = entryCert
+ m.mu.Unlock()
+ m.updated()
+
+ return cert, refreshTime, nil
+}
+
+func certRefreshTime(cert *tls.Certificate) (time.Time, error) {
+ xc, err := x509.ParseCertificate(cert.Certificate[0])
+ if err != nil {
+ if debug {
+ log.Printf("ObtainCertificate to X.509 failure: %v", err)
+ }
+ return time.Time{}, err
+ }
+ t := xc.NotBefore.Add(xc.NotAfter.Sub(xc.NotBefore) / 2)
+ monthEarly := xc.NotAfter.Add(-30 * 24 * time.Hour)
+ if t.Before(monthEarly) {
+ t = monthEarly
+ }
+ return t, nil
+}
+
+// tlsProvider implements acme.ChallengeProvider for TLS handshake challenges.
+type tlsProvider struct {
+	m *Manager
+}
+
+// Present installs the challenge certificate in the manager's token table so
+// that GetCertificate can serve it when LetsEncrypt.org connects with the
+// challenge's SNI domain (a *.acme.invalid name).
+func (p tlsProvider) Present(domain, token, keyAuth string) error {
+	cert, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth)
+	if err != nil {
+		return err
+	}
+
+	p.m.mu.Lock()
+	p.m.certTokens[dom] = &cert
+	p.m.mu.Unlock()
+
+	return nil
+}
+
+// CleanUp removes the challenge certificate installed by Present.
+func (p tlsProvider) CleanUp(domain, token, keyAuth string) error {
+	_, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth)
+	if err != nil {
+		return err
+	}
+
+	p.m.mu.Lock()
+	delete(p.m.certTokens, dom)
+	p.m.mu.Unlock()
+
+	return nil
+}
+
+func marshalKey(key *ecdsa.PrivateKey) ([]byte, error) {
+ data, err := x509.MarshalECPrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data}), nil
+}
+
+func unmarshalKey(text string) (*ecdsa.PrivateKey, error) {
+ b, _ := pem.Decode([]byte(text))
+ if b == nil {
+ return nil, fmt.Errorf("unmarshalKey: missing key")
+ }
+ if b.Type != "EC PRIVATE KEY" {
+ return nil, fmt.Errorf("unmarshalKey: found %q, not %q", b.Type, "EC PRIVATE KEY")
+ }
+ k, err := x509.ParseECPrivateKey(b.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshalKey: %v", err)
+ }
+ return k, nil
+}
+
+func newKey() (*ecdsa.PrivateKey, error) {
+ return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE
new file mode 100644
index 00000000..17460b71
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Sebastian Erhart
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go
new file mode 100644
index 00000000..85790050
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go
@@ -0,0 +1,16 @@
+package acme
+
+// Challenge is a string that identifies a particular type and version of ACME challenge.
+// The constant values below are the identifiers used on the wire.
+type Challenge string
+
+const (
+	// HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http
+	// Note: HTTP01ChallengePath returns the URL path to fulfill this challenge
+	HTTP01 = Challenge("http-01")
+	// TLSSNI01 is the "tls-sni-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#tls-with-server-name-indication-tls-sni
+	// Note: TLSSNI01ChallengeCert returns a certificate to fulfill this challenge
+	TLSSNI01 = Challenge("tls-sni-01")
+	// DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns
+	// Note: DNS01Record returns a DNS record which will fulfill this challenge
+	DNS01 = Challenge("dns-01")
+)
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go
new file mode 100644
index 00000000..16e4cbe0
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go
@@ -0,0 +1,638 @@
+// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers.
+package acme
+
+import (
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // Logger is an optional custom logger.
+ Logger *log.Logger
+)
+
+// logf writes a log entry. It uses Logger if not
+// nil, otherwise it uses the default log.Logger.
+func logf(format string, args ...interface{}) {
+ if Logger != nil {
+ Logger.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// User interface is to be implemented by users of this library.
+// It is used by the client type to get user specific information.
+type User interface {
+	GetEmail() string
+	GetRegistration() *RegistrationResource
+	GetPrivateKey() crypto.PrivateKey
+}
+
+// Interface for all challenge solvers to implement.
+type solver interface {
+	Solve(challenge challenge, domain string) error
+}
+
+// validateFunc is the signature of the challenge-validation step shared by
+// the solvers (see the validate fields set in NewClient).
+type validateFunc func(j *jws, domain, uri string, chlng challenge) error
+
+// Client is the user-friendly way to ACME
+type Client struct {
+	directory  directory            // CA directory endpoints fetched at construction
+	user       User                 // account owner; supplies key and registration
+	jws        *jws                 // request signer built from the user's private key
+	keyType    KeyType              // key type for newly generated certificate keys
+	issuerCert []byte
+	solvers    map[Challenge]solver // available challenge solvers by challenge type
+}
+
+// NewClient creates a new ACME client on behalf of the user. The client will depend on
+// the ACME directory located at caDirURL for the rest of its actions. It will
+// generate private keys for certificates using the given keyType.
+func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) {
+	privKey := user.GetPrivateKey()
+	if privKey == nil {
+		return nil, errors.New("private key was nil")
+	}
+
+	var dir directory
+	if _, err := getJSON(caDirURL, &dir); err != nil {
+		return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err)
+	}
+
+	// All four directory endpoints are required for the client to function.
+	if dir.NewRegURL == "" {
+		return nil, errors.New("directory missing new registration URL")
+	}
+	if dir.NewAuthzURL == "" {
+		return nil, errors.New("directory missing new authz URL")
+	}
+	if dir.NewCertURL == "" {
+		return nil, errors.New("directory missing new certificate URL")
+	}
+	if dir.RevokeCertURL == "" {
+		return nil, errors.New("directory missing revoke certificate URL")
+	}
+
+	jws := &jws{privKey: privKey, directoryURL: caDirURL}
+
+	// REVIEW: best possibility?
+	// Add all available solvers with the right index as per ACME
+	// spec to this map. Otherwise they won't be found.
+	solvers := make(map[Challenge]solver)
+	solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}}
+	solvers[TLSSNI01] = &tlsSNIChallenge{jws: jws, validate: validate, provider: &TLSProviderServer{}}
+
+	return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil
+}
+
+// SetChallengeProvider specifies a custom provider that will make the solution available
+func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error {
+ switch challenge {
+ case HTTP01:
+ c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p}
+ case TLSSNI01:
+ c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p}
+ default:
+ return fmt.Errorf("Unknown challenge %v", challenge)
+ }
+ return nil
+}
+
+// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges.
+// If this option is not used, the default port 80 and all interfaces will be used.
+// To only specify a port and no interface use the ":port" notation.
+func (c *Client) SetHTTPAddress(iface string) error {
+	host, port, err := net.SplitHostPort(iface)
+	if err != nil {
+		return err
+	}
+
+	// If the HTTP-01 solver has been removed via ExcludeChallenges,
+	// the new address is silently ignored.
+	if chlng, ok := c.solvers[HTTP01]; ok {
+		chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port)
+	}
+
+	return nil
+}
+
+// SetTLSAddress specifies a custom interface:port to be used for TLS based challenges.
+// If this option is not used, the default port 443 and all interfaces will be used.
+// To only specify a port and no interface use the ":port" notation.
+func (c *Client) SetTLSAddress(iface string) error {
+	host, port, err := net.SplitHostPort(iface)
+	if err != nil {
+		return err
+	}
+
+	// As above, a previously excluded TLS-SNI-01 solver is silently ignored.
+	if chlng, ok := c.solvers[TLSSNI01]; ok {
+		chlng.(*tlsSNIChallenge).provider = NewTLSProviderServer(host, port)
+	}
+	return nil
+}
+
+// ExcludeChallenges explicitly removes challenges from the pool for solving.
+func (c *Client) ExcludeChallenges(challenges []Challenge) {
+ // Loop through all challenges and delete the requested one if found.
+ for _, challenge := range challenges {
+ delete(c.solvers, challenge)
+ }
+}
+
+// Register the current account to the ACME server.
+func (c *Client) Register() (*RegistrationResource, error) {
+	if c == nil || c.user == nil {
+		return nil, errors.New("acme: cannot register a nil client or user")
+	}
+	logf("[INFO] acme: Registering account for %s", c.user.GetEmail())
+
+	regMsg := registrationMessage{
+		Resource: "new-reg",
+	}
+	// Contact is a list of mailto: URLs; it stays empty (not nil) when no
+	// email address was supplied.
+	if c.user.GetEmail() != "" {
+		regMsg.Contact = []string{"mailto:" + c.user.GetEmail()}
+	} else {
+		regMsg.Contact = []string{}
+	}
+
+	var serverReg Registration
+	hdr, err := postJSON(c.jws, c.directory.NewRegURL, regMsg, &serverReg)
+	if err != nil {
+		return nil, err
+	}
+
+	reg := &RegistrationResource{Body: serverReg}
+
+	// The terms-of-service and "next" URLs arrive as HTTP Link headers.
+	links := parseLinks(hdr["Link"])
+	reg.URI = hdr.Get("Location")
+	if links["terms-of-service"] != "" {
+		reg.TosURL = links["terms-of-service"]
+	}
+
+	if links["next"] != "" {
+		reg.NewAuthzURL = links["next"]
+	} else {
+		return nil, errors.New("acme: The server did not return 'next' link to proceed")
+	}
+
+	return reg, nil
+}
+
+// AgreeToTOS updates the Client registration and sends the agreement to
+// the server.
+func (c *Client) AgreeToTOS() error {
+	reg := c.user.GetRegistration()
+
+	// reg aliases the user's registration resource, so these writes modify
+	// the same object whose Body is posted below.
+	reg.Body.Agreement = c.user.GetRegistration().TosURL
+	reg.Body.Resource = "reg"
+	_, err := postJSON(c.jws, c.user.GetRegistration().URI, c.user.GetRegistration().Body, nil)
+	return err
+}
+
+// ObtainCertificate tries to obtain a single certificate using all domains passed into it.
+// The first domain in domains is used for the CommonName field of the certificate, all other
+// domains are added using the Subject Alternate Names extension. A new private key is generated
+// for every invocation of this function. If you do not want that you can supply your own private key
+// in the privKey parameter. If this parameter is non-nil it will be used instead of generating a new one.
+// If bundle is true, the []byte contains both the issuer certificate and
+// your issued certificate as a bundle.
+// This function will never return a partial certificate. If one domain in the list fails,
+// the whole certificate will fail.
+func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey) (CertificateResource, map[string]error) {
+	joined := strings.Join(domains, ", ")
+	if bundle {
+		logf("[INFO][%s] acme: Obtaining bundled SAN certificate", joined)
+	} else {
+		logf("[INFO][%s] acme: Obtaining SAN certificate", joined)
+	}
+
+	// Never generate a partial SAN certificate: bail out on any authz failure.
+	challenges, failures := c.getChallenges(domains)
+	if len(failures) > 0 {
+		return CertificateResource{}, failures
+	}
+
+	// Same rule for challenge solving: any failure aborts the whole request.
+	if errs := c.solveChallenges(challenges); len(errs) > 0 {
+		return CertificateResource{}, errs
+	}
+
+	logf("[INFO][%s] acme: Validations succeeded; requesting certificates", joined)
+
+	cert, err := c.requestCertificate(challenges, bundle, privKey)
+	if err != nil {
+		// Attribute the request failure to every domain in the set.
+		for _, chln := range challenges {
+			failures[chln.Domain] = err
+		}
+	}
+
+	return cert, failures
+}
+
+// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA.
+func (c *Client) RevokeCertificate(certificate []byte) error {
+	certificates, err := parsePEMBundle(certificate)
+	if err != nil {
+		return err
+	}
+
+	// The leaf certificate must come first; refuse bundles led by a CA cert.
+	leaf := certificates[0]
+	if leaf.IsCA {
+		return fmt.Errorf("Certificate bundle starts with a CA certificate")
+	}
+
+	body := revokeCertMessage{
+		Resource:    "revoke-cert",
+		Certificate: base64.URLEncoding.EncodeToString(leaf.Raw),
+	}
+	_, err = postJSON(c.jws, c.directory.RevokeCertURL, body, nil)
+	return err
+}
+
+// RenewCertificate takes a CertificateResource and tries to renew the certificate.
+// If the renewal process succeeds, the new certificate will be returned in a new CertResource.
+// Please be aware that this function will return a new certificate in ANY case that is not an error.
+// If the server does not provide us with a new cert on a GET request to the CertURL
+// this function will start a new-cert flow where a new certificate gets generated.
+// If bundle is true, the []byte contains both the issuer certificate and
+// your issued certificate as a bundle.
+// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil.
+func (c *Client) RenewCertificate(cert CertificateResource, bundle bool) (CertificateResource, error) {
+	// Input certificate is PEM encoded. Decode it here as we may need the decoded
+	// cert later on in the renewal process. The input may be a bundle or a single certificate.
+	certificates, err := parsePEMBundle(cert.Certificate)
+	if err != nil {
+		return CertificateResource{}, err
+	}
+
+	// The leaf must lead the bundle; a CA certificate in front is a caller error.
+	x509Cert := certificates[0]
+	if x509Cert.IsCA {
+		return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain)
+	}
+
+	// This is just meant to be informal for the user.
+	timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC())
+	logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours()))
+
+	// The first step of renewal is to check if we get a renewed cert
+	// directly from the cert URL.
+	resp, err := httpGet(cert.CertURL)
+	if err != nil {
+		return CertificateResource{}, err
+	}
+	defer resp.Body.Close()
+	serverCertBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return CertificateResource{}, err
+	}
+
+	// The body is expected to be a single DER certificate.
+	serverCert, err := x509.ParseCertificate(serverCertBytes)
+	if err != nil {
+		return CertificateResource{}, err
+	}
+
+	// If the server responds with a different certificate we are effectively renewed.
+	// TODO: Further test if we can actually use the new certificate (Our private key works)
+	if !x509Cert.Equal(serverCert) {
+		logf("[INFO][%s] acme: Server responded with renewed certificate", cert.Domain)
+		issuedCert := pemEncode(derCertificateBytes(serverCertBytes))
+		// If bundle is true, we want to return a certificate bundle.
+		// To do this, we need the issuer certificate.
+		if bundle {
+			// The issuer certificate link is always supplied via an "up" link
+			// in the response headers of a new certificate.
+			links := parseLinks(resp.Header["Link"])
+			issuerCert, err := c.getIssuerCertificate(links["up"])
+			if err != nil {
+				// If we fail to acquire the issuer cert, return the issued certificate - do not fail.
+				logf("[ERROR][%s] acme: Could not bundle issuer certificate: %v", cert.Domain, err)
+			} else {
+				// Success - append the issuer cert to the issued cert.
+				issuerCert = pemEncode(derCertificateBytes(issuerCert))
+				issuedCert = append(issuedCert, issuerCert...)
+			}
+		}
+
+		cert.Certificate = issuedCert
+		return cert, nil
+	}
+
+	// No renewed cert from the URL: fall back to a full new-cert flow,
+	// optionally reusing the existing private key.
+	var privKey crypto.PrivateKey
+	if cert.PrivateKey != nil {
+		privKey, err = parsePEMPrivateKey(cert.PrivateKey)
+		if err != nil {
+			return CertificateResource{}, err
+		}
+	}
+
+	var domains []string
+	// Declared here but only assigned by the ObtainCertificate call below;
+	// the ":=" there reuses this variable (newCert is the new name).
+	var failures map[string]error
+	// check for SAN certificate
+	// NOTE(review): a cert with exactly one DNSName that differs from the
+	// CommonName would take the else branch and drop that SAN — confirm intended.
+	if len(x509Cert.DNSNames) > 1 {
+		domains = append(domains, x509Cert.Subject.CommonName)
+		for _, sanDomain := range x509Cert.DNSNames {
+			if sanDomain == x509Cert.Subject.CommonName {
+				continue
+			}
+			domains = append(domains, sanDomain)
+		}
+	} else {
+		domains = append(domains, x509Cert.Subject.CommonName)
+	}
+
+	// Only the primary domain's failure (if any) is surfaced to the caller.
+	newCert, failures := c.ObtainCertificate(domains, bundle, privKey)
+	return newCert, failures[cert.Domain]
+}
+
+// Looks through the challenge combinations to find a solvable match.
+// Then solves the challenges in series and returns.
+func (c *Client) solveChallenges(challenges []authorizationResource) map[string]error {
+	failures := make(map[string]error)
+
+	// One authorization resource per domain.
+	for _, authz := range challenges {
+		solvers := c.chooseSolvers(authz.Body, authz.Domain)
+		if solvers == nil {
+			// No workable combination of solvers for this domain.
+			failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain)
+			continue
+		}
+		for i, solver := range solvers {
+			// TODO: do not immediately fail if one domain fails to validate.
+			if err := solver.Solve(authz.Body.Challenges[i], authz.Domain); err != nil {
+				failures[authz.Domain] = err
+			}
+		}
+	}
+
+	return failures
+}
+
+// Checks all combinations from the server and returns an array of
+// solvers which should get executed in series.
+func (c *Client) chooseSolvers(auth authorization, domain string) map[int]solver {
+	for _, combination := range auth.Combinations {
+		candidate := make(map[int]solver)
+		for _, idx := range combination {
+			challengeType := auth.Challenges[idx].Type
+			s, ok := c.solvers[challengeType]
+			if !ok {
+				logf("[INFO][%s] acme: Could not find solver for: %s", domain, challengeType)
+				continue
+			}
+			candidate[idx] = s
+		}
+
+		// Only a combination we can solve completely is usable.
+		if len(candidate) == len(combination) {
+			return candidate
+		}
+	}
+	return nil
+}
+
+// Get the challenges needed to prove our identifier to the ACME server.
+// Returns one authorizationResource per successful domain (in the caller's
+// order) and a map of per-domain errors for the rest.
+func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[string]error) {
+	resc, errc := make(chan authorizationResource), make(chan domainError)
+
+	// Request an authorization for every domain concurrently. Each goroutine
+	// must send exactly one message on resc or errc — the collection loop
+	// below receives exactly len(domains) messages.
+	for _, domain := range domains {
+		go func(domain string) {
+			authMsg := authorization{Resource: "new-authz", Identifier: identifier{Type: "dns", Value: domain}}
+			var authz authorization
+			hdr, err := postJSON(c.jws, c.user.GetRegistration().NewAuthzURL, authMsg, &authz)
+			if err != nil {
+				errc <- domainError{Domain: domain, Error: err}
+				return
+			}
+
+			links := parseLinks(hdr["Link"])
+			if links["next"] == "" {
+				logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain)
+				// BUG FIX: this path previously returned without sending on
+				// either channel, deadlocking the receive loop below.
+				errc <- domainError{Domain: domain, Error: errors.New("acme: server did not provide next link to proceed")}
+				return
+			}
+
+			resc <- authorizationResource{Body: authz, NewCertURL: links["next"], AuthURL: hdr.Get("Location"), Domain: domain}
+		}(domain)
+	}
+
+	// Collect exactly one result (success or failure) per domain.
+	responses := make(map[string]authorizationResource)
+	failures := make(map[string]error)
+	for i := 0; i < len(domains); i++ {
+		select {
+		case res := <-resc:
+			responses[res.Domain] = res
+		case err := <-errc:
+			failures[err.Domain] = err.Error
+		}
+	}
+
+	// Preserve the caller-supplied domain order in the returned slice.
+	challenges := make([]authorizationResource, 0, len(responses))
+	for _, domain := range domains {
+		if challenge, ok := responses[domain]; ok {
+			challenges = append(challenges, challenge)
+		}
+	}
+
+	close(resc)
+	close(errc)
+
+	return challenges, failures
+}
+
+// requestCertificate generates a CSR for the authorized domains (authz[0]
+// becomes the CommonName, the rest SANs), posts it to the new-cert endpoint
+// and polls until the server hands back the issued certificate.
+// If privKey is nil a fresh key of c.keyType is generated.
+func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey) (CertificateResource, error) {
+	if len(authz) == 0 {
+		return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!")
+	}
+
+	commonName := authz[0]
+	var err error
+	if privKey == nil {
+		privKey, err = generatePrivateKey(c.keyType)
+		if err != nil {
+			return CertificateResource{}, err
+		}
+	}
+
+	// All authorizations after the first become Subject Alternative Names.
+	var san []string
+	var authURLs []string
+	for _, auth := range authz[1:] {
+		san = append(san, auth.Domain)
+		authURLs = append(authURLs, auth.AuthURL)
+	}
+
+	// TODO: should the CSR be customizable?
+	csr, err := generateCsr(privKey, commonName.Domain, san)
+	if err != nil {
+		return CertificateResource{}, err
+	}
+
+	csrString := base64.URLEncoding.EncodeToString(csr)
+	jsonBytes, err := json.Marshal(csrMessage{Resource: "new-cert", Csr: csrString, Authorizations: authURLs})
+	if err != nil {
+		return CertificateResource{}, err
+	}
+
+	resp, err := c.jws.post(commonName.NewCertURL, jsonBytes)
+	if err != nil {
+		return CertificateResource{}, err
+	}
+
+	privateKeyPem := pemEncode(privKey)
+	cerRes := CertificateResource{
+		Domain: commonName.Domain,
+		CertURL: resp.Header.Get("Location"),
+		PrivateKey: privateKeyPem}
+
+	// Poll the certificate URL until the server returns a non-empty body.
+	for {
+		switch resp.StatusCode {
+		case 201, 202:
+			cert, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+			resp.Body.Close()
+			if err != nil {
+				return CertificateResource{}, err
+			}
+
+			// The server returns a body with a length of zero if the
+			// certificate was not ready at the time this request completed.
+			// Otherwise the body is the certificate.
+			if len(cert) > 0 {
+
+				cerRes.CertStableURL = resp.Header.Get("Content-Location")
+				cerRes.AccountRef = c.user.GetRegistration().URI
+
+				issuedCert := pemEncode(derCertificateBytes(cert))
+				// If bundle is true, we want to return a certificate bundle.
+				// To do this, we need the issuer certificate.
+				if bundle {
+					// The issuer certificate link is always supplied via an "up" link
+					// in the response headers of a new certificate.
+					links := parseLinks(resp.Header["Link"])
+					issuerCert, err := c.getIssuerCertificate(links["up"])
+					if err != nil {
+						// If we fail to acquire the issuer cert, return the issued certificate - do not fail.
+						logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", commonName.Domain, err)
+					} else {
+						// Success - append the issuer cert to the issued cert.
+						issuerCert = pemEncode(derCertificateBytes(issuerCert))
+						issuedCert = append(issuedCert, issuerCert...)
+					}
+				}
+
+				cerRes.Certificate = issuedCert
+				logf("[INFO][%s] Server responded with a certificate.", commonName.Domain)
+				return cerRes, nil
+			}
+
+			// The certificate was granted but is not yet issued.
+			// Check retry-after and loop.
+			ra := resp.Header.Get("Retry-After")
+			retryAfter, err := strconv.Atoi(ra)
+			if err != nil {
+				// A missing/invalid Retry-After aborts the poll entirely.
+				return CertificateResource{}, err
+			}
+
+			logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", commonName.Domain, retryAfter)
+			time.Sleep(time.Duration(retryAfter) * time.Second)
+
+			// This break only exits the switch; the loop re-GETs below.
+			break
+		default:
+			// NOTE(review): resp.Body is not closed on this path before
+			// returning (handleHTTPError reads it) — possible leak, confirm.
+			return CertificateResource{}, handleHTTPError(resp)
+		}
+
+		// Fetch the certificate URL again for the next poll iteration.
+		resp, err = httpGet(cerRes.CertURL)
+		if err != nil {
+			return CertificateResource{}, err
+		}
+	}
+}
+
+// getIssuerCertificate requests the issuer certificate and caches it for
+// subsequent requests. Returns the raw DER bytes of the issuer certificate.
+func (c *Client) getIssuerCertificate(url string) ([]byte, error) {
+	// FIX: serve the cache before logging — the old order logged a
+	// "Requesting issuer cert" line even when no request was made.
+	if c.issuerCert != nil {
+		return c.issuerCert, nil
+	}
+	logf("[INFO] acme: Requesting issuer cert from %s", url)
+
+	resp, err := httpGet(url)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	// Cap the read to guard against oversized responses.
+	issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate that the bytes really are a DER certificate before caching.
+	_, err = x509.ParseCertificate(issuerBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	c.issuerCert = issuerBytes
+	return issuerBytes, nil
+}
+
+// parseLinks parses HTTP "Link" header values of the form
+//	<https://example.com/acme/new-authz>;rel="next"
+// into a map keyed by the rel value ("next", "up", "terms-of-service", ...).
+func parseLinks(links []string) map[string]string {
+	aBrkt := regexp.MustCompile("[<>]")
+	slver := regexp.MustCompile("(.+) *= *\"(.+)\"")
+	linkMap := make(map[string]string)
+
+	for _, link := range links {
+		link = aBrkt.ReplaceAllString(link, "")
+		parts := strings.Split(link, ";")
+		// BUG FIX: a malformed header without ";" previously panicked on parts[1].
+		if len(parts) < 2 {
+			continue
+		}
+
+		matches := slver.FindStringSubmatch(parts[1])
+		if len(matches) > 0 {
+			linkMap[matches[2]] = parts[0]
+		}
+	}
+
+	return linkMap
+}
+
+// validate makes the ACME server start validating a
+// challenge response, only returning once it is done.
+func validate(j *jws, domain, uri string, chlng challenge) error {
+	var challengeResponse challenge
+
+	// Kick off validation by posting the challenge response.
+	hdr, err := postJSON(j, uri, chlng, &challengeResponse)
+	if err != nil {
+		return err
+	}
+
+	// After the path is sent, the ACME server will access our server.
+	// Repeatedly check the server for an updated status on our request.
+	for {
+		switch challengeResponse.Status {
+		case "valid":
+			logf("[INFO][%s] The server validated our request", domain)
+			return nil
+		case "pending":
+			// This break only exits the switch; we fall through to the
+			// Retry-After sleep and poll again.
+			break
+		case "invalid":
+			return handleChallengeError(challengeResponse)
+		default:
+			return errors.New("The server returned an unexpected state.")
+		}
+
+		ra, err := strconv.Atoi(hdr.Get("Retry-After"))
+		if err != nil {
+			// The ACME server MUST return a Retry-After.
+			// If it doesn't, we'll just poll hard.
+			ra = 1
+		}
+		time.Sleep(time.Duration(ra) * time.Second)
+
+		// Refresh the challenge status for the next loop iteration.
+		hdr, err = getJSON(uri, &challengeResponse)
+		if err != nil {
+			return err
+		}
+	}
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go
new file mode 100644
index 00000000..e309554f
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go
@@ -0,0 +1,198 @@
+package acme
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/json"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+// TestNewClient verifies that NewClient wires up the jws signer, key type
+// and default solvers from a stubbed directory endpoint.
+func TestNewClient(t *testing.T) {
+	keyBits := 32 // small value keeps test fast
+	keyType := RSA2048
+	key, err := rsa.GenerateKey(rand.Reader, keyBits)
+	if err != nil {
+		t.Fatal("Could not generate test key:", err)
+	}
+	user := mockUser{
+		email:      "test@test.com",
+		regres:     new(RegistrationResource),
+		privatekey: key,
+	}
+
+	// Stub directory endpoint; NewClient fetches it during construction.
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"})
+		w.Write(data)
+	}))
+	// FIX: the test server was previously leaked; shut it down at test end.
+	defer ts.Close()
+
+	client, err := NewClient(ts.URL, user, keyType)
+	if err != nil {
+		t.Fatalf("Could not create client: %v", err)
+	}
+
+	if client.jws == nil {
+		t.Fatalf("Expected client.jws to not be nil")
+	}
+	if expected, actual := key, client.jws.privKey; actual != expected {
+		t.Errorf("Expected jws.privKey to be %p but was %p", expected, actual)
+	}
+
+	if client.keyType != keyType {
+		t.Errorf("Expected keyType to be %s but was %s", keyType, client.keyType)
+	}
+
+	if expected, actual := 2, len(client.solvers); actual != expected {
+		t.Fatalf("Expected %d solver(s), got %d", expected, actual)
+	}
+}
+
+// TestClientOptPort verifies that SetHTTPAddress/SetTLSAddress propagate the
+// chosen interface and port to the http-01 and tls-sni-01 solvers.
+func TestClientOptPort(t *testing.T) {
+	keyBits := 32 // small value keeps test fast
+	key, err := rsa.GenerateKey(rand.Reader, keyBits)
+	if err != nil {
+		t.Fatal("Could not generate test key:", err)
+	}
+	user := mockUser{
+		email:      "test@test.com",
+		regres:     new(RegistrationResource),
+		privatekey: key,
+	}
+
+	// Stub directory endpoint; NewClient fetches it during construction.
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"})
+		w.Write(data)
+	}))
+	// FIX: the test server was previously leaked; shut it down at test end.
+	defer ts.Close()
+
+	optPort := "1234"
+	optHost := ""
+	client, err := NewClient(ts.URL, user, RSA2048)
+	if err != nil {
+		t.Fatalf("Could not create client: %v", err)
+	}
+	client.SetHTTPAddress(net.JoinHostPort(optHost, optPort))
+	client.SetTLSAddress(net.JoinHostPort(optHost, optPort))
+
+	httpSolver, ok := client.solvers[HTTP01].(*httpChallenge)
+	if !ok {
+		t.Fatal("Expected http-01 solver to be httpChallenge type")
+	}
+	if httpSolver.jws != client.jws {
+		t.Error("Expected http-01 to have same jws as client")
+	}
+	if got := httpSolver.provider.(*HTTPProviderServer).port; got != optPort {
+		t.Errorf("Expected http-01 to have port %s but was %s", optPort, got)
+	}
+	if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost {
+		t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got)
+	}
+
+	httpsSolver, ok := client.solvers[TLSSNI01].(*tlsSNIChallenge)
+	if !ok {
+		t.Fatal("Expected tls-sni-01 solver to be httpChallenge type")
+	}
+	if httpsSolver.jws != client.jws {
+		t.Error("Expected tls-sni-01 to have same jws as client")
+	}
+	if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort {
+		t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got)
+	}
+	if got := httpsSolver.provider.(*TLSProviderServer).iface; got != optHost {
+		t.Errorf("Expected tls-sni-01 to have port %s but was %s", optHost, got)
+	}
+
+	// test setting different host
+	optHost = "127.0.0.1"
+	client.SetHTTPAddress(net.JoinHostPort(optHost, optPort))
+	client.SetTLSAddress(net.JoinHostPort(optHost, optPort))
+
+	if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost {
+		t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got)
+	}
+	if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort {
+		t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got)
+	}
+}
+
+// TestValidate drives validate() against a stub ACME server whose responses
+// are taken, in order, from the statuses queue of each table entry.
+func TestValidate(t *testing.T) {
+	var statuses []string
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Minimal stub ACME server for validation.
+		w.Header().Add("Replay-Nonce", "12345")
+		// Retry-After: 0 keeps the poll loop from sleeping.
+		w.Header().Add("Retry-After", "0")
+		switch r.Method {
+		case "HEAD":
+		case "POST":
+			st := statuses[0]
+			statuses = statuses[1:]
+			writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"})
+
+		case "GET":
+			st := statuses[0]
+			statuses = statuses[1:]
+			writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"})
+
+		default:
+			http.Error(w, r.Method, http.StatusMethodNotAllowed)
+		}
+	}))
+	defer ts.Close()
+
+	privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+	j := &jws{privKey: privKey, directoryURL: ts.URL}
+
+	// want is a substring expected in the returned error ("" = success).
+	tsts := []struct {
+		name string
+		statuses []string
+		want string
+	}{
+		{"POST-unexpected", []string{"weird"}, "unexpected"},
+		{"POST-valid", []string{"valid"}, ""},
+		{"POST-invalid", []string{"invalid"}, "Error Detail"},
+		{"GET-unexpected", []string{"pending", "weird"}, "unexpected"},
+		{"GET-valid", []string{"pending", "valid"}, ""},
+		{"GET-invalid", []string{"pending", "invalid"}, "Error Detail"},
+	}
+
+	for _, tst := range tsts {
+		statuses = tst.statuses
+		// First branch: an error was expected but none occurred.
+		// Second branch: an error occurred but lacks the expected substring.
+		if err := validate(j, "example.com", ts.URL, challenge{Type: "http-01", Token: "token"}); err == nil && tst.want != "" {
+			t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want)
+		} else if err != nil && !strings.Contains(err.Error(), tst.want) {
+			t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want)
+		}
+	}
+}
+
+// writeJSONResponse marshals the body as JSON and writes it to the response.
+func writeJSONResponse(w http.ResponseWriter, body interface{}) {
+	payload, err := json.Marshal(body)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	_, err = w.Write(payload)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
+}
+
+// stubValidate is like validate, except it does nothing.
+// It matches validate's signature so solver tests can swap it in and skip
+// the real server round-trips; all parameters are accepted and ignored.
+func stubValidate(j *jws, domain, uri string, chlng challenge) error {
+	return nil
+}
+
+// mockUser is a canned implementation of the ACME user interface for tests.
+type mockUser struct {
+	email string                // returned by GetEmail
+	regres *RegistrationResource // returned by GetRegistration
+	privatekey *rsa.PrivateKey   // returned by GetPrivateKey
+}
+
+// GetEmail returns the stubbed account email.
+func (u mockUser) GetEmail() string { return u.email }
+// GetRegistration returns the stubbed registration resource.
+func (u mockUser) GetRegistration() *RegistrationResource { return u.regres }
+// GetPrivateKey returns the stubbed account key.
+func (u mockUser) GetPrivateKey() crypto.PrivateKey { return u.privatekey }
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go
new file mode 100644
index 00000000..fc20442f
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go
@@ -0,0 +1,323 @@
+package acme
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/big"
+ "net/http"
+ "strings"
+ "time"
+
+ "golang.org/x/crypto/ocsp"
+)
+
+// KeyType represents the key algo as well as the key size or curve to use.
+type KeyType string
+// derCertificateBytes tags a []byte as DER-encoded certificate data so that
+// pemEncode can emit a "CERTIFICATE" PEM block for it.
+type derCertificateBytes []byte
+
+// Constants for all key types we support.
+const (
+	// NIST P-256 and P-384 elliptic curves.
+	EC256 = KeyType("P256")
+	EC384 = KeyType("P384")
+	// RSA keys of the given modulus size in bits.
+	RSA2048 = KeyType("2048")
+	RSA4096 = KeyType("4096")
+	RSA8192 = KeyType("8192")
+)
+
+// Re-exported OCSP statuses so callers need not import the ocsp package.
+const (
+	// OCSPGood means that the certificate is valid.
+	OCSPGood = ocsp.Good
+	// OCSPRevoked means that the certificate has been deliberately revoked.
+	OCSPRevoked = ocsp.Revoked
+	// OCSPUnknown means that the OCSP responder doesn't know about the certificate.
+	OCSPUnknown = ocsp.Unknown
+	// OCSPServerFailed means that the OCSP responder failed to process the request.
+	OCSPServerFailed = ocsp.ServerFailed
+)
+
+// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response,
+// the parsed response, and an error, if any. The returned []byte can be passed directly
+// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
+// issued certificate, this function will try to get the issuer certificate from the
+// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
+// values are nil, the OCSP status may be assumed OCSPUnknown.
+func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) {
+	certificates, err := parsePEMBundle(bundle)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// We expect the certificate slice to be ordered downwards the chain.
+	// SRV CRT -> CA. We need to pull the leaf and issuer certs out of it,
+	// which should always be the first two certificates. If there's no
+	// OCSP server listed in the leaf cert, there's nothing to do. And if
+	// we have only one certificate so far, we need to get the issuer cert.
+	issuedCert := certificates[0]
+	if len(issuedCert.OCSPServer) == 0 {
+		return nil, nil, errors.New("no OCSP server specified in cert")
+	}
+	if len(certificates) == 1 {
+		// TODO: build fallback. If this fails, check the remaining array entries.
+		if len(issuedCert.IssuingCertificateURL) == 0 {
+			return nil, nil, errors.New("no issuing certificate URL")
+		}
+
+		resp, err := httpGet(issuedCert.IssuingCertificateURL[0])
+		if err != nil {
+			return nil, nil, err
+		}
+		defer resp.Body.Close()
+
+		issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+		if err != nil {
+			return nil, nil, err
+		}
+
+		issuerCert, err := x509.ParseCertificate(issuerBytes)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		// Append the issuer so the chain reads SRV CRT -> CA.
+		certificates = append(certificates, issuerCert)
+	}
+	issuerCert := certificates[1]
+
+	// Finally kick off the OCSP request.
+	ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	reader := bytes.NewReader(ocspReq)
+	// FIX: the variable was misleadingly named "req" — httpPost returns a response.
+	ocspHTTPResp, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer ocspHTTPResp.Body.Close()
+
+	ocspResBytes, err := ioutil.ReadAll(limitReader(ocspHTTPResp.Body, 1024*1024))
+	// BUG FIX: the read error was previously ignored before parsing the response.
+	if err != nil {
+		return nil, nil, err
+	}
+	ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// A response without an embedded signer certificate must be signed
+	// directly by the issuer; verify that signature.
+	if ocspRes.Certificate == nil {
+		err = ocspRes.CheckSignatureFrom(issuerCert)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return ocspResBytes, ocspRes, nil
+}
+
+// getKeyAuthorization builds the "token.thumbprint" key authorization string
+// for the given challenge token and account private key (RSA or ECDSA).
+func getKeyAuthorization(token string, key interface{}) (string, error) {
+	var publicKey crypto.PublicKey
+	switch k := key.(type) {
+	case *ecdsa.PrivateKey:
+		publicKey = k.Public()
+	case *rsa.PrivateKey:
+		publicKey = k.Public()
+	}
+
+	// Generate the Key Authorization for the challenge
+	jwk := keyAsJWK(publicKey)
+	if jwk == nil {
+		return "", errors.New("Could not generate JWK from key.")
+	}
+	thumbBytes, err := jwk.Thumbprint(crypto.SHA256)
+	if err != nil {
+		return "", err
+	}
+
+	// Thumbprints are base64url encoded without padding; strip at the
+	// first '=' to unpad.
+	keyThumb := base64.URLEncoding.EncodeToString(thumbBytes)
+	if idx := strings.Index(keyThumb, "="); idx != -1 {
+		keyThumb = keyThumb[:idx]
+	}
+
+	return token + "." + keyThumb, nil
+}
+
+// parsePEMBundle parses a certificate bundle from top to bottom and returns
+// a slice of x509 certificates. This function will error if no certificates are found.
+func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) {
+	var certificates []*x509.Certificate
+
+	// Walk every PEM block in the bundle, keeping only CERTIFICATE blocks.
+	for block, rest := pem.Decode(bundle); block != nil; block, rest = pem.Decode(rest) {
+		if block.Type != "CERTIFICATE" {
+			continue
+		}
+		cert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		certificates = append(certificates, cert)
+	}
+
+	if len(certificates) == 0 {
+		return nil, errors.New("No certificates were found while parsing the bundle.")
+	}
+
+	return certificates, nil
+}
+
+// parsePEMPrivateKey decodes a PEM encoded RSA or EC private key.
+func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) {
+	keyBlock, _ := pem.Decode(key)
+	// BUG FIX: pem.Decode returns nil for non-PEM input; the old code
+	// dereferenced it unconditionally and panicked.
+	if keyBlock == nil {
+		return nil, errors.New("Could not decode PEM block from key")
+	}
+
+	switch keyBlock.Type {
+	case "RSA PRIVATE KEY":
+		return x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
+	case "EC PRIVATE KEY":
+		return x509.ParseECPrivateKey(keyBlock.Bytes)
+	default:
+		return nil, errors.New("Unknown PEM header value")
+	}
+}
+
+// generatePrivateKey creates a fresh private key of the requested type and
+// size/curve, or errors on an unrecognized KeyType.
+func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) {
+	switch keyType {
+	case EC256:
+		return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	case EC384:
+		return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+	case RSA2048:
+		return rsa.GenerateKey(rand.Reader, 2048)
+	case RSA4096:
+		return rsa.GenerateKey(rand.Reader, 4096)
+	case RSA8192:
+		return rsa.GenerateKey(rand.Reader, 8192)
+	default:
+		return nil, fmt.Errorf("Invalid KeyType: %s", keyType)
+	}
+}
+
+// generateCsr builds a certificate signing request with domain as the
+// CommonName and san as the Subject Alternative Names, signed by privateKey.
+func generateCsr(privateKey crypto.PrivateKey, domain string, san []string) ([]byte, error) {
+	template := x509.CertificateRequest{
+		Subject:  pkix.Name{CommonName: domain},
+		DNSNames: san,
+	}
+	return x509.CreateCertificateRequest(rand.Reader, &template, privateKey)
+}
+
+// pemEncode PEM encodes a supported value: *ecdsa.PrivateKey,
+// *rsa.PrivateKey or derCertificateBytes. Unsupported types yield nil.
+func pemEncode(data interface{}) []byte {
+	var pemBlock *pem.Block
+	switch key := data.(type) {
+	case *ecdsa.PrivateKey:
+		keyBytes, _ := x509.MarshalECPrivateKey(key)
+		pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}
+	case *rsa.PrivateKey:
+		pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}
+	case derCertificateBytes:
+		pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(key)}
+	default:
+		// BUG FIX: an unsupported type used to leave pemBlock nil and
+		// pem.EncodeToMemory(nil) panicked; fail soft with nil instead.
+		return nil
+	}
+
+	return pem.EncodeToMemory(pemBlock)
+}
+
+// pemDecode extracts the first PEM block from data, erroring when none exists.
+func pemDecode(data []byte) (*pem.Block, error) {
+	block, _ := pem.Decode(data)
+	if block == nil {
+		return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?")
+	}
+	return block, nil
+}
+
+// pemDecodeTox509 decodes a PEM encoded certificate into its parsed
+// x509.Certificate form.
+// NOTE: the parameter name shadows the encoding/pem package inside this
+// function; the package itself is not used here, so that is harmless.
+func pemDecodeTox509(pem []byte) (*x509.Certificate, error) {
+	block, err := pemDecode(pem)
+	if err != nil {
+		return nil, err
+	}
+	return x509.ParseCertificate(block.Bytes)
+}
+
+// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate.
+// The certificate has to be PEM encoded. Any other encodings like DER will fail.
+func GetPEMCertExpiration(cert []byte) (time.Time, error) {
+	block, err := pemDecode(cert)
+	if block == nil {
+		// No usable PEM block; propagate pemDecode's error.
+		return time.Time{}, err
+	}
+	return getCertExpiration(block.Bytes)
+}
+
+// getCertExpiration returns the "NotAfter" date of a DER encoded certificate.
+func getCertExpiration(cert []byte) (time.Time, error) {
+	parsed, err := x509.ParseCertificate(cert)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return parsed.NotAfter, nil
+}
+
+// generatePemCert creates a self-signed certificate for domain and returns
+// it PEM encoded. A zero expiration lets generateDerCert choose its default.
+func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) {
+	der, err := generateDerCert(privKey, time.Time{}, domain)
+	if err != nil {
+		return nil, err
+	}
+	block := &pem.Block{Type: "CERTIFICATE", Bytes: der}
+	return pem.EncodeToMemory(block), nil
+}
+
+// generateDerCert creates a self-signed DER encoded certificate for domain,
+// valid until expiration (defaulting to one year from now when zero).
+func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) {
+	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+	if err != nil {
+		return nil, err
+	}
+
+	if expiration.IsZero() {
+		// BUG FIX: time.Now().Add(365) added 365 *nanoseconds*, producing a
+		// certificate that was already expired; 365 days was intended.
+		expiration = time.Now().Add(365 * 24 * time.Hour)
+	}
+
+	template := x509.Certificate{
+		SerialNumber: serialNumber,
+		Subject: pkix.Name{
+			CommonName: "ACME Challenge TEMP",
+		},
+		NotBefore: time.Now(),
+		NotAfter: expiration,
+
+		KeyUsage: x509.KeyUsageKeyEncipherment,
+		BasicConstraintsValid: true,
+		DNSNames: []string{domain},
+	}
+
+	return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
+}
+
+// limitReader caps reads from rd at numBytes, guarding the callers above
+// against oversized or malicious response bodies.
+// NOTE(review): http.MaxBytesReader is given a nil ResponseWriter here; that
+// is only officially documented as allowed in newer Go releases — confirm
+// against the toolchain this vendored code is built with.
+func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser {
+	return http.MaxBytesReader(nil, rd, numBytes)
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go
new file mode 100644
index 00000000..d2fc5088
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go
@@ -0,0 +1,93 @@
+package acme
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/rsa"
+ "testing"
+ "time"
+)
+
+// TestGeneratePrivateKey checks that a supported key type yields a non-nil
+// key without error.
+func TestGeneratePrivateKey(t *testing.T) {
+	key, err := generatePrivateKey(RSA2048)
+	if err != nil {
+		t.Error("Error generating private key:", err)
+	}
+	if key == nil {
+		t.Error("Expected key to not be nil, but it was")
+	}
+}
+
+// TestGenerateCSR checks that a CSR is produced for a simple CN-only request.
+func TestGenerateCSR(t *testing.T) {
+	key, err := rsa.GenerateKey(rand.Reader, 512)
+	if err != nil {
+		t.Fatal("Error generating private key:", err)
+	}
+
+	csr, err := generateCsr(key, "fizz.buzz", nil)
+	if err != nil {
+		t.Error("Error generating CSR:", err)
+	}
+	// The nil check is redundant (len(nil) == 0) but kept for clarity.
+	if csr == nil || len(csr) == 0 {
+		t.Error("Expected CSR with data, but it was nil or length 0")
+	}
+}
+
+// TestPEMEncode pins the exact PEM output length for a key generated from a
+// deterministic "random" source.
+// NOTE(review): the expected length 127 is tied to this specific seed string
+// and 32-bit key size; any change to either silently invalidates the test.
+func TestPEMEncode(t *testing.T) {
+	buf := bytes.NewBufferString("TestingRSAIsSoMuchFun")
+
+	// MockRandReader makes rsa.GenerateKey fully deterministic.
+	reader := MockRandReader{b: buf}
+	key, err := rsa.GenerateKey(reader, 32)
+	if err != nil {
+		t.Fatal("Error generating private key:", err)
+	}
+
+	data := pemEncode(key)
+
+	if data == nil {
+		t.Fatal("Expected result to not be nil, but it was")
+	}
+	if len(data) != 127 {
+		t.Errorf("Expected PEM encoding to be length 127, but it was %d", len(data))
+	}
+}
+
+// TestPEMCertExpiration round-trips a generated certificate's NotAfter date
+// through GetPEMCertExpiration and checks the error paths for non-PEM input.
+func TestPEMCertExpiration(t *testing.T) {
+	privKey, err := generatePrivateKey(RSA2048)
+	if err != nil {
+		t.Fatal("Error generating private key:", err)
+	}
+
+	// NOTE(review): time.Now().Add(365) adds 365 *nanoseconds*, not days;
+	// the round-trip below still passes because the same value is compared,
+	// but the intent was likely a one-year expiration — confirm.
+	expiration := time.Now().Add(365)
+	expiration = expiration.Round(time.Second)
+	certBytes, err := generateDerCert(privKey.(*rsa.PrivateKey), expiration, "test.com")
+	if err != nil {
+		t.Fatal("Error generating cert:", err)
+	}
+
+	buf := bytes.NewBufferString("TestingRSAIsSoMuchFun")
+
+	// Some random string should return an error.
+	if ctime, err := GetPEMCertExpiration(buf.Bytes()); err == nil {
+		t.Errorf("Expected getCertExpiration to return an error for garbage string but returned %v", ctime)
+	}
+
+	// A DER encoded certificate should return an error.
+	if _, err := GetPEMCertExpiration(certBytes); err == nil {
+		t.Errorf("Expected getCertExpiration to return an error for DER certificates but returned none.")
+	}
+
+	// A PEM encoded certificate should work ok.
+	pemCert := pemEncode(derCertificateBytes(certBytes))
+	if ctime, err := GetPEMCertExpiration(pemCert); err != nil || !ctime.Equal(expiration.UTC()) {
+		t.Errorf("Expected getCertExpiration to return %v but returned %v. Error: %v", expiration, ctime, err)
+	}
+}
+
+// MockRandReader is a deterministic io.Reader backed by a fixed buffer,
+// used in place of crypto/rand for reproducible key generation in tests.
+type MockRandReader struct {
+	b *bytes.Buffer // the fixed "entropy" source
+}
+
+// Read drains bytes from the underlying buffer.
+func (r MockRandReader) Read(p []byte) (int, error) {
+	return r.b.Read(p)
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go
new file mode 100644
index 00000000..b32561a3
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go
@@ -0,0 +1,73 @@
+package acme
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+)
+
+const (
+ tosAgreementError = "Must agree to subscriber agreement before any further actions"
+)
+
+// RemoteError is the base type for all errors specific to the ACME protocol.
+type RemoteError struct {
+ StatusCode int `json:"status,omitempty"`
+ Type string `json:"type"`
+ Detail string `json:"detail"`
+}
+
+func (e RemoteError) Error() string {
+ return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail)
+}
+
+// TOSError represents the error which is returned if the user needs to
+// accept the TOS.
+// TODO: include the new TOS url if we can somehow obtain it.
+type TOSError struct {
+ RemoteError
+}
+
+type domainError struct {
+ Domain string
+ Error error
+}
+
+type challengeError struct {
+ RemoteError
+ records []validationRecord
+}
+
+func (c challengeError) Error() string {
+
+ var errStr string
+ for _, validation := range c.records {
+ errStr = errStr + fmt.Sprintf("\tValidation for %s:%s\n\tResolved to:\n\t\t%s\n\tUsed: %s\n\n",
+ validation.Hostname, validation.Port, strings.Join(validation.ResolvedAddresses, "\n\t\t"), validation.UsedAddress)
+ }
+
+ return fmt.Sprintf("%s\nError Detail:\n%s", c.RemoteError.Error(), errStr)
+}
+
+func handleHTTPError(resp *http.Response) error {
+ var errorDetail RemoteError
+ decoder := json.NewDecoder(resp.Body)
+ err := decoder.Decode(&errorDetail)
+ if err != nil {
+ return err
+ }
+
+ errorDetail.StatusCode = resp.StatusCode
+
+ // Check for errors we handle specifically
+ if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError {
+ return TOSError{errorDetail}
+ }
+
+ return errorDetail
+}
+
+func handleChallengeError(chlng challenge) error {
+ return challengeError{chlng.Error, chlng.ValidationRecords}
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go
new file mode 100644
index 00000000..410aead6
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go
@@ -0,0 +1,117 @@
+package acme
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests.
+var UserAgent string
+
+// defaultClient is an HTTP client with a reasonable timeout value.
+var defaultClient = http.Client{Timeout: 10 * time.Second}
+
+const (
+ // defaultGoUserAgent is the Go HTTP package user agent string. Too
+ // bad it isn't exported. If it changes, we should update it here, too.
+ defaultGoUserAgent = "Go-http-client/1.1"
+
+ // ourUserAgent is the User-Agent of this underlying library package.
+ ourUserAgent = "xenolf-acme"
+)
+
+// httpHead performs a HEAD request with a proper User-Agent string.
+// The response body (resp.Body) is already closed when this function returns.
+func httpHead(url string) (resp *http.Response, err error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("User-Agent", userAgent())
+
+ resp, err = defaultClient.Do(req)
+ if err != nil {
+ return resp, err
+ }
+ resp.Body.Close()
+ return resp, err
+}
+
+// httpPost performs a POST request with a proper User-Agent string.
+// Callers should close resp.Body when done reading from it.
+func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ req.Header.Set("User-Agent", userAgent())
+
+ return defaultClient.Do(req)
+}
+
+// httpGet performs a GET request with a proper User-Agent string.
+// Callers should close resp.Body when done reading from it.
+func httpGet(url string) (resp *http.Response, err error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", userAgent())
+
+ return defaultClient.Do(req)
+}
+
+// getJSON performs an HTTP GET request and parses the response body
+// as JSON, into the provided respBody object.
+func getJSON(uri string, respBody interface{}) (http.Header, error) {
+ resp, err := httpGet(uri)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get %q: %v", uri, err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= http.StatusBadRequest {
+ return resp.Header, handleHTTPError(resp)
+ }
+
+ return resp.Header, json.NewDecoder(resp.Body).Decode(respBody)
+}
+
+// postJSON performs an HTTP POST request and parses the response body
+// as JSON, into the provided respBody object.
+func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) {
+ jsonBytes, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, errors.New("Failed to marshal network message...")
+ }
+
+ resp, err := j.post(uri, jsonBytes)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to post JWS message. -> %v", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= http.StatusBadRequest {
+ return resp.Header, handleHTTPError(resp)
+ }
+
+ if respBody == nil {
+ return resp.Header, nil
+ }
+
+ return resp.Header, json.NewDecoder(resp.Body).Decode(respBody)
+}
+
+// userAgent builds and returns the User-Agent string to use in requests.
+func userAgent() string {
+ ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent)
+ return strings.TrimSpace(ua)
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go
new file mode 100644
index 00000000..95cb1fd8
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go
@@ -0,0 +1,41 @@
+package acme
+
+import (
+ "fmt"
+ "log"
+)
+
+type httpChallenge struct {
+ jws *jws
+ validate validateFunc
+ provider ChallengeProvider
+}
+
+// HTTP01ChallengePath returns the URL path for the `http-01` challenge
+func HTTP01ChallengePath(token string) string {
+ return "/.well-known/acme-challenge/" + token
+}
+
+func (s *httpChallenge) Solve(chlng challenge, domain string) error {
+
+ logf("[INFO][%s] acme: Trying to solve HTTP-01", domain)
+
+ // Generate the Key Authorization for the challenge
+ keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey)
+ if err != nil {
+ return err
+ }
+
+ err = s.provider.Present(domain, chlng.Token, keyAuth)
+ if err != nil {
+ return fmt.Errorf("[%s] error presenting token: %v", domain, err)
+ }
+ defer func() {
+ err := s.provider.CleanUp(domain, chlng.Token, keyAuth)
+ if err != nil {
+ log.Printf("[%s] error cleaning up: %v", domain, err)
+ }
+ }()
+
+ return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go
new file mode 100644
index 00000000..42541380
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go
@@ -0,0 +1,79 @@
+package acme
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// HTTPProviderServer implements ChallengeProvider for `http-01` challenge
+// It may be instantiated without using the NewHTTPProviderServer function if
+// you want only to use the default values.
+type HTTPProviderServer struct {
+ iface string
+ port string
+ done chan bool
+ listener net.Listener
+}
+
+// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port.
+// Setting iface and / or port to an empty string will make the server fall back to
+// the "any" interface and port 80 respectively.
+func NewHTTPProviderServer(iface, port string) *HTTPProviderServer {
+ return &HTTPProviderServer{iface: iface, port: port}
+}
+
+// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests.
+func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error {
+ if s.port == "" {
+ s.port = "80"
+ }
+
+ var err error
+ s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port))
+ if err != nil {
+ return fmt.Errorf("Could not start HTTP server for challenge -> %v", err)
+ }
+
+ s.done = make(chan bool)
+ go s.serve(domain, token, keyAuth)
+ return nil
+}
+
+// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)`
+func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error {
+ if s.listener == nil {
+ return nil
+ }
+ s.listener.Close()
+ <-s.done
+ return nil
+}
+
+func (s *HTTPProviderServer) serve(domain, token, keyAuth string) {
+ path := HTTP01ChallengePath(token)
+
+	// The handler validates the Host header and the request method.
+	// For a valid request it writes back the key authorization so the ACME server can verify the challenge.
+ mux := http.NewServeMux()
+ mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+ if strings.HasPrefix(r.Host, domain) && r.Method == "GET" {
+ w.Header().Add("Content-Type", "text/plain")
+ w.Write([]byte(keyAuth))
+ logf("[INFO][%s] Served key authentication", domain)
+ } else {
+ logf("[INFO] Received request for domain %s with method %s", r.Host, r.Method)
+ w.Write([]byte("TEST"))
+ }
+ })
+
+ httpServer := &http.Server{
+ Handler: mux,
+ }
+ // Once httpServer is shut down we don't want any lingering
+ // connections, so disable KeepAlives.
+ httpServer.SetKeepAlivesEnabled(false)
+ httpServer.Serve(s.listener)
+ s.done <- true
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go
new file mode 100644
index 00000000..fdd8f4d2
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go
@@ -0,0 +1,57 @@
+package acme
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+func TestHTTPChallenge(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: HTTP01, Token: "http1"}
+ mockValidate := func(_ *jws, _, _ string, chlng challenge) error {
+ uri := "http://localhost:23457/.well-known/acme-challenge/" + chlng.Token
+ resp, err := httpGet(uri)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if want := "text/plain"; resp.Header.Get("Content-Type") != want {
+ t.Errorf("Get(%q) Content-Type: got %q, want %q", uri, resp.Header.Get("Content-Type"), want)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ bodyStr := string(body)
+
+ if bodyStr != chlng.KeyAuthorization {
+ t.Errorf("Get(%q) Body: got %q, want %q", uri, bodyStr, chlng.KeyAuthorization)
+ }
+
+ return nil
+ }
+ solver := &httpChallenge{jws: j, validate: mockValidate, provider: &HTTPProviderServer{port: "23457"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil {
+ t.Errorf("Solve error: got %v, want nil", err)
+ }
+}
+
+func TestHTTPChallengeInvalidPort(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 128)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: HTTP01, Token: "http2"}
+ solver := &httpChallenge{jws: j, validate: stubValidate, provider: &HTTPProviderServer{port: "123456"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil {
+ t.Errorf("Solve error: got %v, want error", err)
+ } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) {
+ t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want)
+ }
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go
new file mode 100644
index 00000000..33a48a33
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go
@@ -0,0 +1,100 @@
+package acme
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+func TestHTTPHeadUserAgent(t *testing.T) {
+ var ua, method string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ua = r.Header.Get("User-Agent")
+ method = r.Method
+ }))
+ defer ts.Close()
+
+ _, err := httpHead(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if method != "HEAD" {
+ t.Errorf("Expected method to be HEAD, got %s", method)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua)
+ }
+}
+
+func TestHTTPGetUserAgent(t *testing.T) {
+ var ua, method string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ua = r.Header.Get("User-Agent")
+ method = r.Method
+ }))
+ defer ts.Close()
+
+ res, err := httpGet(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+
+ if method != "GET" {
+ t.Errorf("Expected method to be GET, got %s", method)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua)
+ }
+}
+
+func TestHTTPPostUserAgent(t *testing.T) {
+ var ua, method string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ua = r.Header.Get("User-Agent")
+ method = r.Method
+ }))
+ defer ts.Close()
+
+ res, err := httpPost(ts.URL, "text/plain", strings.NewReader("falalalala"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+
+ if method != "POST" {
+ t.Errorf("Expected method to be POST, got %s", method)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua)
+ }
+}
+
+func TestUserAgent(t *testing.T) {
+ ua := userAgent()
+
+ if !strings.Contains(ua, defaultGoUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua)
+ }
+ if strings.HasSuffix(ua, " ") {
+ t.Errorf("UA should not have trailing spaces; got '%s'", ua)
+ }
+
+ // customize the UA by appending a value
+ UserAgent = "MyApp/1.2.3"
+ ua = userAgent()
+ if !strings.Contains(ua, defaultGoUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua)
+ }
+ if !strings.Contains(ua, ourUserAgent) {
+ t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua)
+ }
+ if !strings.Contains(ua, UserAgent) {
+ t.Errorf("Expected custom UA to contain %s, got '%s'", UserAgent, ua)
+ }
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go
new file mode 100644
index 00000000..8435d0cf
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go
@@ -0,0 +1,107 @@
+package acme
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "fmt"
+ "net/http"
+
+ "gopkg.in/square/go-jose.v1"
+)
+
+type jws struct {
+ directoryURL string
+ privKey crypto.PrivateKey
+ nonces []string
+}
+
+func keyAsJWK(key interface{}) *jose.JsonWebKey {
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ return &jose.JsonWebKey{Key: k, Algorithm: "EC"}
+ case *rsa.PublicKey:
+ return &jose.JsonWebKey{Key: k, Algorithm: "RSA"}
+
+ default:
+ return nil
+ }
+}
+
+// Posts a JWS signed message to the specified URL
+func (j *jws) post(url string, content []byte) (*http.Response, error) {
+ signedContent, err := j.signContent(content)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize())))
+ if err != nil {
+ return nil, err
+ }
+
+ j.getNonceFromResponse(resp)
+
+ return resp, err
+}
+
+func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) {
+
+ var alg jose.SignatureAlgorithm
+ switch k := j.privKey.(type) {
+ case *rsa.PrivateKey:
+ alg = jose.RS256
+ case *ecdsa.PrivateKey:
+ if k.Curve == elliptic.P256() {
+ alg = jose.ES256
+ } else if k.Curve == elliptic.P384() {
+ alg = jose.ES384
+ }
+ }
+
+ signer, err := jose.NewSigner(alg, j.privKey)
+ if err != nil {
+ return nil, err
+ }
+ signer.SetNonceSource(j)
+
+ signed, err := signer.Sign(content)
+ if err != nil {
+ return nil, err
+ }
+ return signed, nil
+}
+
+func (j *jws) getNonceFromResponse(resp *http.Response) error {
+ nonce := resp.Header.Get("Replay-Nonce")
+ if nonce == "" {
+ return fmt.Errorf("Server did not respond with a proper nonce header.")
+ }
+
+ j.nonces = append(j.nonces, nonce)
+ return nil
+}
+
+func (j *jws) getNonce() error {
+ resp, err := httpHead(j.directoryURL)
+ if err != nil {
+ return err
+ }
+
+ return j.getNonceFromResponse(resp)
+}
+
+func (j *jws) Nonce() (string, error) {
+ nonce := ""
+ if len(j.nonces) == 0 {
+ err := j.getNonce()
+ if err != nil {
+ return nonce, err
+ }
+ }
+
+ nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1]
+ return nonce, nil
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go
new file mode 100644
index 00000000..d1fac920
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go
@@ -0,0 +1,115 @@
+package acme
+
+import (
+ "time"
+
+ "gopkg.in/square/go-jose.v1"
+)
+
+type directory struct {
+ NewAuthzURL string `json:"new-authz"`
+ NewCertURL string `json:"new-cert"`
+ NewRegURL string `json:"new-reg"`
+ RevokeCertURL string `json:"revoke-cert"`
+}
+
+// recoveryKeyMessage carries the client and server recovery JWKs.
+type recoveryKeyMessage struct {
+	Length int             `json:"length,omitempty"`
+	Client jose.JsonWebKey `json:"client,omitempty"`
+	Server jose.JsonWebKey `json:"server,omitempty"`
+}
+
+type registrationMessage struct {
+ Resource string `json:"resource"`
+ Contact []string `json:"contact"`
+ // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"`
+}
+
+// Registration is returned by the ACME server after the registration
+// The client implementation should save this registration somewhere.
+type Registration struct {
+ Resource string `json:"resource,omitempty"`
+ ID int `json:"id"`
+ Key jose.JsonWebKey `json:"key"`
+ Contact []string `json:"contact"`
+ Agreement string `json:"agreement,omitempty"`
+ Authorizations string `json:"authorizations,omitempty"`
+ Certificates string `json:"certificates,omitempty"`
+ // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"`
+}
+
+// RegistrationResource represents all important information about a registration
+// of which the client needs to keep track itself.
+type RegistrationResource struct {
+ Body Registration `json:"body,omitempty"`
+ URI string `json:"uri,omitempty"`
+ NewAuthzURL string `json:"new_authzr_uri,omitempty"`
+ TosURL string `json:"terms_of_service,omitempty"`
+}
+
+type authorizationResource struct {
+ Body authorization
+ Domain string
+ NewCertURL string
+ AuthURL string
+}
+
+type authorization struct {
+ Resource string `json:"resource,omitempty"`
+ Identifier identifier `json:"identifier"`
+ Status string `json:"status,omitempty"`
+ Expires time.Time `json:"expires,omitempty"`
+ Challenges []challenge `json:"challenges,omitempty"`
+ Combinations [][]int `json:"combinations,omitempty"`
+}
+
+type identifier struct {
+ Type string `json:"type"`
+ Value string `json:"value"`
+}
+
+type validationRecord struct {
+ URI string `json:"url,omitempty"`
+ Hostname string `json:"hostname,omitempty"`
+ Port string `json:"port,omitempty"`
+ ResolvedAddresses []string `json:"addressesResolved,omitempty"`
+ UsedAddress string `json:"addressUsed,omitempty"`
+}
+
+type challenge struct {
+ Resource string `json:"resource,omitempty"`
+ Type Challenge `json:"type,omitempty"`
+ Status string `json:"status,omitempty"`
+ URI string `json:"uri,omitempty"`
+ Token string `json:"token,omitempty"`
+ KeyAuthorization string `json:"keyAuthorization,omitempty"`
+ TLS bool `json:"tls,omitempty"`
+ Iterations int `json:"n,omitempty"`
+ Error RemoteError `json:"error,omitempty"`
+ ValidationRecords []validationRecord `json:"validationRecord,omitempty"`
+}
+
+type csrMessage struct {
+ Resource string `json:"resource,omitempty"`
+ Csr string `json:"csr"`
+ Authorizations []string `json:"authorizations"`
+}
+
+type revokeCertMessage struct {
+ Resource string `json:"resource"`
+ Certificate string `json:"certificate"`
+}
+
+// CertificateResource represents a CA issued certificate.
+// PrivateKey and Certificate are both already PEM encoded
+// and can be directly written to disk. Certificate may
+// be a certificate bundle, depending on the options supplied
+// to create it.
+type CertificateResource struct {
+ Domain string `json:"domain"`
+ CertURL string `json:"certUrl"`
+ CertStableURL string `json:"certStableUrl"`
+ AccountRef string `json:"accountRef,omitempty"`
+ PrivateKey []byte `json:"-"`
+ Certificate []byte `json:"-"`
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go
new file mode 100644
index 00000000..d177ff07
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go
@@ -0,0 +1,28 @@
+package acme
+
+import "time"
+
+// ChallengeProvider enables implementing a custom challenge
+// provider. Present presents the solution to a challenge available to
+// be solved. CleanUp will be called by the challenge if Present ends
+// in a non-error state.
+type ChallengeProvider interface {
+ Present(domain, token, keyAuth string) error
+ CleanUp(domain, token, keyAuth string) error
+}
+
+// ChallengeProviderTimeout allows for implementing a
+// ChallengeProvider where an unusually long timeout is required when
+// waiting for an ACME challenge to be satisfied, such as when
+// checking for DNS record propagation. If an implementor of a
+// ChallengeProvider provides a Timeout method, then the return values
+// of the Timeout method will be used when appropriate by the acme
+// package. The interval value is the time between checks.
+//
+// The default values used for timeout and interval are 60 seconds and
+// 2 seconds respectively. These are used when no Timeout method is
+// defined for the ChallengeProvider.
+type ChallengeProviderTimeout interface {
+ ChallengeProvider
+ Timeout() (timeout, interval time.Duration)
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go
new file mode 100644
index 00000000..f184b17a
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go
@@ -0,0 +1,73 @@
+package acme
+
+import (
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/tls"
+ "encoding/hex"
+ "fmt"
+ "log"
+)
+
+type tlsSNIChallenge struct {
+ jws *jws
+ validate validateFunc
+ provider ChallengeProvider
+}
+
+func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error {
+ // FIXME: https://github.com/ietf-wg-acme/acme/pull/22
+ // Currently we implement this challenge to track boulder, not the current spec!
+
+ logf("[INFO][%s] acme: Trying to solve TLS-SNI-01", domain)
+
+ // Generate the Key Authorization for the challenge
+ keyAuth, err := getKeyAuthorization(chlng.Token, t.jws.privKey)
+ if err != nil {
+ return err
+ }
+
+ err = t.provider.Present(domain, chlng.Token, keyAuth)
+ if err != nil {
+ return fmt.Errorf("[%s] error presenting token: %v", domain, err)
+ }
+ defer func() {
+ err := t.provider.CleanUp(domain, chlng.Token, keyAuth)
+ if err != nil {
+ log.Printf("[%s] error cleaning up: %v", domain, err)
+ }
+ }()
+ return t.validate(t.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
+}
+
+// TLSSNI01ChallengeCertDomain returns a certificate and target domain for the `tls-sni-01` challenge
+func TLSSNI01ChallengeCertDomain(keyAuth string) (tls.Certificate, string, error) {
+ // generate a new RSA key for the certificates
+ tempPrivKey, err := generatePrivateKey(RSA2048)
+ if err != nil {
+ return tls.Certificate{}, "", err
+ }
+ rsaPrivKey := tempPrivKey.(*rsa.PrivateKey)
+ rsaPrivPEM := pemEncode(rsaPrivKey)
+
+ zBytes := sha256.Sum256([]byte(keyAuth))
+ z := hex.EncodeToString(zBytes[:sha256.Size])
+ domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:])
+ tempCertPEM, err := generatePemCert(rsaPrivKey, domain)
+ if err != nil {
+ return tls.Certificate{}, "", err
+ }
+
+ certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM)
+ if err != nil {
+ return tls.Certificate{}, "", err
+ }
+
+ return certificate, domain, nil
+}
+
+// TLSSNI01ChallengeCert returns a certificate for the `tls-sni-01` challenge
+func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, error) {
+ cert, _, err := TLSSNI01ChallengeCertDomain(keyAuth)
+ return cert, err
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go
new file mode 100644
index 00000000..faaf16f6
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go
@@ -0,0 +1,62 @@
+package acme
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+)
+
+// TLSProviderServer implements ChallengeProvider for `TLS-SNI-01` challenge
+// It may be instantiated without using the NewTLSProviderServer function if
+// you want only to use the default values.
+type TLSProviderServer struct {
+ iface string
+ port string
+ done chan bool
+ listener net.Listener
+}
+
+// NewTLSProviderServer creates a new TLSProviderServer on the selected interface and port.
+// Setting iface and / or port to an empty string will make the server fall back to
+// the "any" interface and port 443 respectively.
+func NewTLSProviderServer(iface, port string) *TLSProviderServer {
+ return &TLSProviderServer{iface: iface, port: port}
+}
+
+// Present makes the keyAuth available as a cert
+func (s *TLSProviderServer) Present(domain, token, keyAuth string) error {
+ if s.port == "" {
+ s.port = "443"
+ }
+
+ cert, err := TLSSNI01ChallengeCert(keyAuth)
+ if err != nil {
+ return err
+ }
+
+ tlsConf := new(tls.Config)
+ tlsConf.Certificates = []tls.Certificate{cert}
+
+ s.listener, err = tls.Listen("tcp", net.JoinHostPort(s.iface, s.port), tlsConf)
+ if err != nil {
+ return fmt.Errorf("Could not start HTTPS server for challenge -> %v", err)
+ }
+
+ s.done = make(chan bool)
+ go func() {
+ http.Serve(s.listener, nil)
+ s.done <- true
+ }()
+ return nil
+}
+
+// CleanUp closes the HTTP server.
+func (s *TLSProviderServer) CleanUp(domain, token, keyAuth string) error {
+ if s.listener == nil {
+ return nil
+ }
+ s.listener.Close()
+ <-s.done
+ return nil
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go
new file mode 100644
index 00000000..3aec7456
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go
@@ -0,0 +1,65 @@
+package acme
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/tls"
+ "encoding/hex"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestTLSSNIChallenge(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni1"}
+ mockValidate := func(_ *jws, _, _ string, chlng challenge) error {
+ conn, err := tls.Dial("tcp", "localhost:23457", &tls.Config{
+ InsecureSkipVerify: true,
+ })
+ if err != nil {
+ t.Errorf("Expected to connect to challenge server without an error. %s", err.Error())
+ }
+
+ // Expect the server to only return one certificate
+ connState := conn.ConnectionState()
+ if count := len(connState.PeerCertificates); count != 1 {
+ t.Errorf("Expected the challenge server to return exactly one certificate but got %d", count)
+ }
+
+ remoteCert := connState.PeerCertificates[0]
+ if count := len(remoteCert.DNSNames); count != 1 {
+ t.Errorf("Expected the challenge certificate to have exactly one DNSNames entry but had %d", count)
+ }
+
+ zBytes := sha256.Sum256([]byte(chlng.KeyAuthorization))
+ z := hex.EncodeToString(zBytes[:sha256.Size])
+ domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:])
+
+ if remoteCert.DNSNames[0] != domain {
+ t.Errorf("Expected the challenge certificate DNSName to match %s but was %s", domain, remoteCert.DNSNames[0])
+ }
+
+ return nil
+ }
+ solver := &tlsSNIChallenge{jws: j, validate: mockValidate, provider: &TLSProviderServer{port: "23457"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil {
+ t.Errorf("Solve error: got %v, want nil", err)
+ }
+}
+
+func TestTLSSNIChallengeInvalidPort(t *testing.T) {
+ privKey, _ := rsa.GenerateKey(rand.Reader, 128)
+ j := &jws{privKey: privKey}
+ clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni2"}
+ solver := &tlsSNIChallenge{jws: j, validate: stubValidate, provider: &TLSProviderServer{port: "123456"}}
+
+ if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil {
+ t.Errorf("Solve error: got %v, want error", err)
+ } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) {
+ t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want)
+ }
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go
new file mode 100644
index 00000000..2fa0db30
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go
@@ -0,0 +1,29 @@
+package acme
+
+import (
+ "fmt"
+ "time"
+)
+
+// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'.
+func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error {
+ var lastErr string
+ timeup := time.After(timeout)
+ for {
+ select {
+ case <-timeup:
+ return fmt.Errorf("Time limit exceeded. Last error: %s", lastErr)
+ default:
+ }
+
+ stop, err := f()
+ if stop {
+ return nil
+ }
+ if err != nil {
+ lastErr = err.Error()
+ }
+
+ time.Sleep(interval)
+ }
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go
new file mode 100644
index 00000000..158af411
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go
@@ -0,0 +1,26 @@
+package acme
+
+import (
+ "testing"
+ "time"
+)
+
+func TestWaitForTimeout(t *testing.T) {
+ c := make(chan error)
+ go func() {
+ err := WaitFor(3*time.Second, 1*time.Second, func() (bool, error) {
+ return false, nil
+ })
+ c <- err
+ }()
+
+ timeout := time.After(4 * time.Second)
+ select {
+ case <-timeout:
+ t.Fatal("timeout exceeded")
+ case err := <-c:
+ if err == nil {
+ t.Errorf("expected timeout error; got %v", err)
+ }
+ }
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md
new file mode 100644
index 00000000..97e61dbb
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md
@@ -0,0 +1,10 @@
+Serious about security
+======================
+
+Square recognizes the important contributions the security research community
+can make. We therefore encourage reporting security issues with the code
+contained in this repository.
+
+If you believe you have discovered a security vulnerability, please follow the
+guidelines at .
+
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md
new file mode 100644
index 00000000..61b18365
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing
+
+If you would like to contribute code to go-jose you can do so through GitHub by
+forking the repository and sending a pull request.
+
+When submitting code, please make every effort to follow existing conventions
+and style in order to keep the code as readable as possible. Please also make
+sure all tests pass by running `go test`, and format your code with `go fmt`.
+We also recommend using `golint` and `errcheck`.
+
+Before your code can be accepted into the project you must also sign the
+[Individual Contributor License Agreement][1].
+
+ [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md
new file mode 100644
index 00000000..fd859da7
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md
@@ -0,0 +1,209 @@
+# Go JOSE
+
+[![godoc](https://godoc.org/gopkg.in/square/go-jose.v1?status.svg)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) [![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose)
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. For the moment, it mainly focuses on encryption
+and signing based on the JSON Web Encryption and JSON Web Signature standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516)
+standard (RFC 7516) and
+[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515)
+standard (RFC 7515). Tables of supported algorithms are shown below.
+The library supports both the compact and full serialization formats, and has
+optional support for multiple recipients. It also comes with a small
+command-line utility
+([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages. If you do not like this behavior, you can use the
+`std_json` build tag to disable it (though we do not recommend doing so).
+
+### Versions
+
+We use [gopkg.in](https://gopkg.in) for versioning.
+
+[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version:
+
+ import "gopkg.in/square/go-jose.v1"
+
+The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain
+backwards compatible. We're currently sketching out ideas for a new version, to
+clean up the interface a bit. If you have ideas or feature requests [please let
+us know](https://github.com/square/go-jose/issues/64)!
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the
+[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+standard where possible. The
+[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants)
+has a list of constants.
+
+ Key encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5 | RSA1_5
+ RSA-OAEP | RSA-OAEP, RSA-OAEP-256
+ AES key wrap | A128KW, A192KW, A256KW
+ AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct)           | ECDH-ES<sup>1</sup>
+ Direct encryption          | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
+ RSASSA-PSS | PS256, PS384, PS512
+ HMAC | HS256, HS384, HS512
+ ECDSA | ES256, ES384, ES512
+
+ Content encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM | A128GCM, A192GCM, A256GCM
+
+ Compression | Algorithm identifiers(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951) | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Note that if you are creating a new encrypter or signer with a
+JsonWebKey, the key id of the JsonWebKey (if present) will be added to any
+resulting messages.
+
+ Algorithm(s) | Corresponding types
+ :------------------------- | -------------------------------
+ RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey)
+ AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey)
+
+## Examples
+
+Encryption/decryption example using RSA:
+
+```Go
+// Generate a public/private key pair to use for this example. The library
+// also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+// that can be used to load keys from PEM/DER-encoded data.
+privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+if err != nil {
+ panic(err)
+}
+
+// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would
+// indicate that the selected algorithm(s) are not currently supported.
+publicKey := &privateKey.PublicKey
+encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey)
+if err != nil {
+ panic(err)
+}
+
+// Encrypt a sample plaintext. Calling the encrypter returns an encrypted
+// JWE object, which can then be serialized for output afterwards. An error
+// would indicate a problem in an underlying cryptographic primitive.
+var plaintext = []byte("Lorem ipsum dolor sit amet")
+object, err := encrypter.Encrypt(plaintext)
+if err != nil {
+ panic(err)
+}
+
+// Serialize the encrypted object using the full serialization format.
+// Alternatively you can also use the compact format here by calling
+// object.CompactSerialize() instead.
+serialized := object.FullSerialize()
+
+// Parse the serialized, encrypted JWE object. An error would indicate that
+// the given input did not represent a valid message.
+object, err = ParseEncrypted(serialized)
+if err != nil {
+ panic(err)
+}
+
+// Now we can decrypt and get back our original plaintext. An error here
+// would indicate that the message failed to decrypt, e.g. because the auth
+// tag was broken or the message was tampered with.
+decrypted, err := object.Decrypt(privateKey)
+if err != nil {
+ panic(err)
+}
+
+fmt.Printf(string(decrypted))
+// output: Lorem ipsum dolor sit amet
+```
+
+Signing/verification example using RSA:
+
+```Go
+// Generate a public/private key pair to use for this example. The library
+// also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+// that can be used to load keys from PEM/DER-encoded data.
+privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+if err != nil {
+ panic(err)
+}
+
+// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key.
+signer, err := NewSigner(PS512, privateKey)
+if err != nil {
+ panic(err)
+}
+
+// Sign a sample payload. Calling the signer returns a protected JWS object,
+// which can then be serialized for output afterwards. An error would
+// indicate a problem in an underlying cryptographic primitive.
+var payload = []byte("Lorem ipsum dolor sit amet")
+object, err := signer.Sign(payload)
+if err != nil {
+ panic(err)
+}
+
+// Serialize the encrypted object using the full serialization format.
+// Alternatively you can also use the compact format here by calling
+// object.CompactSerialize() instead.
+serialized := object.FullSerialize()
+
+// Parse the serialized, protected JWS object. An error would indicate that
+// the given input did not represent a valid message.
+object, err = ParseSigned(serialized)
+if err != nil {
+ panic(err)
+}
+
+// Now we can verify the signature on the payload. An error here would
+// indicate that the message failed to verify, e.g. because the signature was
+// broken or the message was tampered with.
+output, err := object.Verify(&privateKey.PublicKey)
+if err != nil {
+ panic(err)
+}
+
+fmt.Printf(string(output))
+// output: Lorem ipsum dolor sit amet
+```
+
+More examples can be found in the [Godoc
+reference](https://godoc.org/github.com/square/go-jose) for this package. The
+[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)
+subdirectory also contains a small command-line utility which might
+be useful as an example.
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go
new file mode 100644
index 00000000..381156ca
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go
@@ -0,0 +1,498 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "math/big"
+
+ "gopkg.in/square/go-jose.v1/cipher"
+)
+
+// A generic RSA-based encrypter/verifier
+type rsaEncrypterVerifier struct {
+ publicKey *rsa.PublicKey
+}
+
+// A generic RSA-based decrypter/signer
+type rsaDecrypterSigner struct {
+ privateKey *rsa.PrivateKey
+}
+
+// A generic EC-based encrypter/verifier
+type ecEncrypterVerifier struct {
+ publicKey *ecdsa.PublicKey
+}
+
+// A key generator for ECDH-ES
+type ecKeyGenerator struct {
+ size int
+ algID string
+ publicKey *ecdsa.PublicKey
+}
+
+// A generic EC-based decrypter/signer
+type ecDecrypterSigner struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+// newRSARecipient creates recipientKeyInfo based on the given key.
+func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &rsaEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newRSASigner creates a recipientSigInfo based on the given key.
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case RS256, RS384, RS512, PS256, PS384, PS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: &JsonWebKey{
+ Key: &privateKey.PublicKey,
+ },
+ signer: &rsaDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// newECDHRecipient creates recipientKeyInfo based on the given key.
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &ecEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case ES256, ES384, ES512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: &JsonWebKey{
+ Key: &privateKey.PublicKey,
+ },
+ signer: &ecDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ encryptedKey, err := ctx.encrypt(cek, alg)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: encryptedKey,
+ header: &rawHeader{},
+ }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+ switch alg {
+ case RSA1_5:
+ return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek)
+ case RSA_OAEP:
+ return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{})
+ case RSA_OAEP_256:
+ return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+ // Note: The random reader on decrypt operations is only used for blinding,
+ // so stubbing is meaningless (hence the direct use of rand.Reader).
+ switch alg {
+ case RSA1_5:
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+ // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+ // only exists for preventing crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+ if keyBytes != len(jek) {
+ // Input size is incorrect, the encrypted payload should always match
+ // the size of the public modulus (e.g. using a 2048 bit key will
+ // produce 256 bytes of output). Reject this since it's invalid input.
+ return nil, ErrCryptoFailure
+ }
+
+ cek, _, err := generator.genKey()
+ if err != nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+ // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+ // the Million Message Attack on Cryptographic Message Syntax". We are
+ // therefore deliberately ignoring errors here.
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
+
+ return cek, nil
+ case RSA_OAEP:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ case RSA_OAEP_256:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ var out []byte
+ var err error
+
+ switch alg {
+ case RS256, RS384, RS512:
+ out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed)
+ case PS256, PS384, PS512:
+ out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ })
+ }
+
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ switch alg {
+ case RS256, RS384, RS512:
+ return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
+ case PS256, PS384, PS512:
+ return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
+ }
+
+ return ErrUnsupportedAlgorithm
+}
+
+// Encrypt the given payload and update the object.
+func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case ECDH_ES:
+ // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ generator := ecKeyGenerator{
+ algID: string(alg),
+ publicKey: ctx.publicKey,
+ }
+
+ switch alg {
+ case ECDH_ES_A128KW:
+ generator.size = 16
+ case ECDH_ES_A192KW:
+ generator.size = 24
+ case ECDH_ES_A256KW:
+ generator.size = 32
+ }
+
+ kek, header, err := generator.genKey()
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ block, err := aes.NewCipher(kek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &header,
+ }, nil
+}
+
+// Get key size for EC key generator
+func (ctx ecKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Get a content encryption key for ECDH-ES
+func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
+
+ headers := rawHeader{
+ Epk: &JsonWebKey{
+ Key: &priv.PublicKey,
+ },
+ }
+
+ return out, headers, nil
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ if headers.Epk == nil {
+ return nil, errors.New("square/go-jose: missing epk header")
+ }
+
+ publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey)
+ if publicKey == nil || !ok {
+ return nil, errors.New("square/go-jose: invalid epk header")
+ }
+
+ apuData := headers.Apu.bytes()
+ apvData := headers.Apv.bytes()
+
+ deriveKey := func(algID string, size int) []byte {
+ return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size)
+ }
+
+ var keySize int
+
+ switch KeyAlgorithm(headers.Alg) {
+ case ECDH_ES:
+ // ECDH-ES uses direct key agreement, no key unwrapping necessary.
+ return deriveKey(string(headers.Enc), generator.keySize()), nil
+ case ECDH_ES_A128KW:
+ keySize = 16
+ case ECDH_ES_A192KW:
+ keySize = 24
+ case ECDH_ES_A256KW:
+ keySize = 32
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ key := deriveKey(headers.Alg, keySize)
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return josecipher.KeyUnwrap(block, recipient.encryptedKey)
+}
+
+// Sign the given payload
+func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var expectedBitSize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ expectedBitSize = 256
+ hash = crypto.SHA256
+ case ES384:
+ expectedBitSize = 384
+ hash = crypto.SHA384
+ case ES512:
+ expectedBitSize = 521
+ hash = crypto.SHA512
+ }
+
+ curveBits := ctx.privateKey.Curve.Params().BitSize
+ if expectedBitSize != curveBits {
+ return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var keySize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ keySize = 32
+ hash = crypto.SHA256
+ case ES384:
+ keySize = 48
+ hash = crypto.SHA384
+ case ES512:
+ keySize = 66
+ hash = crypto.SHA512
+ }
+
+ if len(signature) != 2*keySize {
+ return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r := big.NewInt(0).SetBytes(signature[:keySize])
+ s := big.NewInt(0).SetBytes(signature[keySize:])
+
+ match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
+ if !match {
+ return errors.New("square/go-jose: ecdsa signature failed to verify")
+ }
+
+ return nil
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go
new file mode 100644
index 00000000..1c8c8b34
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go
@@ -0,0 +1,431 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/rsa"
+ "errors"
+ "io"
+ "math/big"
+ "testing"
+)
+
+func TestVectorsRSA(t *testing.T) {
+ // Sources:
+ // http://www.emc.com/emc-plus/rsa-labs/standards-initiatives/pkcs-rsa-cryptography-standard.htm
+ // ftp://ftp.rsa.com/pub/rsalabs/tmp/pkcs1v15crypt-vectors.txt
+ priv := &rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ N: fromHexInt(`
+ a8b3b284af8eb50b387034a860f146c4919f318763cd6c5598c8
+ ae4811a1e0abc4c7e0b082d693a5e7fced675cf4668512772c0c
+ bc64a742c6c630f533c8cc72f62ae833c40bf25842e984bb78bd
+ bf97c0107d55bdb662f5c4e0fab9845cb5148ef7392dd3aaff93
+ ae1e6b667bb3d4247616d4f5ba10d4cfd226de88d39f16fb`),
+ E: 65537,
+ },
+ D: fromHexInt(`
+ 53339cfdb79fc8466a655c7316aca85c55fd8f6dd898fdaf1195
+ 17ef4f52e8fd8e258df93fee180fa0e4ab29693cd83b152a553d
+ 4ac4d1812b8b9fa5af0e7f55fe7304df41570926f3311f15c4d6
+ 5a732c483116ee3d3d2d0af3549ad9bf7cbfb78ad884f84d5beb
+ 04724dc7369b31def37d0cf539e9cfcdd3de653729ead5d1`),
+ Primes: []*big.Int{
+ fromHexInt(`
+ d32737e7267ffe1341b2d5c0d150a81b586fb3132bed2f8d5262
+ 864a9cb9f30af38be448598d413a172efb802c21acf1c11c520c
+ 2f26a471dcad212eac7ca39d`),
+ fromHexInt(`
+ cc8853d1d54da630fac004f471f281c7b8982d8224a490edbeb3
+ 3d3e3d5cc93c4765703d1dd791642f1f116a0dd852be2419b2af
+ 72bfe9a030e860b0288b5d77`),
+ },
+ }
+
+ input := fromHexBytes(
+ "6628194e12073db03ba94cda9ef9532397d50dba79b987004afefe34")
+
+ expectedPKCS := fromHexBytes(`
+ 50b4c14136bd198c2f3c3ed243fce036e168d56517984a263cd66492b808
+ 04f169d210f2b9bdfb48b12f9ea05009c77da257cc600ccefe3a6283789d
+ 8ea0e607ac58e2690ec4ebc10146e8cbaa5ed4d5cce6fe7b0ff9efc1eabb
+ 564dbf498285f449ee61dd7b42ee5b5892cb90601f30cda07bf26489310b
+ cd23b528ceab3c31`)
+
+ expectedOAEP := fromHexBytes(`
+ 354fe67b4a126d5d35fe36c777791a3f7ba13def484e2d3908aff722fad4
+ 68fb21696de95d0be911c2d3174f8afcc201035f7b6d8e69402de5451618
+ c21a535fa9d7bfc5b8dd9fc243f8cf927db31322d6e881eaa91a996170e6
+ 57a05a266426d98c88003f8477c1227094a0d9fa1e8c4024309ce1ecccb5
+ 210035d47ac72e8a`)
+
+ // Mock random reader
+ randReader = bytes.NewReader(fromHexBytes(`
+ 017341ae3875d5f87101f8cc4fa9b9bc156bb04628fccdb2f4f11e905bd3
+ a155d376f593bd7304210874eba08a5e22bcccb4c9d3882a93a54db022f5
+ 03d16338b6b7ce16dc7f4bbf9a96b59772d6606e9747c7649bf9e083db98
+ 1884a954ab3c6f18b776ea21069d69776a33e96bad48e1dda0a5ef`))
+ defer resetRandReader()
+
+ // RSA-PKCS1v1.5 encrypt
+ enc := new(rsaEncrypterVerifier)
+ enc.publicKey = &priv.PublicKey
+ encryptedPKCS, err := enc.encrypt(input, RSA1_5)
+ if err != nil {
+ t.Error("Encryption failed:", err)
+ return
+ }
+
+ if bytes.Compare(encryptedPKCS, expectedPKCS) != 0 {
+ t.Error("Output does not match expected value (PKCS1v1.5)")
+ }
+
+ // RSA-OAEP encrypt
+ encryptedOAEP, err := enc.encrypt(input, RSA_OAEP)
+ if err != nil {
+ t.Error("Encryption failed:", err)
+ return
+ }
+
+ if bytes.Compare(encryptedOAEP, expectedOAEP) != 0 {
+ t.Error("Output does not match expected value (OAEP)")
+ }
+
+ // Need fake cipher for PKCS1v1.5 decrypt
+ resetRandReader()
+ aes := newAESGCM(len(input))
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ // RSA-PKCS1v1.5 decrypt
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ decryptedPKCS, err := dec.decrypt(encryptedPKCS, RSA1_5, keygen)
+ if err != nil {
+ t.Error("Decryption failed:", err)
+ return
+ }
+
+ if bytes.Compare(input, decryptedPKCS) != 0 {
+ t.Error("Output does not match expected value (PKCS1v1.5)")
+ }
+
+ // RSA-OAEP decrypt
+ decryptedOAEP, err := dec.decrypt(encryptedOAEP, RSA_OAEP, keygen)
+ if err != nil {
+ t.Error("decryption failed:", err)
+ return
+ }
+
+ if bytes.Compare(input, decryptedOAEP) != 0 {
+ t.Error("output does not match expected value (OAEP)")
+ }
+}
+
+func TestInvalidAlgorithmsRSA(t *testing.T) {
+ _, err := newRSARecipient("XYZ", nil)
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ _, err = newRSASigner("XYZ", nil)
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ enc := new(rsaEncrypterVerifier)
+ enc.publicKey = &rsaTestKey.PublicKey
+ _, err = enc.encryptKey([]byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ err = enc.verifyPayload([]byte{}, []byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = rsaTestKey
+ _, err = dec.decrypt(make([]byte, 256), "XYZ", randomKeyGenerator{size: 16})
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ _, err = dec.signPayload([]byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+}
+
+type failingKeyGenerator struct{}
+
+func (ctx failingKeyGenerator) keySize() int {
+ return 0
+}
+
+func (ctx failingKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ return nil, rawHeader{}, errors.New("failed to generate key")
+}
+
+func TestPKCSKeyGeneratorFailure(t *testing.T) {
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = rsaTestKey
+ generator := failingKeyGenerator{}
+ _, err := dec.decrypt(make([]byte, 256), RSA1_5, generator)
+ if err != ErrCryptoFailure {
+ t.Error("should return error on invalid algorithm")
+ }
+}
+
+func TestInvalidAlgorithmsEC(t *testing.T) {
+ _, err := newECDHRecipient("XYZ", nil)
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ _, err = newECDSASigner("XYZ", nil)
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+
+ enc := new(ecEncrypterVerifier)
+ enc.publicKey = &ecTestKey256.PublicKey
+ _, err = enc.encryptKey([]byte{}, "XYZ")
+ if err != ErrUnsupportedAlgorithm {
+ t.Error("should return error on invalid algorithm")
+ }
+}
+
+func TestInvalidECKeyGen(t *testing.T) {
+ gen := ecKeyGenerator{
+ size: 16,
+ algID: "A128GCM",
+ publicKey: &ecTestKey256.PublicKey,
+ }
+
+ if gen.keySize() != 16 {
+ t.Error("ec key generator reported incorrect key size")
+ }
+
+ _, _, err := gen.genKey()
+ if err != nil {
+ t.Error("ec key generator failed to generate key", err)
+ }
+}
+
+func TestInvalidECDecrypt(t *testing.T) {
+ dec := ecDecrypterSigner{
+ privateKey: ecTestKey256,
+ }
+
+ generator := randomKeyGenerator{size: 16}
+
+ // Missing epk header
+ headers := rawHeader{
+ Alg: string(ECDH_ES),
+ }
+
+ _, err := dec.decryptKey(headers, nil, generator)
+ if err == nil {
+ t.Error("ec decrypter accepted object with missing epk header")
+ }
+
+ // Invalid epk header
+ headers.Epk = &JsonWebKey{}
+
+ _, err = dec.decryptKey(headers, nil, generator)
+ if err == nil {
+ t.Error("ec decrypter accepted object with invalid epk header")
+ }
+}
+
+func TestDecryptWithIncorrectSize(t *testing.T) {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ aes := newAESGCM(16)
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ payload := make([]byte, 254)
+ _, err = dec.decrypt(payload, RSA1_5, keygen)
+ if err == nil {
+ t.Error("Invalid payload size should return error")
+ }
+
+ payload = make([]byte, 257)
+ _, err = dec.decrypt(payload, RSA1_5, keygen)
+ if err == nil {
+ t.Error("Invalid payload size should return error")
+ }
+}
+
+func TestPKCSDecryptNeverFails(t *testing.T) {
+ // We don't want RSA-PKCS1 v1.5 decryption to ever fail, in order to prevent
+ // side-channel timing attacks (Bleichenbacher attack in particular).
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ aes := newAESGCM(16)
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ for i := 1; i < 50; i++ {
+ payload := make([]byte, 256)
+ _, err := io.ReadFull(rand.Reader, payload)
+ if err != nil {
+ t.Error("Unable to get random data:", err)
+ return
+ }
+ _, err = dec.decrypt(payload, RSA1_5, keygen)
+ if err != nil {
+ t.Error("PKCS1v1.5 decrypt should never fail:", err)
+ return
+ }
+ }
+}
+
+func BenchmarkPKCSDecryptWithValidPayloads(b *testing.B) {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ panic(err)
+ }
+
+ enc := new(rsaEncrypterVerifier)
+ enc.publicKey = &priv.PublicKey
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ aes := newAESGCM(32)
+
+ b.StopTimer()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ plaintext := make([]byte, 32)
+ _, err = io.ReadFull(rand.Reader, plaintext)
+ if err != nil {
+ panic(err)
+ }
+
+ ciphertext, err := enc.encrypt(plaintext, RSA1_5)
+ if err != nil {
+ panic(err)
+ }
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ b.StartTimer()
+ _, err = dec.decrypt(ciphertext, RSA1_5, keygen)
+ b.StopTimer()
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+func BenchmarkPKCSDecryptWithInvalidPayloads(b *testing.B) {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ panic(err)
+ }
+
+ enc := new(rsaEncrypterVerifier)
+ enc.publicKey = &priv.PublicKey
+ dec := new(rsaDecrypterSigner)
+ dec.privateKey = priv
+ aes := newAESGCM(16)
+
+ keygen := randomKeyGenerator{
+ size: aes.keySize(),
+ }
+
+ b.StopTimer()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ plaintext := make([]byte, 16)
+ _, err = io.ReadFull(rand.Reader, plaintext)
+ if err != nil {
+ panic(err)
+ }
+
+ ciphertext, err := enc.encrypt(plaintext, RSA1_5)
+ if err != nil {
+ panic(err)
+ }
+
+ // Do some simple scrambling
+ ciphertext[128] ^= 0xFF
+
+ b.StartTimer()
+ _, err = dec.decrypt(ciphertext, RSA1_5, keygen)
+ b.StopTimer()
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+func TestInvalidEllipticCurve(t *testing.T) {
+ signer256 := ecDecrypterSigner{privateKey: ecTestKey256}
+ signer384 := ecDecrypterSigner{privateKey: ecTestKey384}
+ signer521 := ecDecrypterSigner{privateKey: ecTestKey521}
+
+ _, err := signer256.signPayload([]byte{}, ES384)
+ if err == nil {
+ t.Error("should not generate ES384 signature with P-256 key")
+ }
+ _, err = signer256.signPayload([]byte{}, ES512)
+ if err == nil {
+ t.Error("should not generate ES512 signature with P-256 key")
+ }
+ _, err = signer384.signPayload([]byte{}, ES256)
+ if err == nil {
+ t.Error("should not generate ES256 signature with P-384 key")
+ }
+ _, err = signer384.signPayload([]byte{}, ES512)
+ if err == nil {
+ t.Error("should not generate ES512 signature with P-384 key")
+ }
+ _, err = signer521.signPayload([]byte{}, ES256)
+ if err == nil {
+ t.Error("should not generate ES256 signature with P-521 key")
+ }
+ _, err = signer521.signPayload([]byte{}, ES384)
+ if err == nil {
+ t.Error("should not generate ES384 signature with P-521 key")
+ }
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go
new file mode 100644
index 00000000..a5c35834
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go
@@ -0,0 +1,196 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ nonceBytes = 16
+)
+
+// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
+func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
+ keySize := len(key) / 2
+ integrityKey := key[:keySize]
+ encryptionKey := key[keySize:]
+
+ blockCipher, err := newBlockCipher(encryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var hash func() hash.Hash
+ switch keySize {
+ case 16:
+ hash = sha256.New
+ case 24:
+ hash = sha512.New384
+ case 32:
+ hash = sha512.New
+ }
+
+ return &cbcAEAD{
+ hash: hash,
+ blockCipher: blockCipher,
+ authtagBytes: keySize,
+ integrityKey: integrityKey,
+ }, nil
+}
+
+// An AEAD based on CBC+HMAC
+type cbcAEAD struct {
+ hash func() hash.Hash
+ authtagBytes int
+ integrityKey []byte
+ blockCipher cipher.Block
+}
+
+func (ctx *cbcAEAD) NonceSize() int {
+ return nonceBytes
+}
+
+func (ctx *cbcAEAD) Overhead() int {
+ // Maximum overhead is block size (for padding) plus auth tag length, where
+ // the length of the auth tag is equivalent to the key size.
+ return ctx.blockCipher.BlockSize() + ctx.authtagBytes
+}
+
+// Seal encrypts and authenticates the plaintext.
+func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
+ // Output buffer -- must take care not to mangle plaintext input.
+ ciphertext := make([]byte, len(plaintext)+ctx.Overhead())[:len(plaintext)]
+ copy(ciphertext, plaintext)
+ ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
+
+ cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
+
+ cbc.CryptBlocks(ciphertext, ciphertext)
+ authtag := ctx.computeAuthTag(data, nonce, ciphertext)
+
+ ret, out := resize(dst, len(dst)+len(ciphertext)+len(authtag))
+ copy(out, ciphertext)
+ copy(out[len(ciphertext):], authtag)
+
+ return ret
+}
+
+// Open decrypts and authenticates the ciphertext.
+func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(ciphertext) < ctx.authtagBytes {
+ return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
+ }
+
+ offset := len(ciphertext) - ctx.authtagBytes
+ expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
+ match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
+ if match != 1 {
+ return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
+ }
+
+ cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
+
+ // Make copy of ciphertext buffer, don't want to modify in place
+ buffer := append([]byte{}, []byte(ciphertext[:offset])...)
+
+ if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
+ return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
+ }
+
+ cbc.CryptBlocks(buffer, buffer)
+
+ // Remove padding
+ plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
+ if err != nil {
+ return nil, err
+ }
+
+ ret, out := resize(dst, len(dst)+len(plaintext))
+ copy(out, plaintext)
+
+ return ret, nil
+}
+
+// Compute an authentication tag
+func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
+ buffer := make([]byte, len(aad)+len(nonce)+len(ciphertext)+8)
+ n := 0
+ n += copy(buffer, aad)
+ n += copy(buffer[n:], nonce)
+ n += copy(buffer[n:], ciphertext)
+ binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad)*8))
+
+ // According to documentation, Write() on hash.Hash never fails.
+ hmac := hmac.New(ctx.hash, ctx.integrityKey)
+ _, _ = hmac.Write(buffer)
+
+ return hmac.Sum(nil)[:ctx.authtagBytes]
+}
+
+// resize ensures that the given slice has a capacity of at least n bytes.
+// If the capacity of the slice is less than n, a new slice is allocated
+// and the existing data will be copied.
+func resize(in []byte, n int) (head, tail []byte) {
+ if cap(in) >= n {
+ head = in[:n]
+ } else {
+ head = make([]byte, n)
+ copy(head, in)
+ }
+
+ tail = head[len(in):]
+ return
+}
+
+// Apply padding
+func padBuffer(buffer []byte, blockSize int) []byte {
+ missing := blockSize - (len(buffer) % blockSize)
+ ret, out := resize(buffer, len(buffer)+missing)
+ padding := bytes.Repeat([]byte{byte(missing)}, missing)
+ copy(out, padding)
+ return ret
+}
+
+// Remove padding
+func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
+ if len(buffer)%blockSize != 0 {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ last := buffer[len(buffer)-1]
+ count := int(last)
+
+ if count == 0 || count > blockSize || count > len(buffer) {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ padding := bytes.Repeat([]byte{last}, count)
+ if !bytes.HasSuffix(buffer, padding) {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ return buffer[:len(buffer)-count], nil
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go
new file mode 100644
index 00000000..c230271b
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go
@@ -0,0 +1,498 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "io"
+ "strings"
+ "testing"
+)
+
+func TestInvalidInputs(t *testing.T) {
+ key := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ }
+
+ nonce := []byte{
+ 92, 80, 104, 49, 133, 25, 161, 215, 173, 101, 219, 211, 136, 91, 210, 145}
+
+ aead, _ := NewCBCHMAC(key, aes.NewCipher)
+ ciphertext := aead.Seal(nil, nonce, []byte("plaintext"), []byte("aad"))
+
+ // Changed AAD, must fail
+ _, err := aead.Open(nil, nonce, ciphertext, []byte("INVALID"))
+ if err == nil {
+ t.Error("must detect invalid aad")
+ }
+
+ // Empty ciphertext, must fail
+ _, err = aead.Open(nil, nonce, []byte{}, []byte("aad"))
+ if err == nil {
+ t.Error("must detect invalid/empty ciphertext")
+ }
+
+ // Corrupt ciphertext, must fail
+ corrupt := make([]byte, len(ciphertext))
+ copy(corrupt, ciphertext)
+ corrupt[0] ^= 0xFF
+
+ _, err = aead.Open(nil, nonce, corrupt, []byte("aad"))
+ if err == nil {
+ t.Error("must detect corrupt ciphertext")
+ }
+
+ // Corrupt authtag, must fail
+ copy(corrupt, ciphertext)
+ corrupt[len(ciphertext)-1] ^= 0xFF
+
+ _, err = aead.Open(nil, nonce, corrupt, []byte("aad"))
+ if err == nil {
+ t.Error("must detect corrupt authtag")
+ }
+
+ // Truncated data, must fail
+ _, err = aead.Open(nil, nonce, ciphertext[:10], []byte("aad"))
+ if err == nil {
+ t.Error("must detect corrupt authtag")
+ }
+}
+
+func TestVectorsAESCBC128(t *testing.T) {
+ // Source: http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-29#appendix-A.2
+ plaintext := []byte{
+ 76, 105, 118, 101, 32, 108, 111, 110, 103, 32, 97, 110, 100, 32,
+ 112, 114, 111, 115, 112, 101, 114, 46}
+
+ aad := []byte{
+ 101, 121, 74, 104, 98, 71, 99, 105, 79, 105, 74, 83, 85, 48, 69,
+ 120, 88, 122, 85, 105, 76, 67, 74, 108, 98, 109, 77, 105, 79, 105,
+ 74, 66, 77, 84, 73, 52, 81, 48, 74, 68, 76, 85, 104, 84, 77, 106, 85,
+ 50, 73, 110, 48}
+
+ expectedCiphertext := []byte{
+ 40, 57, 83, 181, 119, 33, 133, 148, 198, 185, 243, 24, 152, 230, 6,
+ 75, 129, 223, 127, 19, 210, 82, 183, 230, 168, 33, 215, 104, 143,
+ 112, 56, 102}
+
+ expectedAuthtag := []byte{
+ 246, 17, 244, 190, 4, 95, 98, 3, 231, 0, 115, 157, 242, 203, 100,
+ 191}
+
+ key := []byte{
+ 4, 211, 31, 197, 84, 157, 252, 254, 11, 100, 157, 250, 63, 170, 106, 206,
+ 107, 124, 212, 45, 111, 107, 9, 219, 200, 177, 0, 240, 143, 156, 44, 207}
+
+ nonce := []byte{
+ 3, 22, 60, 12, 43, 67, 104, 105, 108, 108, 105, 99, 111, 116, 104, 101}
+
+ enc, err := NewCBCHMAC(key, aes.NewCipher)
+ out := enc.Seal(nil, nonce, plaintext, aad)
+ if err != nil {
+ t.Error("Unable to encrypt:", err)
+ return
+ }
+
+ if bytes.Compare(out[:len(out)-16], expectedCiphertext) != 0 {
+ t.Error("Ciphertext did not match")
+ }
+ if bytes.Compare(out[len(out)-16:], expectedAuthtag) != 0 {
+ t.Error("Auth tag did not match")
+ }
+}
+
+func TestVectorsAESCBC256(t *testing.T) {
+ // Source: https://tools.ietf.org/html/draft-mcgrew-aead-aes-cbc-hmac-sha2-05#section-5.4
+ plaintext := []byte{
+ 0x41, 0x20, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x20,
+ 0x6d, 0x75, 0x73, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65,
+ 0x74, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x74, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62,
+ 0x65, 0x20, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x66, 0x61, 0x6c, 0x6c, 0x20, 0x69,
+ 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x20, 0x6f, 0x66,
+ 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x65, 0x6d, 0x79, 0x20, 0x77, 0x69, 0x74, 0x68, 0x6f,
+ 0x75, 0x74, 0x20, 0x69, 0x6e, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x6e, 0x69, 0x65, 0x6e, 0x63, 0x65}
+
+ aad := []byte{
+ 0x54, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x63,
+ 0x69, 0x70, 0x6c, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x41, 0x75, 0x67, 0x75, 0x73, 0x74, 0x65, 0x20,
+ 0x4b, 0x65, 0x72, 0x63, 0x6b, 0x68, 0x6f, 0x66, 0x66, 0x73}
+
+ expectedCiphertext := []byte{
+ 0x4a, 0xff, 0xaa, 0xad, 0xb7, 0x8c, 0x31, 0xc5, 0xda, 0x4b, 0x1b, 0x59, 0x0d, 0x10, 0xff, 0xbd,
+ 0x3d, 0xd8, 0xd5, 0xd3, 0x02, 0x42, 0x35, 0x26, 0x91, 0x2d, 0xa0, 0x37, 0xec, 0xbc, 0xc7, 0xbd,
+ 0x82, 0x2c, 0x30, 0x1d, 0xd6, 0x7c, 0x37, 0x3b, 0xcc, 0xb5, 0x84, 0xad, 0x3e, 0x92, 0x79, 0xc2,
+ 0xe6, 0xd1, 0x2a, 0x13, 0x74, 0xb7, 0x7f, 0x07, 0x75, 0x53, 0xdf, 0x82, 0x94, 0x10, 0x44, 0x6b,
+ 0x36, 0xeb, 0xd9, 0x70, 0x66, 0x29, 0x6a, 0xe6, 0x42, 0x7e, 0xa7, 0x5c, 0x2e, 0x08, 0x46, 0xa1,
+ 0x1a, 0x09, 0xcc, 0xf5, 0x37, 0x0d, 0xc8, 0x0b, 0xfe, 0xcb, 0xad, 0x28, 0xc7, 0x3f, 0x09, 0xb3,
+ 0xa3, 0xb7, 0x5e, 0x66, 0x2a, 0x25, 0x94, 0x41, 0x0a, 0xe4, 0x96, 0xb2, 0xe2, 0xe6, 0x60, 0x9e,
+ 0x31, 0xe6, 0xe0, 0x2c, 0xc8, 0x37, 0xf0, 0x53, 0xd2, 0x1f, 0x37, 0xff, 0x4f, 0x51, 0x95, 0x0b,
+ 0xbe, 0x26, 0x38, 0xd0, 0x9d, 0xd7, 0xa4, 0x93, 0x09, 0x30, 0x80, 0x6d, 0x07, 0x03, 0xb1, 0xf6}
+
+ expectedAuthtag := []byte{
+ 0x4d, 0xd3, 0xb4, 0xc0, 0x88, 0xa7, 0xf4, 0x5c, 0x21, 0x68, 0x39, 0x64, 0x5b, 0x20, 0x12, 0xbf,
+ 0x2e, 0x62, 0x69, 0xa8, 0xc5, 0x6a, 0x81, 0x6d, 0xbc, 0x1b, 0x26, 0x77, 0x61, 0x95, 0x5b, 0xc5}
+
+ key := []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f}
+
+ nonce := []byte{
+ 0x1a, 0xf3, 0x8c, 0x2d, 0xc2, 0xb9, 0x6f, 0xfd, 0xd8, 0x66, 0x94, 0x09, 0x23, 0x41, 0xbc, 0x04}
+
+ enc, err := NewCBCHMAC(key, aes.NewCipher)
+ out := enc.Seal(nil, nonce, plaintext, aad)
+ if err != nil {
+ t.Error("Unable to encrypt:", err)
+ return
+ }
+
+ if bytes.Compare(out[:len(out)-32], expectedCiphertext) != 0 {
+ t.Error("Ciphertext did not match, got", out[:len(out)-32], "wanted", expectedCiphertext)
+ }
+ if bytes.Compare(out[len(out)-32:], expectedAuthtag) != 0 {
+ t.Error("Auth tag did not match, got", out[len(out)-32:], "wanted", expectedAuthtag)
+ }
+}
+
+func TestAESCBCRoundtrip(t *testing.T) {
+ key128 := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+
+ key192 := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7}
+
+ key256 := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+
+ nonce := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+
+ RunRoundtrip(t, key128, nonce)
+ RunRoundtrip(t, key192, nonce)
+ RunRoundtrip(t, key256, nonce)
+}
+
+func RunRoundtrip(t *testing.T, key, nonce []byte) {
+ aead, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ if aead.NonceSize() != len(nonce) {
+ panic("invalid nonce")
+ }
+
+ // Test pre-existing data in dst buffer
+ dst := []byte{15, 15, 15, 15}
+ plaintext := []byte{0, 0, 0, 0}
+ aad := []byte{4, 3, 2, 1}
+
+ result := aead.Seal(dst, nonce, plaintext, aad)
+ if bytes.Compare(dst, result[:4]) != 0 {
+ t.Error("Existing data in dst not preserved")
+ }
+
+ // Test pre-existing (empty) dst buffer with sufficient capacity
+ dst = make([]byte, 256)[:0]
+ result, err = aead.Open(dst, nonce, result[4:], aad)
+ if err != nil {
+ panic(err)
+ }
+
+ if bytes.Compare(result, plaintext) != 0 {
+ t.Error("Plaintext does not match output")
+ }
+}
+
+func TestAESCBCOverhead(t *testing.T) {
+ aead, err := NewCBCHMAC(make([]byte, 32), aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ if aead.Overhead() != 32 {
+ t.Error("CBC-HMAC reports incorrect overhead value")
+ }
+}
+
+func TestPadding(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ slice := make([]byte, i)
+ padded := padBuffer(slice, 16)
+ if len(padded)%16 != 0 {
+ t.Error("failed to pad slice properly", i)
+ return
+ }
+ unpadded, err := unpadBuffer(padded, 16)
+ if err != nil || len(unpadded) != i {
+ t.Error("failed to unpad slice properly", i)
+ return
+ }
+ }
+}
+
+func TestInvalidKey(t *testing.T) {
+ key := make([]byte, 30)
+ _, err := NewCBCHMAC(key, aes.NewCipher)
+ if err == nil {
+ t.Error("should not be able to instantiate CBC-HMAC with invalid key")
+ }
+}
+
+func TestTruncatedCiphertext(t *testing.T) {
+ key := make([]byte, 32)
+ nonce := make([]byte, 16)
+ data := make([]byte, 32)
+
+ io.ReadFull(rand.Reader, key)
+ io.ReadFull(rand.Reader, nonce)
+
+ aead, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ ctx := aead.(*cbcAEAD)
+ ct := aead.Seal(nil, nonce, data, nil)
+
+ // Truncated ciphertext, but with correct auth tag
+ truncated, tail := resize(ct[:len(ct)-ctx.authtagBytes-2], len(ct)-2)
+ copy(tail, ctx.computeAuthTag(nil, nonce, truncated[:len(truncated)-ctx.authtagBytes]))
+
+ // Open should fail
+ _, err = aead.Open(nil, nonce, truncated, nil)
+ if err == nil {
+ t.Error("open on truncated ciphertext should fail")
+ }
+}
+
+func TestInvalidPaddingOpen(t *testing.T) {
+ key := make([]byte, 32)
+ nonce := make([]byte, 16)
+
+ // Plaintext with invalid padding
+ plaintext := padBuffer(make([]byte, 28), aes.BlockSize)
+ plaintext[len(plaintext)-1] = 0xFF
+
+ io.ReadFull(rand.Reader, key)
+ io.ReadFull(rand.Reader, nonce)
+
+ block, _ := aes.NewCipher(key)
+ cbc := cipher.NewCBCEncrypter(block, nonce)
+ buffer := append([]byte{}, plaintext...)
+ cbc.CryptBlocks(buffer, buffer)
+
+ aead, _ := NewCBCHMAC(key, aes.NewCipher)
+ ctx := aead.(*cbcAEAD)
+
+ // Mutated ciphertext, but with correct auth tag
+ size := len(buffer)
+ ciphertext, tail := resize(buffer, size+(len(key)/2))
+ copy(tail, ctx.computeAuthTag(nil, nonce, ciphertext[:size]))
+
+ // Open should fail (b/c of invalid padding, even though tag matches)
+ _, err := aead.Open(nil, nonce, ciphertext, nil)
+ if err == nil || !strings.Contains(err.Error(), "invalid padding") {
+ t.Error("no or unexpected error on open with invalid padding:", err)
+ }
+}
+
+func TestInvalidPadding(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ slice := make([]byte, i)
+ padded := padBuffer(slice, 16)
+ if len(padded)%16 != 0 {
+ t.Error("failed to pad slice properly", i)
+ return
+ }
+
+ paddingBytes := 16 - (i % 16)
+
+ // Mutate padding for testing
+ for j := 1; j <= paddingBytes; j++ {
+ mutated := make([]byte, len(padded))
+ copy(mutated, padded)
+ mutated[len(mutated)-j] ^= 0xFF
+
+ _, err := unpadBuffer(mutated, 16)
+ if err == nil {
+ t.Error("unpad on invalid padding should fail", i)
+ return
+ }
+ }
+
+ // Test truncated padding
+ _, err := unpadBuffer(padded[:len(padded)-1], 16)
+ if err == nil {
+ t.Error("unpad on truncated padding should fail", i)
+ return
+ }
+ }
+}
+
+func TestZeroLengthPadding(t *testing.T) {
+ data := make([]byte, 16)
+ data, err := unpadBuffer(data, 16)
+ if err == nil {
+ t.Error("padding with 0x00 should never be valid")
+ }
+}
+
+func benchEncryptCBCHMAC(b *testing.B, keySize, chunkSize int) {
+ key := make([]byte, keySize*2)
+ nonce := make([]byte, 16)
+
+ io.ReadFull(rand.Reader, key)
+ io.ReadFull(rand.Reader, nonce)
+
+ chunk := make([]byte, chunkSize)
+
+ aead, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ b.SetBytes(int64(chunkSize))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ aead.Seal(nil, nonce, chunk, nil)
+ }
+}
+
+func benchDecryptCBCHMAC(b *testing.B, keySize, chunkSize int) {
+ key := make([]byte, keySize*2)
+ nonce := make([]byte, 16)
+
+ io.ReadFull(rand.Reader, key)
+ io.ReadFull(rand.Reader, nonce)
+
+ chunk := make([]byte, chunkSize)
+
+ aead, err := NewCBCHMAC(key, aes.NewCipher)
+ if err != nil {
+ panic(err)
+ }
+
+ out := aead.Seal(nil, nonce, chunk, nil)
+
+ b.SetBytes(int64(chunkSize))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ aead.Open(nil, nonce, out, nil)
+ }
+}
+
+func BenchmarkEncryptAES128_CBCHMAC_1k(b *testing.B) {
+ benchEncryptCBCHMAC(b, 16, 1024)
+}
+
+func BenchmarkEncryptAES128_CBCHMAC_64k(b *testing.B) {
+ benchEncryptCBCHMAC(b, 16, 65536)
+}
+
+func BenchmarkEncryptAES128_CBCHMAC_1MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 16, 1048576)
+}
+
+func BenchmarkEncryptAES128_CBCHMAC_64MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 16, 67108864)
+}
+
+func BenchmarkDecryptAES128_CBCHMAC_1k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 16, 1024)
+}
+
+func BenchmarkDecryptAES128_CBCHMAC_64k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 16, 65536)
+}
+
+func BenchmarkDecryptAES128_CBCHMAC_1MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 16, 1048576)
+}
+
+func BenchmarkDecryptAES128_CBCHMAC_64MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 16, 67108864)
+}
+
+func BenchmarkEncryptAES192_CBCHMAC_64k(b *testing.B) {
+ benchEncryptCBCHMAC(b, 24, 65536)
+}
+
+func BenchmarkEncryptAES192_CBCHMAC_1MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 24, 1048576)
+}
+
+func BenchmarkEncryptAES192_CBCHMAC_64MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 24, 67108864)
+}
+
+func BenchmarkDecryptAES192_CBCHMAC_1k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 24, 1024)
+}
+
+func BenchmarkDecryptAES192_CBCHMAC_64k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 24, 65536)
+}
+
+func BenchmarkDecryptAES192_CBCHMAC_1MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 24, 1048576)
+}
+
+func BenchmarkDecryptAES192_CBCHMAC_64MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 24, 67108864)
+}
+
+func BenchmarkEncryptAES256_CBCHMAC_64k(b *testing.B) {
+ benchEncryptCBCHMAC(b, 32, 65536)
+}
+
+func BenchmarkEncryptAES256_CBCHMAC_1MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 32, 1048576)
+}
+
+func BenchmarkEncryptAES256_CBCHMAC_64MB(b *testing.B) {
+ benchEncryptCBCHMAC(b, 32, 67108864)
+}
+
+func BenchmarkDecryptAES256_CBCHMAC_1k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 32, 1032)
+}
+
+func BenchmarkDecryptAES256_CBCHMAC_64k(b *testing.B) {
+ benchDecryptCBCHMAC(b, 32, 65536)
+}
+
+func BenchmarkDecryptAES256_CBCHMAC_1MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 32, 1048576)
+}
+
+func BenchmarkDecryptAES256_CBCHMAC_64MB(b *testing.B) {
+ benchDecryptCBCHMAC(b, 32, 67108864)
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go
new file mode 100644
index 00000000..cbb5f7b8
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go
@@ -0,0 +1,75 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "encoding/binary"
+ "hash"
+ "io"
+)
+
+type concatKDF struct {
+ z, info []byte
+ i uint32
+ cache []byte
+ hasher hash.Hash
+}
+
+// NewConcatKDF builds a KDF reader based on the given inputs.
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
+ buffer := make([]byte, len(algID)+len(ptyUInfo)+len(ptyVInfo)+len(supPubInfo)+len(supPrivInfo))
+ n := 0
+ n += copy(buffer, algID)
+ n += copy(buffer[n:], ptyUInfo)
+ n += copy(buffer[n:], ptyVInfo)
+ n += copy(buffer[n:], supPubInfo)
+ copy(buffer[n:], supPrivInfo)
+
+ hasher := hash.New()
+
+ return &concatKDF{
+ z: z,
+ info: buffer,
+ hasher: hasher,
+ cache: []byte{},
+ i: 1,
+ }
+}
+
+func (ctx *concatKDF) Read(out []byte) (int, error) {
+ copied := copy(out, ctx.cache)
+ ctx.cache = ctx.cache[copied:]
+
+ for copied < len(out) {
+ ctx.hasher.Reset()
+
+ // Write on a hash.Hash never fails
+ _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
+ _, _ = ctx.hasher.Write(ctx.z)
+ _, _ = ctx.hasher.Write(ctx.info)
+
+ hash := ctx.hasher.Sum(nil)
+ chunkCopied := copy(out[copied:], hash)
+ copied += chunkCopied
+ ctx.cache = hash[chunkCopied:]
+
+ ctx.i++
+ }
+
+ return copied, nil
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go
new file mode 100644
index 00000000..48219b3e
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go
@@ -0,0 +1,150 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto"
+ "testing"
+)
+
+// Taken from: https://tools.ietf.org/id/draft-ietf-jose-json-web-algorithms-38.txt
+func TestVectorConcatKDF(t *testing.T) {
+ z := []byte{
+ 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132,
+ 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121,
+ 140, 254, 144, 196}
+
+ algID := []byte{0, 0, 0, 7, 65, 49, 50, 56, 71, 67, 77}
+
+ ptyUInfo := []byte{0, 0, 0, 5, 65, 108, 105, 99, 101}
+ ptyVInfo := []byte{0, 0, 0, 3, 66, 111, 98}
+
+ supPubInfo := []byte{0, 0, 0, 128}
+ supPrivInfo := []byte{}
+
+ expected := []byte{
+ 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26}
+
+ ckdf := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo)
+
+ out0 := make([]byte, 9)
+ out1 := make([]byte, 7)
+
+ read0, err := ckdf.Read(out0)
+ if err != nil {
+ t.Error("error when reading from concat kdf reader", err)
+ return
+ }
+
+ read1, err := ckdf.Read(out1)
+ if err != nil {
+ t.Error("error when reading from concat kdf reader", err)
+ return
+ }
+
+ if read0+read1 != len(out0)+len(out1) {
+ t.Error("did not receive enough bytes from concat kdf reader")
+ return
+ }
+
+ out := []byte{}
+ out = append(out, out0...)
+ out = append(out, out1...)
+
+ if bytes.Compare(out, expected) != 0 {
+ t.Error("did not receive expected output from concat kdf reader")
+ return
+ }
+}
+
+func TestCache(t *testing.T) {
+ z := []byte{
+ 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132,
+ 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121,
+ 140, 254, 144, 196}
+
+ algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}
+
+ ptyUInfo := []byte{1, 2, 3, 4}
+ ptyVInfo := []byte{4, 3, 2, 1}
+
+ supPubInfo := []byte{}
+ supPrivInfo := []byte{}
+
+ outputs := [][]byte{}
+
+ // Read the same amount of data in different chunk sizes
+ chunkSizes := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512}
+
+ for _, c := range chunkSizes {
+ out := make([]byte, 1024)
+ reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo)
+
+ for i := 0; i < 1024; i += c {
+ _, _ = reader.Read(out[i : i+c])
+ }
+
+ outputs = append(outputs, out)
+ }
+
+ for i := range outputs {
+ if bytes.Compare(outputs[i], outputs[(i+1)%len(outputs)]) != 0 {
+ t.Error("not all outputs from KDF matched")
+ }
+ }
+}
+
+func benchmarkKDF(b *testing.B, total int) {
+ z := []byte{
+ 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132,
+ 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121,
+ 140, 254, 144, 196}
+
+ algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}
+
+ ptyUInfo := []byte{1, 2, 3, 4}
+ ptyVInfo := []byte{4, 3, 2, 1}
+
+ supPubInfo := []byte{}
+ supPrivInfo := []byte{}
+
+ out := make([]byte, total)
+ reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo)
+
+ b.ResetTimer()
+ b.SetBytes(int64(total))
+ for i := 0; i < b.N; i++ {
+ _, _ = reader.Read(out)
+ }
+}
+
+func BenchmarkConcatKDF_1k(b *testing.B) {
+ benchmarkKDF(b, 1024)
+}
+
+func BenchmarkConcatKDF_64k(b *testing.B) {
+ benchmarkKDF(b, 65536)
+}
+
+func BenchmarkConcatKDF_1MB(b *testing.B) {
+ benchmarkKDF(b, 1048576)
+}
+
+func BenchmarkConcatKDF_64MB(b *testing.B) {
+ benchmarkKDF(b, 67108864)
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go
new file mode 100644
index 00000000..c6a5a821
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go
@@ -0,0 +1,51 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+ // algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+ algID := lengthPrefixed([]byte(alg))
+ ptyUInfo := lengthPrefixed(apuData)
+ ptyVInfo := lengthPrefixed(apvData)
+
+ // suppPubInfo is the encoded length of the output size in bits
+ supPubInfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+ z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+ reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+
+ key := make([]byte, size)
+
+ // Read on the KDF will never fail
+ _, _ = reader.Read(key)
+ return key
+}
+
+func lengthPrefixed(data []byte) []byte {
+ out := make([]byte, len(data)+4)
+ binary.BigEndian.PutUint32(out, uint32(len(data)))
+ copy(out[4:], data)
+ return out
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go
new file mode 100644
index 00000000..f92abb17
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go
@@ -0,0 +1,98 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "encoding/base64"
+ "math/big"
+ "testing"
+)
+
+// Example keys from JWA, Appendix C
+var aliceKey = &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: fromBase64Int("gI0GAILBdu7T53akrFmMyGcsF3n5dO7MmwNBHKW5SV0="),
+ Y: fromBase64Int("SLW_xSffzlPWrHEVI30DHM_4egVwt3NQqeUD7nMFpps="),
+ },
+ D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="),
+}
+
+var bobKey = &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: elliptic.P256(),
+ X: fromBase64Int("weNJy2HscCSM6AEDTDg04biOvhFhyyWvOHQfeF_PxMQ="),
+ Y: fromBase64Int("e8lnCO-AlStT-NJVX-crhB7QRYhiix03illJOVAOyck="),
+ },
+ D: fromBase64Int("VEmDZpDXXK8p8N0Cndsxs924q6nS1RXFASRl6BfUqdw="),
+}
+
+// Build big int from base64-encoded string. Strips whitespace (for testing).
+func fromBase64Int(data string) *big.Int {
+ val, err := base64.URLEncoding.DecodeString(data)
+ if err != nil {
+ panic("Invalid test data")
+ }
+ return new(big.Int).SetBytes(val)
+}
+
+func TestVectorECDHES(t *testing.T) {
+ apuData := []byte("Alice")
+ apvData := []byte("Bob")
+
+ expected := []byte{
+ 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26}
+
+ output := DeriveECDHES("A128GCM", apuData, apvData, bobKey, &aliceKey.PublicKey, 16)
+
+ if bytes.Compare(output, expected) != 0 {
+ t.Error("output did not match what we expect, got", output, "wanted", expected)
+ }
+}
+
+func BenchmarkECDHES_128(b *testing.B) {
+ apuData := []byte("APU")
+ apvData := []byte("APV")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 16)
+ }
+}
+
+func BenchmarkECDHES_192(b *testing.B) {
+ apuData := []byte("APU")
+ apvData := []byte("APV")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 24)
+ }
+}
+
+func BenchmarkECDHES_256(b *testing.B) {
+ apuData := []byte("APU")
+ apvData := []byte("APV")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 32)
+ }
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go
new file mode 100644
index 00000000..1d36d501
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
+
+// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
+ if len(cek)%8 != 0 {
+ return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := len(cek) / 8
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], cek[i*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer, defaultIV)
+
+ for t := 0; t < 6*n; t++ {
+ copy(buffer[8:], r[t%n])
+
+ block.Encrypt(buffer, buffer)
+
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(r[t%n], buffer[8:])
+ }
+
+ out := make([]byte, (n+1)*8)
+ copy(out, buffer[:8])
+ for i := range r {
+ copy(out[(i+1)*8:], r[i])
+ }
+
+ return out, nil
+}
+
+// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
+ if len(ciphertext)%8 != 0 {
+ return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := (len(ciphertext) / 8) - 1
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], ciphertext[(i+1)*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer[:8], ciphertext[:8])
+
+ for t := 6*n - 1; t >= 0; t-- {
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(buffer[8:], r[t%n])
+
+ block.Decrypt(buffer, buffer)
+
+ copy(r[t%n], buffer[8:])
+ }
+
+ if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
+ return nil, errors.New("square/go-jose: failed to unwrap key")
+ }
+
+ out := make([]byte, n*8)
+ for i := range r {
+ copy(out[i*8:], r[i])
+ }
+
+ return out, nil
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go
new file mode 100644
index 00000000..ceecf812
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go
@@ -0,0 +1,133 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/aes"
+ "encoding/hex"
+ "testing"
+)
+
+func TestAesKeyWrap(t *testing.T) {
+ // Test vectors from: http://csrc.nist.gov/groups/ST/toolkit/documents/kms/key-wrap.pdf
+ kek0, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
+ cek0, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF")
+
+ expected0, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5")
+
+ kek1, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F1011121314151617")
+ cek1, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF")
+
+ expected1, _ := hex.DecodeString("96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D")
+
+ kek2, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F")
+ cek2, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF0001020304050607")
+
+ expected2, _ := hex.DecodeString("A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1")
+
+ block0, _ := aes.NewCipher(kek0)
+ block1, _ := aes.NewCipher(kek1)
+ block2, _ := aes.NewCipher(kek2)
+
+ out0, _ := KeyWrap(block0, cek0)
+ out1, _ := KeyWrap(block1, cek1)
+ out2, _ := KeyWrap(block2, cek2)
+
+ if bytes.Compare(out0, expected0) != 0 {
+ t.Error("output 0 not as expected, got", out0, "wanted", expected0)
+ }
+
+ if bytes.Compare(out1, expected1) != 0 {
+ t.Error("output 1 not as expected, got", out1, "wanted", expected1)
+ }
+
+ if bytes.Compare(out2, expected2) != 0 {
+ t.Error("output 2 not as expected, got", out2, "wanted", expected2)
+ }
+
+ unwrap0, _ := KeyUnwrap(block0, out0)
+ unwrap1, _ := KeyUnwrap(block1, out1)
+ unwrap2, _ := KeyUnwrap(block2, out2)
+
+ if bytes.Compare(unwrap0, cek0) != 0 {
+ t.Error("key unwrap did not return original input, got", unwrap0, "wanted", cek0)
+ }
+
+ if bytes.Compare(unwrap1, cek1) != 0 {
+ t.Error("key unwrap did not return original input, got", unwrap1, "wanted", cek1)
+ }
+
+ if bytes.Compare(unwrap2, cek2) != 0 {
+ t.Error("key unwrap did not return original input, got", unwrap2, "wanted", cek2)
+ }
+}
+
+func TestAesKeyWrapInvalid(t *testing.T) {
+ kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
+
+ // Invalid unwrap input (bit flipped)
+ input0, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CFE5")
+
+ block, _ := aes.NewCipher(kek)
+
+ _, err := KeyUnwrap(block, input0)
+ if err == nil {
+ t.Error("key unwrap failed to detect invalid input")
+ }
+
+ // Invalid unwrap input (truncated)
+ input1, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CF")
+
+ _, err = KeyUnwrap(block, input1)
+ if err == nil {
+ t.Error("key unwrap failed to detect truncated input")
+ }
+
+ // Invalid wrap input (not multiple of 8)
+ input2, _ := hex.DecodeString("0123456789ABCD")
+
+ _, err = KeyWrap(block, input2)
+ if err == nil {
+ t.Error("key wrap accepted invalid input")
+ }
+
+}
+
+func BenchmarkAesKeyWrap(b *testing.B) {
+ kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
+ key, _ := hex.DecodeString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
+
+ block, _ := aes.NewCipher(kek)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ KeyWrap(block, key)
+ }
+}
+
+func BenchmarkAesKeyUnwrap(b *testing.B) {
+ kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
+ input, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5")
+
+ block, _ := aes.NewCipher(kek)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ KeyUnwrap(block, input)
+ }
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go
new file mode 100644
index 00000000..f61af2c0
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go
@@ -0,0 +1,349 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "fmt"
+ "reflect"
+)
+
+// Encrypter represents an encrypter which produces an encrypted JWE object.
+type Encrypter interface {
+ Encrypt(plaintext []byte) (*JsonWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error)
+ SetCompression(alg CompressionAlgorithm)
+}
+
+// MultiEncrypter represents an encrypter which supports multiple recipients.
+type MultiEncrypter interface {
+ Encrypt(plaintext []byte) (*JsonWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error)
+ SetCompression(alg CompressionAlgorithm)
+ AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error
+}
+
+// A generic content cipher
+type contentCipher interface {
+ keySize() int
+ encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+ decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+ keySize() int
+ genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+ encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+ decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+ contentAlg ContentEncryption
+ compressionAlg CompressionAlgorithm
+ cipher contentCipher
+ recipients []recipientKeyInfo
+ keyGenerator keyGenerator
+}
+
+type recipientKeyInfo struct {
+ keyID string
+ keyAlg KeyAlgorithm
+ keyEncrypter keyEncrypter
+}
+
+// SetCompression sets a compression algorithm to be applied before encryption.
+func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) {
+ ctx.compressionAlg = compressionAlg
+}
+
+// NewEncrypter creates an appropriate encrypter based on the key type
+func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) {
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ compressionAlg: NONE,
+ recipients: []recipientKeyInfo{},
+ cipher: getContentCipher(enc),
+ }
+
+ if encrypter.cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ var keyID string
+ var rawKey interface{}
+ switch encryptionKey := encryptionKey.(type) {
+ case *JsonWebKey:
+ keyID = encryptionKey.KeyID
+ rawKey = encryptionKey.Key
+ default:
+ rawKey = encryptionKey
+ }
+
+ switch alg {
+ case DIRECT:
+ // Direct encryption mode must be treated differently
+ if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = staticKeyGenerator{
+ key: rawKey.([]byte),
+ }
+ recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte))
+ if keyID != "" {
+ recipient.keyID = keyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipient}
+ return encrypter, nil
+ case ECDH_ES:
+ // ECDH-ES (w/o key wrapping) is similar to DIRECT mode
+ typeOf := reflect.TypeOf(rawKey)
+ if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = ecKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ algID: string(enc),
+ publicKey: rawKey.(*ecdsa.PublicKey),
+ }
+ recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey))
+ if keyID != "" {
+ recipient.keyID = keyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipient}
+ return encrypter, nil
+ default:
+ // Can just add a standard recipient
+ encrypter.keyGenerator = randomKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ }
+ err := encrypter.AddRecipient(alg, encryptionKey)
+ return encrypter, err
+ }
+}
+
+// NewMultiEncrypter creates a multi-encrypter based on the given parameters
+func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) {
+ cipher := getContentCipher(enc)
+
+ if cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ compressionAlg: NONE,
+ recipients: []recipientKeyInfo{},
+ cipher: cipher,
+ keyGenerator: randomKeyGenerator{
+ size: cipher.keySize(),
+ },
+ }
+
+ return encrypter, nil
+}
+
+func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) {
+ var recipient recipientKeyInfo
+
+ switch alg {
+ case DIRECT, ECDH_ES:
+ return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg)
+ }
+
+ recipient, err = makeJWERecipient(alg, encryptionKey)
+
+ if err == nil {
+ ctx.recipients = append(ctx.recipients, recipient)
+ }
+ return err
+}
+
+func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
+ switch encryptionKey := encryptionKey.(type) {
+ case *rsa.PublicKey:
+ return newRSARecipient(alg, encryptionKey)
+ case *ecdsa.PublicKey:
+ return newECDHRecipient(alg, encryptionKey)
+ case []byte:
+ return newSymmetricRecipient(alg, encryptionKey)
+ case *JsonWebKey:
+ recipient, err := makeJWERecipient(alg, encryptionKey.Key)
+ if err == nil && encryptionKey.KeyID != "" {
+ recipient.keyID = encryptionKey.KeyID
+ }
+ return recipient, err
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedKeyType
+ }
+}
+
+// newDecrypter creates an appropriate decrypter based on the key type
+func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
+ switch decryptionKey := decryptionKey.(type) {
+ case *rsa.PrivateKey:
+ return &rsaDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case *ecdsa.PrivateKey:
+ return &ecDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case []byte:
+ return &symmetricKeyCipher{
+ key: decryptionKey,
+ }, nil
+ case *JsonWebKey:
+ return newDecrypter(decryptionKey.Key)
+ default:
+ return nil, ErrUnsupportedKeyType
+ }
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) {
+ return ctx.EncryptWithAuthData(plaintext, nil)
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) {
+ obj := &JsonWebEncryption{}
+ obj.aad = aad
+
+ obj.protected = &rawHeader{
+ Enc: ctx.contentAlg,
+ }
+ obj.recipients = make([]recipientInfo, len(ctx.recipients))
+
+ if len(ctx.recipients) == 0 {
+ return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to")
+ }
+
+ cek, headers, err := ctx.keyGenerator.genKey()
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.merge(&headers)
+
+ for i, info := range ctx.recipients {
+ recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ recipient.header.Alg = string(info.keyAlg)
+ if info.keyID != "" {
+ recipient.header.Kid = info.keyID
+ }
+ obj.recipients[i] = recipient
+ }
+
+ if len(ctx.recipients) == 1 {
+ // Move per-recipient headers into main protected header if there's
+ // only a single recipient.
+ obj.protected.merge(obj.recipients[0].header)
+ obj.recipients[0].header = nil
+ }
+
+ if ctx.compressionAlg != NONE {
+ plaintext, err = compress(ctx.compressionAlg, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.Zip = ctx.compressionAlg
+ }
+
+ authData := obj.computeAuthData()
+ parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.iv = parts.iv
+ obj.ciphertext = parts.ciphertext
+ obj.tag = parts.tag
+
+ return obj, nil
+}
+
+// Decrypt and validate the object and return the plaintext.
+func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+ headers := obj.mergedHeaders(nil)
+
+ if len(headers.Crit) > 0 {
+ return nil, fmt.Errorf("square/go-jose: unsupported crit header")
+ }
+
+ decrypter, err := newDecrypter(decryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ cipher := getContentCipher(headers.Enc)
+ if cipher == nil {
+ return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ var plaintext []byte
+ for _, recipient := range obj.recipients {
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ if err == nil {
+ break
+ }
+ }
+ }
+
+ if plaintext == nil {
+ return nil, ErrCryptoFailure
+ }
+
+	// The "zip" header parameter may only be present in the protected header.
+ if obj.protected.Zip != "" {
+ plaintext, err = decompress(obj.protected.Zip, plaintext)
+ }
+
+ return plaintext, err
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go
new file mode 100644
index 00000000..86b8fc0a
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go
@@ -0,0 +1,784 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "fmt"
+ "io"
+ "testing"
+)
+
// We generate only a single RSA key and one EC key per supported curve for
// testing; key generation is slow and reusing keys speeds up the suite.
// Errors are deliberately discarded: a failed generation would leave the key
// nil and make every dependent test fail loudly.
var rsaTestKey, _ = rsa.GenerateKey(rand.Reader, 2048)

var ecTestKey256, _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
var ecTestKey384, _ = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
var ecTestKey521, _ = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+
+func RoundtripJWE(keyAlg KeyAlgorithm, encAlg ContentEncryption, compressionAlg CompressionAlgorithm, serializer func(*JsonWebEncryption) (string, error), corrupter func(*JsonWebEncryption) bool, aad []byte, encryptionKey interface{}, decryptionKey interface{}) error {
+ enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey)
+ if err != nil {
+ return fmt.Errorf("error on new encrypter: %s", err)
+ }
+
+ enc.SetCompression(compressionAlg)
+
+ input := []byte("Lorem ipsum dolor sit amet")
+ obj, err := enc.EncryptWithAuthData(input, aad)
+ if err != nil {
+ return fmt.Errorf("error in encrypt: %s", err)
+ }
+
+ msg, err := serializer(obj)
+ if err != nil {
+ return fmt.Errorf("error in serializer: %s", err)
+ }
+
+ parsed, err := ParseEncrypted(msg)
+ if err != nil {
+ return fmt.Errorf("error in parse: %s, on msg '%s'", err, msg)
+ }
+
+ // (Maybe) mangle object
+ skip := corrupter(parsed)
+ if skip {
+ return fmt.Errorf("corrupter indicated message should be skipped")
+ }
+
+ if bytes.Compare(parsed.GetAuthData(), aad) != 0 {
+ return fmt.Errorf("auth data in parsed object does not match")
+ }
+
+ output, err := parsed.Decrypt(decryptionKey)
+ if err != nil {
+ return fmt.Errorf("error on decrypt: %s", err)
+ }
+
+ if bytes.Compare(input, output) != 0 {
+ return fmt.Errorf("Decrypted output does not match input, got '%s' but wanted '%s'", output, input)
+ }
+
+ return nil
+}
+
+func TestRoundtripsJWE(t *testing.T) {
+ // Test matrix
+ keyAlgs := []KeyAlgorithm{
+ DIRECT, ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW, A128KW, A192KW, A256KW,
+ RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW, A192GCMKW, A256GCMKW}
+ encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512}
+ zipAlgs := []CompressionAlgorithm{NONE, DEFLATE}
+
+ serializers := []func(*JsonWebEncryption) (string, error){
+ func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() },
+ func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil },
+ }
+
+ corrupter := func(obj *JsonWebEncryption) bool { return false }
+
+ // Note: can't use AAD with compact serialization
+ aads := [][]byte{
+ nil,
+ []byte("Ut enim ad minim veniam"),
+ }
+
+ // Test all different configurations
+ for _, alg := range keyAlgs {
+ for _, enc := range encAlgs {
+ for _, key := range generateTestKeys(alg, enc) {
+ for _, zip := range zipAlgs {
+ for i, serializer := range serializers {
+ err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec)
+ if err != nil {
+ t.Error(err, alg, enc, zip, i)
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func TestRoundtripsJWECorrupted(t *testing.T) {
+ // Test matrix
+ keyAlgs := []KeyAlgorithm{DIRECT, ECDH_ES, ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW}
+ encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512}
+ zipAlgs := []CompressionAlgorithm{NONE, DEFLATE}
+
+ serializers := []func(*JsonWebEncryption) (string, error){
+ func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() },
+ func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil },
+ }
+
+ bitflip := func(slice []byte) bool {
+ if len(slice) > 0 {
+ slice[0] ^= 0xFF
+ return false
+ }
+ return true
+ }
+
+ corrupters := []func(*JsonWebEncryption) bool{
+ func(obj *JsonWebEncryption) bool {
+ // Set invalid ciphertext
+ return bitflip(obj.ciphertext)
+ },
+ func(obj *JsonWebEncryption) bool {
+ // Set invalid auth tag
+ return bitflip(obj.tag)
+ },
+ func(obj *JsonWebEncryption) bool {
+ // Set invalid AAD
+ return bitflip(obj.aad)
+ },
+ func(obj *JsonWebEncryption) bool {
+ // Mess with encrypted key
+ return bitflip(obj.recipients[0].encryptedKey)
+ },
+ func(obj *JsonWebEncryption) bool {
+ // Mess with GCM-KW auth tag
+ return bitflip(obj.protected.Tag.bytes())
+ },
+ }
+
+ // Note: can't use AAD with compact serialization
+ aads := [][]byte{
+ nil,
+ []byte("Ut enim ad minim veniam"),
+ }
+
+ // Test all different configurations
+ for _, alg := range keyAlgs {
+ for _, enc := range encAlgs {
+ for _, key := range generateTestKeys(alg, enc) {
+ for _, zip := range zipAlgs {
+ for i, serializer := range serializers {
+ for j, corrupter := range corrupters {
+ err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec)
+ if err == nil {
+ t.Error("failed to detect corrupt data", err, alg, enc, zip, i, j)
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func TestEncrypterWithJWKAndKeyID(t *testing.T) {
+ enc, err := NewEncrypter(A128KW, A128GCM, &JsonWebKey{
+ KeyID: "test-id",
+ Key: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ })
+ if err != nil {
+ t.Error(err)
+ }
+
+ ciphertext, _ := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
+
+ serialized1, _ := ciphertext.CompactSerialize()
+ serialized2 := ciphertext.FullSerialize()
+
+ parsed1, _ := ParseEncrypted(serialized1)
+ parsed2, _ := ParseEncrypted(serialized2)
+
+ if parsed1.Header.KeyID != "test-id" {
+ t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed1.Header.KeyID)
+ }
+ if parsed2.Header.KeyID != "test-id" {
+ t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed2.Header.KeyID)
+ }
+}
+
+func TestEncrypterWithBrokenRand(t *testing.T) {
+ keyAlgs := []KeyAlgorithm{ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW}
+ encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512}
+
+ serializer := func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() }
+ corrupter := func(obj *JsonWebEncryption) bool { return false }
+
+ // Break rand reader
+ readers := []func() io.Reader{
+ // Totally broken
+ func() io.Reader { return bytes.NewReader([]byte{}) },
+ // Not enough bytes
+ func() io.Reader { return io.LimitReader(rand.Reader, 20) },
+ }
+
+ defer resetRandReader()
+
+ for _, alg := range keyAlgs {
+ for _, enc := range encAlgs {
+ for _, key := range generateTestKeys(alg, enc) {
+ for i, getReader := range readers {
+ randReader = getReader()
+ err := RoundtripJWE(alg, enc, NONE, serializer, corrupter, nil, key.enc, key.dec)
+ if err == nil {
+ t.Error("encrypter should fail if rand is broken", i)
+ }
+ }
+ }
+ }
+ }
+}
+
+func TestNewEncrypterErrors(t *testing.T) {
+ _, err := NewEncrypter("XYZ", "XYZ", nil)
+ if err == nil {
+ t.Error("was able to instantiate encrypter with invalid cipher")
+ }
+
+ _, err = NewMultiEncrypter("XYZ")
+ if err == nil {
+ t.Error("was able to instantiate multi-encrypter with invalid cipher")
+ }
+
+ _, err = NewEncrypter(DIRECT, A128GCM, nil)
+ if err == nil {
+ t.Error("was able to instantiate encrypter with invalid direct key")
+ }
+
+ _, err = NewEncrypter(ECDH_ES, A128GCM, nil)
+ if err == nil {
+ t.Error("was able to instantiate encrypter with invalid EC key")
+ }
+}
+
+func TestMultiRecipientJWE(t *testing.T) {
+ enc, err := NewMultiEncrypter(A128GCM)
+ if err != nil {
+ panic(err)
+ }
+
+ err = enc.AddRecipient(RSA_OAEP, &rsaTestKey.PublicKey)
+ if err != nil {
+ t.Error("error when adding RSA recipient", err)
+ }
+
+ sharedKey := []byte{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ }
+
+ err = enc.AddRecipient(A256GCMKW, sharedKey)
+ if err != nil {
+ t.Error("error when adding AES recipient: ", err)
+ return
+ }
+
+ input := []byte("Lorem ipsum dolor sit amet")
+ obj, err := enc.Encrypt(input)
+ if err != nil {
+ t.Error("error in encrypt: ", err)
+ return
+ }
+
+ msg := obj.FullSerialize()
+
+ parsed, err := ParseEncrypted(msg)
+ if err != nil {
+ t.Error("error in parse: ", err)
+ return
+ }
+
+ output, err := parsed.Decrypt(rsaTestKey)
+ if err != nil {
+ t.Error("error on decrypt with RSA: ", err)
+ return
+ }
+
+ if bytes.Compare(input, output) != 0 {
+ t.Error("Decrypted output does not match input: ", output, input)
+ return
+ }
+
+ output, err = parsed.Decrypt(sharedKey)
+ if err != nil {
+ t.Error("error on decrypt with AES: ", err)
+ return
+ }
+
+ if bytes.Compare(input, output) != 0 {
+ t.Error("Decrypted output does not match input", output, input)
+ return
+ }
+}
+
+func TestMultiRecipientErrors(t *testing.T) {
+ enc, err := NewMultiEncrypter(A128GCM)
+ if err != nil {
+ panic(err)
+ }
+
+ input := []byte("Lorem ipsum dolor sit amet")
+ _, err = enc.Encrypt(input)
+ if err == nil {
+ t.Error("should fail when encrypting to zero recipients")
+ }
+
+ err = enc.AddRecipient(DIRECT, nil)
+ if err == nil {
+ t.Error("should reject DIRECT mode when encrypting to multiple recipients")
+ }
+
+ err = enc.AddRecipient(ECDH_ES, nil)
+ if err == nil {
+ t.Error("should reject ECDH_ES mode when encrypting to multiple recipients")
+ }
+
+ err = enc.AddRecipient(RSA1_5, nil)
+ if err == nil {
+ t.Error("should reject invalid recipient key")
+ }
+}
+
// testKey pairs an encryption key with its matching decryption key for use in
// the round-trip tests; either side may be a raw key, a private/public key,
// or a *JsonWebKey.
type testKey struct {
	enc, dec interface{}
}
+
+func symmetricTestKey(size int) []testKey {
+ key, _, _ := randomKeyGenerator{size: size}.genKey()
+
+ return []testKey{
+ testKey{
+ enc: key,
+ dec: key,
+ },
+ testKey{
+ enc: &JsonWebKey{KeyID: "test", Key: key},
+ dec: &JsonWebKey{KeyID: "test", Key: key},
+ },
+ }
+}
+
+func generateTestKeys(keyAlg KeyAlgorithm, encAlg ContentEncryption) []testKey {
+ switch keyAlg {
+ case DIRECT:
+ return symmetricTestKey(getContentCipher(encAlg).keySize())
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ return []testKey{
+ testKey{
+ dec: ecTestKey256,
+ enc: &ecTestKey256.PublicKey,
+ },
+ testKey{
+ dec: ecTestKey384,
+ enc: &ecTestKey384.PublicKey,
+ },
+ testKey{
+ dec: ecTestKey521,
+ enc: &ecTestKey521.PublicKey,
+ },
+ testKey{
+ dec: &JsonWebKey{KeyID: "test", Key: ecTestKey256},
+ enc: &JsonWebKey{KeyID: "test", Key: &ecTestKey256.PublicKey},
+ },
+ }
+ case A128GCMKW, A128KW:
+ return symmetricTestKey(16)
+ case A192GCMKW, A192KW:
+ return symmetricTestKey(24)
+ case A256GCMKW, A256KW:
+ return symmetricTestKey(32)
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ return []testKey{testKey{
+ dec: rsaTestKey,
+ enc: &rsaTestKey.PublicKey,
+ }}
+ }
+
+ panic("Must update test case")
+}
+
+func RunRoundtripsJWE(b *testing.B, alg KeyAlgorithm, enc ContentEncryption, zip CompressionAlgorithm, priv, pub interface{}) {
+ serializer := func(obj *JsonWebEncryption) (string, error) {
+ return obj.CompactSerialize()
+ }
+
+ corrupter := func(obj *JsonWebEncryption) bool { return false }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := RoundtripJWE(alg, enc, zip, serializer, corrupter, nil, pub, priv)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
+
var (
	// chunks maps a human-readable size label to a zero-filled payload of
	// that size, used as benchmark plaintext.
	chunks = map[string][]byte{
		"1B":   make([]byte, 1),
		"64B":  make([]byte, 64),
		"1KB":  make([]byte, 1024),
		"64KB": make([]byte, 65536),
		"1MB":  make([]byte, 1048576),
		"64MB": make([]byte, 67108864),
	}

	// symKey is a random 256-bit symmetric key shared by the direct-mode and
	// key-wrap benchmark encrypters. genKey errors are discarded here; a
	// failure would surface as a panic in mustEncrypter below.
	symKey, _, _ = randomKeyGenerator{size: 32}.genKey()

	// encrypters maps a benchmark label to a ready-made encrypter for that
	// key/content algorithm combination.
	encrypters = map[string]Encrypter{
		"OAEPAndGCM":          mustEncrypter(RSA_OAEP, A128GCM, &rsaTestKey.PublicKey),
		"PKCSAndGCM":          mustEncrypter(RSA1_5, A128GCM, &rsaTestKey.PublicKey),
		"OAEPAndCBC":          mustEncrypter(RSA_OAEP, A128CBC_HS256, &rsaTestKey.PublicKey),
		"PKCSAndCBC":          mustEncrypter(RSA1_5, A128CBC_HS256, &rsaTestKey.PublicKey),
		"DirectGCM128":        mustEncrypter(DIRECT, A128GCM, symKey),
		"DirectCBC128":        mustEncrypter(DIRECT, A128CBC_HS256, symKey),
		"DirectGCM256":        mustEncrypter(DIRECT, A256GCM, symKey),
		"DirectCBC256":        mustEncrypter(DIRECT, A256CBC_HS512, symKey),
		"AESKWAndGCM128":      mustEncrypter(A128KW, A128GCM, symKey),
		"AESKWAndCBC256":      mustEncrypter(A256KW, A256GCM, symKey),
		"ECDHOnP256AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey256.PublicKey),
		"ECDHOnP384AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey384.PublicKey),
		"ECDHOnP521AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey521.PublicKey),
	}
)
+
// Encryption throughput benchmarks: one Benchmark per (payload size,
// encrypter) pair from the chunks and encrypters tables. Each one simply
// dispatches to benchEncrypt with the corresponding table keys.
func BenchmarkEncrypt1BWithOAEPAndGCM(b *testing.B)   { benchEncrypt("1B", "OAEPAndGCM", b) }
func BenchmarkEncrypt64BWithOAEPAndGCM(b *testing.B)  { benchEncrypt("64B", "OAEPAndGCM", b) }
func BenchmarkEncrypt1KBWithOAEPAndGCM(b *testing.B)  { benchEncrypt("1KB", "OAEPAndGCM", b) }
func BenchmarkEncrypt64KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64KB", "OAEPAndGCM", b) }
func BenchmarkEncrypt1MBWithOAEPAndGCM(b *testing.B)  { benchEncrypt("1MB", "OAEPAndGCM", b) }
func BenchmarkEncrypt64MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64MB", "OAEPAndGCM", b) }

func BenchmarkEncrypt1BWithPKCSAndGCM(b *testing.B)   { benchEncrypt("1B", "PKCSAndGCM", b) }
func BenchmarkEncrypt64BWithPKCSAndGCM(b *testing.B)  { benchEncrypt("64B", "PKCSAndGCM", b) }
func BenchmarkEncrypt1KBWithPKCSAndGCM(b *testing.B)  { benchEncrypt("1KB", "PKCSAndGCM", b) }
func BenchmarkEncrypt64KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64KB", "PKCSAndGCM", b) }
func BenchmarkEncrypt1MBWithPKCSAndGCM(b *testing.B)  { benchEncrypt("1MB", "PKCSAndGCM", b) }
func BenchmarkEncrypt64MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64MB", "PKCSAndGCM", b) }

func BenchmarkEncrypt1BWithOAEPAndCBC(b *testing.B)   { benchEncrypt("1B", "OAEPAndCBC", b) }
func BenchmarkEncrypt64BWithOAEPAndCBC(b *testing.B)  { benchEncrypt("64B", "OAEPAndCBC", b) }
func BenchmarkEncrypt1KBWithOAEPAndCBC(b *testing.B)  { benchEncrypt("1KB", "OAEPAndCBC", b) }
func BenchmarkEncrypt64KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64KB", "OAEPAndCBC", b) }
func BenchmarkEncrypt1MBWithOAEPAndCBC(b *testing.B)  { benchEncrypt("1MB", "OAEPAndCBC", b) }
func BenchmarkEncrypt64MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64MB", "OAEPAndCBC", b) }

func BenchmarkEncrypt1BWithPKCSAndCBC(b *testing.B)   { benchEncrypt("1B", "PKCSAndCBC", b) }
func BenchmarkEncrypt64BWithPKCSAndCBC(b *testing.B)  { benchEncrypt("64B", "PKCSAndCBC", b) }
func BenchmarkEncrypt1KBWithPKCSAndCBC(b *testing.B)  { benchEncrypt("1KB", "PKCSAndCBC", b) }
func BenchmarkEncrypt64KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64KB", "PKCSAndCBC", b) }
func BenchmarkEncrypt1MBWithPKCSAndCBC(b *testing.B)  { benchEncrypt("1MB", "PKCSAndCBC", b) }
func BenchmarkEncrypt64MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64MB", "PKCSAndCBC", b) }

func BenchmarkEncrypt1BWithDirectGCM128(b *testing.B)   { benchEncrypt("1B", "DirectGCM128", b) }
func BenchmarkEncrypt64BWithDirectGCM128(b *testing.B)  { benchEncrypt("64B", "DirectGCM128", b) }
func BenchmarkEncrypt1KBWithDirectGCM128(b *testing.B)  { benchEncrypt("1KB", "DirectGCM128", b) }
func BenchmarkEncrypt64KBWithDirectGCM128(b *testing.B) { benchEncrypt("64KB", "DirectGCM128", b) }
func BenchmarkEncrypt1MBWithDirectGCM128(b *testing.B)  { benchEncrypt("1MB", "DirectGCM128", b) }
func BenchmarkEncrypt64MBWithDirectGCM128(b *testing.B) { benchEncrypt("64MB", "DirectGCM128", b) }

func BenchmarkEncrypt1BWithDirectCBC128(b *testing.B)   { benchEncrypt("1B", "DirectCBC128", b) }
func BenchmarkEncrypt64BWithDirectCBC128(b *testing.B)  { benchEncrypt("64B", "DirectCBC128", b) }
func BenchmarkEncrypt1KBWithDirectCBC128(b *testing.B)  { benchEncrypt("1KB", "DirectCBC128", b) }
func BenchmarkEncrypt64KBWithDirectCBC128(b *testing.B) { benchEncrypt("64KB", "DirectCBC128", b) }
func BenchmarkEncrypt1MBWithDirectCBC128(b *testing.B)  { benchEncrypt("1MB", "DirectCBC128", b) }
func BenchmarkEncrypt64MBWithDirectCBC128(b *testing.B) { benchEncrypt("64MB", "DirectCBC128", b) }

func BenchmarkEncrypt1BWithDirectGCM256(b *testing.B)   { benchEncrypt("1B", "DirectGCM256", b) }
func BenchmarkEncrypt64BWithDirectGCM256(b *testing.B)  { benchEncrypt("64B", "DirectGCM256", b) }
func BenchmarkEncrypt1KBWithDirectGCM256(b *testing.B)  { benchEncrypt("1KB", "DirectGCM256", b) }
func BenchmarkEncrypt64KBWithDirectGCM256(b *testing.B) { benchEncrypt("64KB", "DirectGCM256", b) }
func BenchmarkEncrypt1MBWithDirectGCM256(b *testing.B)  { benchEncrypt("1MB", "DirectGCM256", b) }
func BenchmarkEncrypt64MBWithDirectGCM256(b *testing.B) { benchEncrypt("64MB", "DirectGCM256", b) }

func BenchmarkEncrypt1BWithDirectCBC256(b *testing.B)   { benchEncrypt("1B", "DirectCBC256", b) }
func BenchmarkEncrypt64BWithDirectCBC256(b *testing.B)  { benchEncrypt("64B", "DirectCBC256", b) }
func BenchmarkEncrypt1KBWithDirectCBC256(b *testing.B)  { benchEncrypt("1KB", "DirectCBC256", b) }
func BenchmarkEncrypt64KBWithDirectCBC256(b *testing.B) { benchEncrypt("64KB", "DirectCBC256", b) }
func BenchmarkEncrypt1MBWithDirectCBC256(b *testing.B)  { benchEncrypt("1MB", "DirectCBC256", b) }
func BenchmarkEncrypt64MBWithDirectCBC256(b *testing.B) { benchEncrypt("64MB", "DirectCBC256", b) }

func BenchmarkEncrypt1BWithAESKWAndGCM128(b *testing.B)   { benchEncrypt("1B", "AESKWAndGCM128", b) }
func BenchmarkEncrypt64BWithAESKWAndGCM128(b *testing.B)  { benchEncrypt("64B", "AESKWAndGCM128", b) }
func BenchmarkEncrypt1KBWithAESKWAndGCM128(b *testing.B)  { benchEncrypt("1KB", "AESKWAndGCM128", b) }
func BenchmarkEncrypt64KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64KB", "AESKWAndGCM128", b) }
func BenchmarkEncrypt1MBWithAESKWAndGCM128(b *testing.B)  { benchEncrypt("1MB", "AESKWAndGCM128", b) }
func BenchmarkEncrypt64MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64MB", "AESKWAndGCM128", b) }

func BenchmarkEncrypt1BWithAESKWAndCBC256(b *testing.B)   { benchEncrypt("1B", "AESKWAndCBC256", b) }
func BenchmarkEncrypt64BWithAESKWAndCBC256(b *testing.B)  { benchEncrypt("64B", "AESKWAndCBC256", b) }
func BenchmarkEncrypt1KBWithAESKWAndCBC256(b *testing.B)  { benchEncrypt("1KB", "AESKWAndCBC256", b) }
func BenchmarkEncrypt64KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64KB", "AESKWAndCBC256", b) }
func BenchmarkEncrypt1MBWithAESKWAndCBC256(b *testing.B)  { benchEncrypt("1MB", "AESKWAndCBC256", b) }
func BenchmarkEncrypt64MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64MB", "AESKWAndCBC256", b) }

func BenchmarkEncrypt1BWithECDHOnP256AndGCM128(b *testing.B) {
	benchEncrypt("1B", "ECDHOnP256AndGCM128", b)
}
func BenchmarkEncrypt64BWithECDHOnP256AndGCM128(b *testing.B) {
	benchEncrypt("64B", "ECDHOnP256AndGCM128", b)
}
func BenchmarkEncrypt1KBWithECDHOnP256AndGCM128(b *testing.B) {
	benchEncrypt("1KB", "ECDHOnP256AndGCM128", b)
}
func BenchmarkEncrypt64KBWithECDHOnP256AndGCM128(b *testing.B) {
	benchEncrypt("64KB", "ECDHOnP256AndGCM128", b)
}
func BenchmarkEncrypt1MBWithECDHOnP256AndGCM128(b *testing.B) {
	benchEncrypt("1MB", "ECDHOnP256AndGCM128", b)
}
func BenchmarkEncrypt64MBWithECDHOnP256AndGCM128(b *testing.B) {
	benchEncrypt("64MB", "ECDHOnP256AndGCM128", b)
}

func BenchmarkEncrypt1BWithECDHOnP384AndGCM128(b *testing.B) {
	benchEncrypt("1B", "ECDHOnP384AndGCM128", b)
}
func BenchmarkEncrypt64BWithECDHOnP384AndGCM128(b *testing.B) {
	benchEncrypt("64B", "ECDHOnP384AndGCM128", b)
}
func BenchmarkEncrypt1KBWithECDHOnP384AndGCM128(b *testing.B) {
	benchEncrypt("1KB", "ECDHOnP384AndGCM128", b)
}
func BenchmarkEncrypt64KBWithECDHOnP384AndGCM128(b *testing.B) {
	benchEncrypt("64KB", "ECDHOnP384AndGCM128", b)
}
func BenchmarkEncrypt1MBWithECDHOnP384AndGCM128(b *testing.B) {
	benchEncrypt("1MB", "ECDHOnP384AndGCM128", b)
}
func BenchmarkEncrypt64MBWithECDHOnP384AndGCM128(b *testing.B) {
	benchEncrypt("64MB", "ECDHOnP384AndGCM128", b)
}

func BenchmarkEncrypt1BWithECDHOnP521AndGCM128(b *testing.B) {
	benchEncrypt("1B", "ECDHOnP521AndGCM128", b)
}
func BenchmarkEncrypt64BWithECDHOnP521AndGCM128(b *testing.B) {
	benchEncrypt("64B", "ECDHOnP521AndGCM128", b)
}
func BenchmarkEncrypt1KBWithECDHOnP521AndGCM128(b *testing.B) {
	benchEncrypt("1KB", "ECDHOnP521AndGCM128", b)
}
func BenchmarkEncrypt64KBWithECDHOnP521AndGCM128(b *testing.B) {
	benchEncrypt("64KB", "ECDHOnP521AndGCM128", b)
}
func BenchmarkEncrypt1MBWithECDHOnP521AndGCM128(b *testing.B) {
	benchEncrypt("1MB", "ECDHOnP521AndGCM128", b)
}
func BenchmarkEncrypt64MBWithECDHOnP521AndGCM128(b *testing.B) {
	benchEncrypt("64MB", "ECDHOnP521AndGCM128", b)
}
+
+func benchEncrypt(chunkKey, primKey string, b *testing.B) {
+ data, ok := chunks[chunkKey]
+ if !ok {
+ b.Fatalf("unknown chunk size %s", chunkKey)
+ }
+
+ enc, ok := encrypters[primKey]
+ if !ok {
+ b.Fatalf("unknown encrypter %s", primKey)
+ }
+
+ b.SetBytes(int64(len(data)))
+ for i := 0; i < b.N; i++ {
+ enc.Encrypt(data)
+ }
+}
+
var (
	// decryptionKeys maps each encrypter label in the encrypters table to
	// the private or symmetric key needed to decrypt its output.
	decryptionKeys = map[string]interface{}{
		"OAEPAndGCM": rsaTestKey,
		"PKCSAndGCM": rsaTestKey,
		"OAEPAndCBC": rsaTestKey,
		"PKCSAndCBC": rsaTestKey,

		"DirectGCM128": symKey,
		"DirectCBC128": symKey,
		"DirectGCM256": symKey,
		"DirectCBC256": symKey,

		"AESKWAndGCM128": symKey,
		"AESKWAndCBC256": symKey,

		"ECDHOnP256AndGCM128": ecTestKey256,
		"ECDHOnP384AndGCM128": ecTestKey384,
		"ECDHOnP521AndGCM128": ecTestKey521,
	}
)
+
// Decryption throughput benchmarks: one Benchmark per (payload size,
// encrypter) pair, dispatching to benchDecrypt with the corresponding
// chunks/encrypters/decryptionKeys table keys.
func BenchmarkDecrypt1BWithOAEPAndGCM(b *testing.B)   { benchDecrypt("1B", "OAEPAndGCM", b) }
func BenchmarkDecrypt64BWithOAEPAndGCM(b *testing.B)  { benchDecrypt("64B", "OAEPAndGCM", b) }
func BenchmarkDecrypt1KBWithOAEPAndGCM(b *testing.B)  { benchDecrypt("1KB", "OAEPAndGCM", b) }
func BenchmarkDecrypt64KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64KB", "OAEPAndGCM", b) }
func BenchmarkDecrypt1MBWithOAEPAndGCM(b *testing.B)  { benchDecrypt("1MB", "OAEPAndGCM", b) }
func BenchmarkDecrypt64MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64MB", "OAEPAndGCM", b) }

func BenchmarkDecrypt1BWithPKCSAndGCM(b *testing.B)   { benchDecrypt("1B", "PKCSAndGCM", b) }
func BenchmarkDecrypt64BWithPKCSAndGCM(b *testing.B)  { benchDecrypt("64B", "PKCSAndGCM", b) }
func BenchmarkDecrypt1KBWithPKCSAndGCM(b *testing.B)  { benchDecrypt("1KB", "PKCSAndGCM", b) }
func BenchmarkDecrypt64KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64KB", "PKCSAndGCM", b) }
func BenchmarkDecrypt1MBWithPKCSAndGCM(b *testing.B)  { benchDecrypt("1MB", "PKCSAndGCM", b) }
func BenchmarkDecrypt64MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64MB", "PKCSAndGCM", b) }

func BenchmarkDecrypt1BWithOAEPAndCBC(b *testing.B)   { benchDecrypt("1B", "OAEPAndCBC", b) }
func BenchmarkDecrypt64BWithOAEPAndCBC(b *testing.B)  { benchDecrypt("64B", "OAEPAndCBC", b) }
func BenchmarkDecrypt1KBWithOAEPAndCBC(b *testing.B)  { benchDecrypt("1KB", "OAEPAndCBC", b) }
func BenchmarkDecrypt64KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64KB", "OAEPAndCBC", b) }
func BenchmarkDecrypt1MBWithOAEPAndCBC(b *testing.B)  { benchDecrypt("1MB", "OAEPAndCBC", b) }
func BenchmarkDecrypt64MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64MB", "OAEPAndCBC", b) }

func BenchmarkDecrypt1BWithPKCSAndCBC(b *testing.B)   { benchDecrypt("1B", "PKCSAndCBC", b) }
func BenchmarkDecrypt64BWithPKCSAndCBC(b *testing.B)  { benchDecrypt("64B", "PKCSAndCBC", b) }
func BenchmarkDecrypt1KBWithPKCSAndCBC(b *testing.B)  { benchDecrypt("1KB", "PKCSAndCBC", b) }
func BenchmarkDecrypt64KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64KB", "PKCSAndCBC", b) }
func BenchmarkDecrypt1MBWithPKCSAndCBC(b *testing.B)  { benchDecrypt("1MB", "PKCSAndCBC", b) }
func BenchmarkDecrypt64MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64MB", "PKCSAndCBC", b) }

func BenchmarkDecrypt1BWithDirectGCM128(b *testing.B)   { benchDecrypt("1B", "DirectGCM128", b) }
func BenchmarkDecrypt64BWithDirectGCM128(b *testing.B)  { benchDecrypt("64B", "DirectGCM128", b) }
func BenchmarkDecrypt1KBWithDirectGCM128(b *testing.B)  { benchDecrypt("1KB", "DirectGCM128", b) }
func BenchmarkDecrypt64KBWithDirectGCM128(b *testing.B) { benchDecrypt("64KB", "DirectGCM128", b) }
func BenchmarkDecrypt1MBWithDirectGCM128(b *testing.B)  { benchDecrypt("1MB", "DirectGCM128", b) }
func BenchmarkDecrypt64MBWithDirectGCM128(b *testing.B) { benchDecrypt("64MB", "DirectGCM128", b) }

func BenchmarkDecrypt1BWithDirectCBC128(b *testing.B)   { benchDecrypt("1B", "DirectCBC128", b) }
func BenchmarkDecrypt64BWithDirectCBC128(b *testing.B)  { benchDecrypt("64B", "DirectCBC128", b) }
func BenchmarkDecrypt1KBWithDirectCBC128(b *testing.B)  { benchDecrypt("1KB", "DirectCBC128", b) }
func BenchmarkDecrypt64KBWithDirectCBC128(b *testing.B) { benchDecrypt("64KB", "DirectCBC128", b) }
func BenchmarkDecrypt1MBWithDirectCBC128(b *testing.B)  { benchDecrypt("1MB", "DirectCBC128", b) }
func BenchmarkDecrypt64MBWithDirectCBC128(b *testing.B) { benchDecrypt("64MB", "DirectCBC128", b) }

func BenchmarkDecrypt1BWithDirectGCM256(b *testing.B)   { benchDecrypt("1B", "DirectGCM256", b) }
func BenchmarkDecrypt64BWithDirectGCM256(b *testing.B)  { benchDecrypt("64B", "DirectGCM256", b) }
func BenchmarkDecrypt1KBWithDirectGCM256(b *testing.B)  { benchDecrypt("1KB", "DirectGCM256", b) }
func BenchmarkDecrypt64KBWithDirectGCM256(b *testing.B) { benchDecrypt("64KB", "DirectGCM256", b) }
func BenchmarkDecrypt1MBWithDirectGCM256(b *testing.B)  { benchDecrypt("1MB", "DirectGCM256", b) }
func BenchmarkDecrypt64MBWithDirectGCM256(b *testing.B) { benchDecrypt("64MB", "DirectGCM256", b) }

func BenchmarkDecrypt1BWithDirectCBC256(b *testing.B)   { benchDecrypt("1B", "DirectCBC256", b) }
func BenchmarkDecrypt64BWithDirectCBC256(b *testing.B)  { benchDecrypt("64B", "DirectCBC256", b) }
func BenchmarkDecrypt1KBWithDirectCBC256(b *testing.B)  { benchDecrypt("1KB", "DirectCBC256", b) }
func BenchmarkDecrypt64KBWithDirectCBC256(b *testing.B) { benchDecrypt("64KB", "DirectCBC256", b) }
func BenchmarkDecrypt1MBWithDirectCBC256(b *testing.B)  { benchDecrypt("1MB", "DirectCBC256", b) }
func BenchmarkDecrypt64MBWithDirectCBC256(b *testing.B) { benchDecrypt("64MB", "DirectCBC256", b) }

func BenchmarkDecrypt1BWithAESKWAndGCM128(b *testing.B)   { benchDecrypt("1B", "AESKWAndGCM128", b) }
func BenchmarkDecrypt64BWithAESKWAndGCM128(b *testing.B)  { benchDecrypt("64B", "AESKWAndGCM128", b) }
func BenchmarkDecrypt1KBWithAESKWAndGCM128(b *testing.B)  { benchDecrypt("1KB", "AESKWAndGCM128", b) }
func BenchmarkDecrypt64KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64KB", "AESKWAndGCM128", b) }
func BenchmarkDecrypt1MBWithAESKWAndGCM128(b *testing.B)  { benchDecrypt("1MB", "AESKWAndGCM128", b) }
func BenchmarkDecrypt64MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64MB", "AESKWAndGCM128", b) }

func BenchmarkDecrypt1BWithAESKWAndCBC256(b *testing.B)   { benchDecrypt("1B", "AESKWAndCBC256", b) }
func BenchmarkDecrypt64BWithAESKWAndCBC256(b *testing.B)  { benchDecrypt("64B", "AESKWAndCBC256", b) }
func BenchmarkDecrypt1KBWithAESKWAndCBC256(b *testing.B)  { benchDecrypt("1KB", "AESKWAndCBC256", b) }
func BenchmarkDecrypt64KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64KB", "AESKWAndCBC256", b) }
func BenchmarkDecrypt1MBWithAESKWAndCBC256(b *testing.B)  { benchDecrypt("1MB", "AESKWAndCBC256", b) }
func BenchmarkDecrypt64MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64MB", "AESKWAndCBC256", b) }

func BenchmarkDecrypt1BWithECDHOnP256AndGCM128(b *testing.B) {
	benchDecrypt("1B", "ECDHOnP256AndGCM128", b)
}
func BenchmarkDecrypt64BWithECDHOnP256AndGCM128(b *testing.B) {
	benchDecrypt("64B", "ECDHOnP256AndGCM128", b)
}
func BenchmarkDecrypt1KBWithECDHOnP256AndGCM128(b *testing.B) {
	benchDecrypt("1KB", "ECDHOnP256AndGCM128", b)
}
func BenchmarkDecrypt64KBWithECDHOnP256AndGCM128(b *testing.B) {
	benchDecrypt("64KB", "ECDHOnP256AndGCM128", b)
}
func BenchmarkDecrypt1MBWithECDHOnP256AndGCM128(b *testing.B) {
	benchDecrypt("1MB", "ECDHOnP256AndGCM128", b)
}
func BenchmarkDecrypt64MBWithECDHOnP256AndGCM128(b *testing.B) {
	benchDecrypt("64MB", "ECDHOnP256AndGCM128", b)
}

func BenchmarkDecrypt1BWithECDHOnP384AndGCM128(b *testing.B) {
	benchDecrypt("1B", "ECDHOnP384AndGCM128", b)
}
func BenchmarkDecrypt64BWithECDHOnP384AndGCM128(b *testing.B) {
	benchDecrypt("64B", "ECDHOnP384AndGCM128", b)
}
func BenchmarkDecrypt1KBWithECDHOnP384AndGCM128(b *testing.B) {
	benchDecrypt("1KB", "ECDHOnP384AndGCM128", b)
}
func BenchmarkDecrypt64KBWithECDHOnP384AndGCM128(b *testing.B) {
	benchDecrypt("64KB", "ECDHOnP384AndGCM128", b)
}
func BenchmarkDecrypt1MBWithECDHOnP384AndGCM128(b *testing.B) {
	benchDecrypt("1MB", "ECDHOnP384AndGCM128", b)
}
func BenchmarkDecrypt64MBWithECDHOnP384AndGCM128(b *testing.B) {
	benchDecrypt("64MB", "ECDHOnP384AndGCM128", b)
}

func BenchmarkDecrypt1BWithECDHOnP521AndGCM128(b *testing.B) {
	benchDecrypt("1B", "ECDHOnP521AndGCM128", b)
}
func BenchmarkDecrypt64BWithECDHOnP521AndGCM128(b *testing.B) {
	benchDecrypt("64B", "ECDHOnP521AndGCM128", b)
}
func BenchmarkDecrypt1KBWithECDHOnP521AndGCM128(b *testing.B) {
	benchDecrypt("1KB", "ECDHOnP521AndGCM128", b)
}
func BenchmarkDecrypt64KBWithECDHOnP521AndGCM128(b *testing.B) {
	benchDecrypt("64KB", "ECDHOnP521AndGCM128", b)
}
func BenchmarkDecrypt1MBWithECDHOnP521AndGCM128(b *testing.B) {
	benchDecrypt("1MB", "ECDHOnP521AndGCM128", b)
}
func BenchmarkDecrypt64MBWithECDHOnP521AndGCM128(b *testing.B) {
	benchDecrypt("64MB", "ECDHOnP521AndGCM128", b)
}
+
+func benchDecrypt(chunkKey, primKey string, b *testing.B) {
+ chunk, ok := chunks[chunkKey]
+ if !ok {
+ b.Fatalf("unknown chunk size %s", chunkKey)
+ }
+
+ enc, ok := encrypters[primKey]
+ if !ok {
+ b.Fatalf("unknown encrypter %s", primKey)
+ }
+
+ dec, ok := decryptionKeys[primKey]
+ if !ok {
+ b.Fatalf("unknown decryption key %s", primKey)
+ }
+
+ data, err := enc.Encrypt(chunk)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.SetBytes(int64(len(chunk)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ data.Decrypt(dec)
+ }
+}
+
+func mustEncrypter(keyAlg KeyAlgorithm, encAlg ContentEncryption, encryptionKey interface{}) Encrypter {
+ enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey)
+ if err != nil {
+ panic(err)
+ }
+ return enc
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go
new file mode 100644
index 00000000..b4cd1e98
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go
@@ -0,0 +1,26 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. For the moment, it mainly focuses on
+encryption and signing based on the JSON Web Encryption and JSON Web Signature
+standards. The library supports both the compact and full serialization
+formats, and has optional support for multiple recipients.
+
+*/
+package jose // import "gopkg.in/square/go-jose.v1"
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go
new file mode 100644
index 00000000..50468295
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go
@@ -0,0 +1,226 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "fmt"
+)
+
+// Dummy encrypter for use in examples
+var encrypter, _ = NewEncrypter(DIRECT, A128GCM, []byte{})
+
+func Example_jWE() {
+ // Generate a public/private key pair to use for this example. The library
+ // also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+ // that can be used to load keys from PEM/DER-encoded data.
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ panic(err)
+ }
+
+ // Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would
+ // indicate that the selected algorithm(s) are not currently supported.
+ publicKey := &privateKey.PublicKey
+ encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey)
+ if err != nil {
+ panic(err)
+ }
+
+ // Encrypt a sample plaintext. Calling the encrypter returns an encrypted
+ // JWE object, which can then be serialized for output afterwards. An error
+ // would indicate a problem in an underlying cryptographic primitive.
+ var plaintext = []byte("Lorem ipsum dolor sit amet")
+ object, err := encrypter.Encrypt(plaintext)
+ if err != nil {
+ panic(err)
+ }
+
+ // Serialize the encrypted object using the full serialization format.
+ // Alternatively you can also use the compact format here by calling
+ // object.CompactSerialize() instead.
+ serialized := object.FullSerialize()
+
+ // Parse the serialized, encrypted JWE object. An error would indicate that
+ // the given input did not represent a valid message.
+ object, err = ParseEncrypted(serialized)
+ if err != nil {
+ panic(err)
+ }
+
+ // Now we can decrypt and get back our original plaintext. An error here
+	// would indicate that the message failed to decrypt, e.g. because the auth
+ // tag was broken or the message was tampered with.
+ decrypted, err := object.Decrypt(privateKey)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf(string(decrypted))
+ // output: Lorem ipsum dolor sit amet
+}
+
+func Example_jWS() {
+ // Generate a public/private key pair to use for this example. The library
+ // also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+ // that can be used to load keys from PEM/DER-encoded data.
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ panic(err)
+ }
+
+ // Instantiate a signer using RSASSA-PSS (SHA512) with the given private key.
+ signer, err := NewSigner(PS512, privateKey)
+ if err != nil {
+ panic(err)
+ }
+
+ // Sign a sample payload. Calling the signer returns a protected JWS object,
+ // which can then be serialized for output afterwards. An error would
+ // indicate a problem in an underlying cryptographic primitive.
+ var payload = []byte("Lorem ipsum dolor sit amet")
+ object, err := signer.Sign(payload)
+ if err != nil {
+ panic(err)
+ }
+
+	// Serialize the signed object using the full serialization format.
+ // Alternatively you can also use the compact format here by calling
+ // object.CompactSerialize() instead.
+ serialized := object.FullSerialize()
+
+ // Parse the serialized, protected JWS object. An error would indicate that
+ // the given input did not represent a valid message.
+ object, err = ParseSigned(serialized)
+ if err != nil {
+ panic(err)
+ }
+
+ // Now we can verify the signature on the payload. An error here would
+	// indicate that the message failed to verify, e.g. because the signature was
+ // broken or the message was tampered with.
+ output, err := object.Verify(&privateKey.PublicKey)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf(string(output))
+ // output: Lorem ipsum dolor sit amet
+}
+
+func ExampleNewEncrypter_publicKey() {
+ var publicKey *rsa.PublicKey
+
+ // Instantiate an encrypter using RSA-OAEP with AES128-GCM.
+ NewEncrypter(RSA_OAEP, A128GCM, publicKey)
+
+ // Instantiate an encrypter using RSA-PKCS1v1.5 with AES128-CBC+HMAC.
+ NewEncrypter(RSA1_5, A128CBC_HS256, publicKey)
+}
+
+func ExampleNewEncrypter_symmetric() {
+ var sharedKey []byte
+
+ // Instantiate an encrypter using AES128-GCM with AES-GCM key wrap.
+ NewEncrypter(A128GCMKW, A128GCM, sharedKey)
+
+ // Instantiate an encrypter using AES256-GCM directly, w/o key wrapping.
+ NewEncrypter(DIRECT, A256GCM, sharedKey)
+}
+
+func ExampleNewSigner_publicKey() {
+ var rsaPrivateKey *rsa.PrivateKey
+ var ecdsaPrivateKey *ecdsa.PrivateKey
+
+ // Instantiate a signer using RSA-PKCS#1v1.5 with SHA-256.
+ NewSigner(RS256, rsaPrivateKey)
+
+ // Instantiate a signer using ECDSA with SHA-384.
+ NewSigner(ES384, ecdsaPrivateKey)
+}
+
+func ExampleNewSigner_symmetric() {
+ var sharedKey []byte
+
+	// Instantiate a signer using HMAC-SHA256.
+ NewSigner(HS256, sharedKey)
+
+	// Instantiate a signer using HMAC-SHA512.
+ NewSigner(HS512, sharedKey)
+}
+
+func ExampleNewMultiEncrypter() {
+ var publicKey *rsa.PublicKey
+ var sharedKey []byte
+
+ // Instantiate an encrypter using AES-GCM.
+ encrypter, err := NewMultiEncrypter(A128GCM)
+ if err != nil {
+ panic(err)
+ }
+
+	// Add a recipient using a shared key with AES-GCM key wrap
+ err = encrypter.AddRecipient(A128GCMKW, sharedKey)
+ if err != nil {
+ panic(err)
+ }
+
+ // Add a recipient using an RSA public key with RSA-OAEP
+ err = encrypter.AddRecipient(RSA_OAEP, publicKey)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func ExampleNewMultiSigner() {
+ var privateKey *rsa.PrivateKey
+ var sharedKey []byte
+
+ // Instantiate a signer for multiple recipients.
+ signer := NewMultiSigner()
+
+ // Add a recipient using a shared key with HMAC-SHA256
+ err := signer.AddRecipient(HS256, sharedKey)
+ if err != nil {
+ panic(err)
+ }
+
+ // Add a recipient using an RSA private key with RSASSA-PSS with SHA384
+ err = signer.AddRecipient(PS384, privateKey)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func ExampleEncrypter_encrypt() {
+ // Encrypt a plaintext in order to get an encrypted JWE object.
+ var plaintext = []byte("This is a secret message")
+
+ encrypter.Encrypt(plaintext)
+}
+
+func ExampleEncrypter_encryptWithAuthData() {
+ // Encrypt a plaintext in order to get an encrypted JWE object. Also attach
+ // some additional authenticated data (AAD) to the object. Note that objects
+ // with attached AAD can only be represented using full serialization.
+ var plaintext = []byte("This is a secret message")
+ var aad = []byte("This is authenticated, but public data")
+
+ encrypter.EncryptWithAuthData(plaintext, aad)
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go
new file mode 100644
index 00000000..3e2ac0ae
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go
@@ -0,0 +1,191 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/base64"
+ "encoding/binary"
+ "io"
+ "math/big"
+ "regexp"
+ "strings"
+)
+
+var stripWhitespaceRegex = regexp.MustCompile("\\s")
+
+// Url-safe base64 encode that strips padding
+func base64URLEncode(data []byte) string {
+ var result = base64.URLEncoding.EncodeToString(data)
+ return strings.TrimRight(result, "=")
+}
+
+// Url-safe base64 decoder that adds padding
+func base64URLDecode(data string) ([]byte, error) {
+ var missing = (4 - len(data)%4) % 4
+ data += strings.Repeat("=", missing)
+ return base64.URLEncoding.DecodeString(data)
+}
+
+// Helper function to serialize known-good objects.
+// Precondition: value is not a nil pointer.
+func mustSerializeJSON(value interface{}) []byte {
+ out, err := MarshalJSON(value)
+ if err != nil {
+ panic(err)
+ }
+ // We never want to serialize the top-level value "null," since it's not a
+ // valid JOSE message. But if a caller passes in a nil pointer to this method,
+ // MarshalJSON will happily serialize it as the top-level value "null". If
+ // that value is then embedded in another operation, for instance by being
+ // base64-encoded and fed as input to a signing algorithm
+ // (https://github.com/square/go-jose/issues/22), the result will be
+ // incorrect. Because this method is intended for known-good objects, and a nil
+ // pointer is not a known-good object, we are free to panic in this case.
+ // Note: It's not possible to directly check whether the data pointed at by an
+ // interface is a nil pointer, so we do this hacky workaround.
+ // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
+ if string(out) == "null" {
+ panic("Tried to serialize a nil pointer.")
+ }
+ return out
+}
+
+// Strip all newlines and whitespace
+func stripWhitespace(data string) string {
+ return stripWhitespaceRegex.ReplaceAllString(data, "")
+}
+
+// Perform compression based on algorithm
+func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return deflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Perform decompression based on algorithm
+func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return inflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Compress with DEFLATE
+func deflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+
+ // Writing to byte buffer, err is always nil
+ writer, _ := flate.NewWriter(output, 1)
+ _, _ = io.Copy(writer, bytes.NewBuffer(input))
+
+ err := writer.Close()
+ return output.Bytes(), err
+}
+
+// Decompress with DEFLATE
+func inflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+ reader := flate.NewReader(bytes.NewBuffer(input))
+
+ _, err := io.Copy(output, reader)
+ if err != nil {
+ return nil, err
+ }
+
+ err = reader.Close()
+ return output.Bytes(), err
+}
+
+// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
+type byteBuffer struct {
+ data []byte
+}
+
+func newBuffer(data []byte) *byteBuffer {
+ if data == nil {
+ return nil
+ }
+ return &byteBuffer{
+ data: data,
+ }
+}
+
+func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
+ if len(data) > length {
+ panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
+ }
+ pad := make([]byte, length-len(data))
+ return newBuffer(append(pad, data...))
+}
+
+func newBufferFromInt(num uint64) *byteBuffer {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, num)
+ return newBuffer(bytes.TrimLeft(data, "\x00"))
+}
+
+func (b *byteBuffer) MarshalJSON() ([]byte, error) {
+ return MarshalJSON(b.base64())
+}
+
+func (b *byteBuffer) UnmarshalJSON(data []byte) error {
+ var encoded string
+ err := UnmarshalJSON(data, &encoded)
+ if err != nil {
+ return err
+ }
+
+ if encoded == "" {
+ return nil
+ }
+
+ decoded, err := base64URLDecode(encoded)
+ if err != nil {
+ return err
+ }
+
+ *b = *newBuffer(decoded)
+
+ return nil
+}
+
+func (b *byteBuffer) base64() string {
+ return base64URLEncode(b.data)
+}
+
+func (b *byteBuffer) bytes() []byte {
+ // Handling nil here allows us to transparently handle nil slices when serializing.
+ if b == nil {
+ return nil
+ }
+ return b.data
+}
+
+func (b byteBuffer) bigInt() *big.Int {
+ return new(big.Int).SetBytes(b.data)
+}
+
+func (b byteBuffer) toInt() int {
+ return int(b.bigInt().Int64())
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go
new file mode 100644
index 00000000..e2f8d979
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go
@@ -0,0 +1,173 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+func TestBase64URLEncode(t *testing.T) {
+ // Test arrays with various sizes
+ if base64URLEncode([]byte{}) != "" {
+ t.Error("failed to encode empty array")
+ }
+
+ if base64URLEncode([]byte{0}) != "AA" {
+ t.Error("failed to encode [0x00]")
+ }
+
+ if base64URLEncode([]byte{0, 1}) != "AAE" {
+ t.Error("failed to encode [0x00, 0x01]")
+ }
+
+ if base64URLEncode([]byte{0, 1, 2}) != "AAEC" {
+ t.Error("failed to encode [0x00, 0x01, 0x02]")
+ }
+
+ if base64URLEncode([]byte{0, 1, 2, 3}) != "AAECAw" {
+ t.Error("failed to encode [0x00, 0x01, 0x02, 0x03]")
+ }
+}
+
+func TestBase64URLDecode(t *testing.T) {
+ // Test arrays with various sizes
+ val, err := base64URLDecode("")
+ if err != nil || !bytes.Equal(val, []byte{}) {
+ t.Error("failed to decode empty array")
+ }
+
+ val, err = base64URLDecode("AA")
+ if err != nil || !bytes.Equal(val, []byte{0}) {
+ t.Error("failed to decode [0x00]")
+ }
+
+ val, err = base64URLDecode("AAE")
+ if err != nil || !bytes.Equal(val, []byte{0, 1}) {
+ t.Error("failed to decode [0x00, 0x01]")
+ }
+
+ val, err = base64URLDecode("AAEC")
+ if err != nil || !bytes.Equal(val, []byte{0, 1, 2}) {
+ t.Error("failed to decode [0x00, 0x01, 0x02]")
+ }
+
+ val, err = base64URLDecode("AAECAw")
+ if err != nil || !bytes.Equal(val, []byte{0, 1, 2, 3}) {
+ t.Error("failed to decode [0x00, 0x01, 0x02, 0x03]")
+ }
+}
+
+func TestDeflateRoundtrip(t *testing.T) {
+ original := []byte("Lorem ipsum dolor sit amet")
+
+ compressed, err := deflate(original)
+ if err != nil {
+ panic(err)
+ }
+
+ output, err := inflate(compressed)
+ if err != nil {
+ panic(err)
+ }
+
+ if bytes.Compare(output, original) != 0 {
+ t.Error("Input and output do not match")
+ }
+}
+
+func TestInvalidCompression(t *testing.T) {
+ _, err := compress("XYZ", []byte{})
+ if err == nil {
+ t.Error("should not accept invalid algorithm")
+ }
+
+ _, err = decompress("XYZ", []byte{})
+ if err == nil {
+ t.Error("should not accept invalid algorithm")
+ }
+
+ _, err = decompress(DEFLATE, []byte{1, 2, 3, 4})
+ if err == nil {
+ t.Error("should not accept invalid data")
+ }
+}
+
+func TestByteBufferTrim(t *testing.T) {
+ buf := newBufferFromInt(1)
+ if !bytes.Equal(buf.data, []byte{1}) {
+ t.Error("Byte buffer for integer '1' should contain [0x01]")
+ }
+
+ buf = newBufferFromInt(65537)
+ if !bytes.Equal(buf.data, []byte{1, 0, 1}) {
+ t.Error("Byte buffer for integer '65537' should contain [0x01, 0x00, 0x01]")
+ }
+}
+
+func TestFixedSizeBuffer(t *testing.T) {
+ data0 := []byte{}
+ data1 := []byte{1}
+ data2 := []byte{1, 2}
+ data3 := []byte{1, 2, 3}
+ data4 := []byte{1, 2, 3, 4}
+
+ buf0 := newFixedSizeBuffer(data0, 4)
+ buf1 := newFixedSizeBuffer(data1, 4)
+ buf2 := newFixedSizeBuffer(data2, 4)
+ buf3 := newFixedSizeBuffer(data3, 4)
+ buf4 := newFixedSizeBuffer(data4, 4)
+
+ if !bytes.Equal(buf0.data, []byte{0, 0, 0, 0}) {
+ t.Error("Invalid padded buffer for buf0")
+ }
+ if !bytes.Equal(buf1.data, []byte{0, 0, 0, 1}) {
+ t.Error("Invalid padded buffer for buf1")
+ }
+ if !bytes.Equal(buf2.data, []byte{0, 0, 1, 2}) {
+ t.Error("Invalid padded buffer for buf2")
+ }
+ if !bytes.Equal(buf3.data, []byte{0, 1, 2, 3}) {
+ t.Error("Invalid padded buffer for buf3")
+ }
+ if !bytes.Equal(buf4.data, []byte{1, 2, 3, 4}) {
+ t.Error("Invalid padded buffer for buf4")
+ }
+}
+
+func TestSerializeJSONRejectsNil(t *testing.T) {
+ defer func() {
+ r := recover()
+ if r == nil || !strings.Contains(r.(string), "nil pointer") {
+ t.Error("serialize function should not accept nil pointer")
+ }
+ }()
+
+ mustSerializeJSON(nil)
+}
+
+func TestFixedSizeBufferTooLarge(t *testing.T) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ t.Error("should not be able to create fixed size buffer with oversized data")
+ }
+ }()
+
+ newFixedSizeBuffer(make([]byte, 2), 1)
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE
new file mode 100644
index 00000000..74487567
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md
new file mode 100644
index 00000000..86de5e55
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+ [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+ This is to avoid differences in the interpretation of JOSE messages between
+ go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+ input whenever we detect a duplicate. Rather than trying to work with malformed
+ data, we prefer to reject it right away.
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go
new file mode 100644
index 00000000..ed89d115
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go
@@ -0,0 +1,223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Large data benchmark.
+// The JSON data is a summary of agl's changes in the
+// go, webkit, and chromium open source projects.
+// We benchmark converting between the JSON form
+// and in-memory data structures.
+
+package json
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+type codeResponse struct {
+ Tree *codeNode `json:"tree"`
+ Username string `json:"username"`
+}
+
+type codeNode struct {
+ Name string `json:"name"`
+ Kids []*codeNode `json:"kids"`
+ CLWeight float64 `json:"cl_weight"`
+ Touches int `json:"touches"`
+ MinT int64 `json:"min_t"`
+ MaxT int64 `json:"max_t"`
+ MeanT int64 `json:"mean_t"`
+}
+
+var codeJSON []byte
+var codeStruct codeResponse
+
+func codeInit() {
+ f, err := os.Open("testdata/code.json.gz")
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ gz, err := gzip.NewReader(f)
+ if err != nil {
+ panic(err)
+ }
+ data, err := ioutil.ReadAll(gz)
+ if err != nil {
+ panic(err)
+ }
+
+ codeJSON = data
+
+ if err := Unmarshal(codeJSON, &codeStruct); err != nil {
+ panic("unmarshal code.json: " + err.Error())
+ }
+
+ if data, err = Marshal(&codeStruct); err != nil {
+ panic("marshal code.json: " + err.Error())
+ }
+
+ if !bytes.Equal(data, codeJSON) {
+ println("different lengths", len(data), len(codeJSON))
+ for i := 0; i < len(data) && i < len(codeJSON); i++ {
+ if data[i] != codeJSON[i] {
+ println("re-marshal: changed at byte", i)
+ println("orig: ", string(codeJSON[i-10:i+10]))
+ println("new: ", string(data[i-10:i+10]))
+ break
+ }
+ }
+ panic("re-marshal code.json: different result")
+ }
+}
+
+func BenchmarkCodeEncoder(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ enc := NewEncoder(ioutil.Discard)
+ for i := 0; i < b.N; i++ {
+ if err := enc.Encode(&codeStruct); err != nil {
+ b.Fatal("Encode:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeMarshal(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ for i := 0; i < b.N; i++ {
+ if _, err := Marshal(&codeStruct); err != nil {
+ b.Fatal("Marshal:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeDecoder(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ var buf bytes.Buffer
+ dec := NewDecoder(&buf)
+ var r codeResponse
+ for i := 0; i < b.N; i++ {
+ buf.Write(codeJSON)
+ // hide EOF
+ buf.WriteByte('\n')
+ buf.WriteByte('\n')
+ buf.WriteByte('\n')
+ if err := dec.Decode(&r); err != nil {
+ b.Fatal("Decode:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkDecoderStream(b *testing.B) {
+ b.StopTimer()
+ var buf bytes.Buffer
+ dec := NewDecoder(&buf)
+ buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n")
+ var x interface{}
+ if err := dec.Decode(&x); err != nil {
+ b.Fatal("Decode:", err)
+ }
+ ones := strings.Repeat(" 1\n", 300000) + "\n\n\n"
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ if i%300000 == 0 {
+ buf.WriteString(ones)
+ }
+ x = nil
+ if err := dec.Decode(&x); err != nil || x != 1.0 {
+ b.Fatalf("Decode: %v after %d", err, i)
+ }
+ }
+}
+
+func BenchmarkCodeUnmarshal(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ for i := 0; i < b.N; i++ {
+ var r codeResponse
+ if err := Unmarshal(codeJSON, &r); err != nil {
+ b.Fatal("Unmmarshal:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeUnmarshalReuse(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ var r codeResponse
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(codeJSON, &r); err != nil {
+ b.Fatal("Unmmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalString(b *testing.B) {
+ data := []byte(`"hello, world"`)
+ var s string
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &s); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalFloat64(b *testing.B) {
+ var f float64
+ data := []byte(`3.14`)
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &f); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalInt64(b *testing.B) {
+ var x int64
+ data := []byte(`3`)
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &x); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkIssue10335(b *testing.B) {
+ b.ReportAllocs()
+ var s struct{}
+ j := []byte(`{"a":{ }}`)
+ for n := 0; n < b.N; n++ {
+ if err := Unmarshal(j, &s); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go
new file mode 100644
index 00000000..37457e5a
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go
@@ -0,0 +1,1183 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+// Each section below consumes one production of the grammar by slicing
+// s forward; the literal is valid only if nothing is left at the end.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ // A leading zero must stand alone ("0", "0.5"); "01" is invalid,
+ // which falls out naturally because only one '0' is consumed here.
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ // Trailing garbage (e.g. "1e", "1.2.3") leaves s non-empty here.
+ return s == ""
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error // first error encountered; reported by unmarshal
+ useNumber bool // decode numbers into Number instead of float64
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+// init (re)initializes d to decode data, returning d for call chaining.
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+// The panic is recovered and converted back to an error in unmarshal.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+// It advances d.off past the returned value and resynchronizes d.scan,
+// which has already consumed the opening brace/bracket.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+// An invalid (zero) v means "skip this value": the value is scanned
+// with the spare nextscan scanner and d.scan is resynchronized by
+// feeding it synthetic tokens.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ // Dispatch on the first significant scan event.
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+// unquotedValue is a sentinel returned by valueQuoted when the next
+// value is not a quoted string or null.
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ // Consume (and discard) the array so decoding stays in sync.
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ // Consume (and discard) the object so decoding stays in sync.
+ d.object(reflect.Value{})
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ // When decoding null, stop one level early so the caller can
+ // set this settable pointer itself to nil.
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ // Only types with methods can implement the unmarshaler interfaces.
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off-- // back up so next() sees the '['
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ // A text unmarshaler cannot accept an array; record the error
+ // and skip over the array to keep the scanner in sync.
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ // An empty JSON array decodes into an empty (non-nil) slice.
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+// nullLiteral is the raw bytes of the JSON null literal, used when a
+// ",string"-tagged field holds a literal null.
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+// NOTE: this vendored copy rejects duplicate object keys with an error;
+// upstream encoding/json instead silently keeps the last value.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off-- // back up so next() sees the '{'
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+ // keys tracks every key seen so far, for duplicate detection.
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ // Reuse the scratch element, resetting it between keys.
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, []byte(key)) {
+ f = ff
+ break
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ // Walk the index path, allocating embedded pointers as needed.
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ // Field has the ",string" option: the value arrives as a
+ // quoted string (or null) and is decoded a second time.
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+}
+
+// numberType is the reflect.Type of Number, used to special-case it
+// among string-kinded targets in literalStore.
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ //Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ // TextUnmarshaler targets accept only quoted strings.
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ // Dispatch on the literal's first byte: n/t/f/"/digit.
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ // A string decoded into a []byte is interpreted as base64.
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ // json.Number is string-kinded; store the raw text after
+ // validating it against the JSON number grammar.
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}.
+// The slice is deliberately non-nil so an empty JSON array
+// round-trips as [] rather than null.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+// Like object, it rejects duplicate keys (a go-jose divergence from
+// upstream encoding/json, which keeps the last value).
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+// It yields nil, bool, string, or (via convertNumber) float64/Number.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+// It requires all six bytes to be present; no partial escapes.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so cannot use strconv.Unquote.
+// It is a string-returning wrapper around unquoteBytes.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+// unquoteBytes converts a quoted JSON string literal s into its raw
+// byte content, processing backslash escapes and coercing invalid
+// UTF-8 and bad surrogate pairs to U+FFFD. ok is false on malformed
+// input, in which case t is nil.
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ // Fast path: nothing to unescape; alias the input bytes.
+ return s, true
+ }
+
+ // Slow path: build the unescaped form in b.
+ // r reads from s, w writes into b.
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ // Back up to the '\\' so getu4 sees the full \uXXXX.
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go
new file mode 100644
index 00000000..7577b21a
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go
@@ -0,0 +1,1474 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "image"
+ "net"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+// T exercises basic field matching, including the "-" ignore tag.
+type T struct {
+ X string
+ Y int
+ Z int `json:"-"`
+}
+
+// U exercises field renaming via a json tag.
+type U struct {
+ Alphabet string `json:"alpha"`
+}
+
+// V exercises mixed interface{}, sized-int, and Number targets.
+type V struct {
+ F1 interface{}
+ F2 int32
+ F3 Number
+}
+
+// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and
+// without UseNumber
+var ifaceNumAsFloat64 = map[string]interface{}{
+ "k1": float64(1),
+ "k2": "s",
+ "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)},
+ "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)},
+}
+
+var ifaceNumAsNumber = map[string]interface{}{
+ "k1": Number("1"),
+ "k2": "s",
+ "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")},
+ "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")},
+}
+
+// tx has only an unexported field, which must be ignored by Unmarshal.
+type tx struct {
+ x int
+}
+
+// A type that can unmarshal itself.
+
+type unmarshaler struct {
+ T bool
+}
+
+// UnmarshalJSON ignores its input and just records that it ran.
+func (u *unmarshaler) UnmarshalJSON(b []byte) error {
+ *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called.
+ return nil
+}
+
+// ustruct embeds an unmarshaler as a field.
+type ustruct struct {
+ M unmarshaler
+}
+
+// unmarshalerText implements encoding.TextMarshaler/TextUnmarshaler.
+type unmarshalerText struct {
+ T bool
+}
+
+// needed for re-marshaling tests
+func (u *unmarshalerText) MarshalText() ([]byte, error) {
+ return []byte(""), nil
+}
+
+// UnmarshalText ignores its input and just records that it ran.
+func (u *unmarshalerText) UnmarshalText(b []byte) error {
+ *u = unmarshalerText{true} // All we need to see that UnmarshalText is called.
+ return nil
+}
+
+// Compile-time interface satisfaction check.
+var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil)
+
+type ustructText struct {
+ M unmarshalerText
+}
+
+// Shared unmarshal targets and their expected post-decode values.
+var (
+ um0, um1 unmarshaler // target2 of unmarshaling
+ ump = &um1
+ umtrue = unmarshaler{true}
+ umslice = []unmarshaler{{true}}
+ umslicep = new([]unmarshaler)
+ umstruct = ustruct{unmarshaler{true}}
+
+ um0T, um1T unmarshalerText // target2 of unmarshaling
+ umpT = &um1T
+ umtrueT = unmarshalerText{true}
+ umsliceT = []unmarshalerText{{true}}
+ umslicepT = new([]unmarshalerText)
+ umstructT = ustructText{unmarshalerText{true}}
+)
+
+// Test data structures for anonymous fields.
+
+type Point struct {
+ Z int
+}
+
+// Top combines every flavor of embedding: plain, pointer, tagged,
+// ignored, cyclic, conflicting, and unexported-with-exported-field.
+type Top struct {
+ Level0 int
+ Embed0
+ *Embed0a
+ *Embed0b `json:"e,omitempty"` // treated as named
+ Embed0c `json:"-"` // ignored
+ Loop
+ Embed0p // has Point with X, Y, used
+ Embed0q // has Point with Z, used
+ embed // contains exported field
+}
+
+type Embed0 struct {
+ Level1a int // overridden by Embed0a's Level1a with json tag
+ Level1b int // used because Embed0a's Level1b is renamed
+ Level1c int // used because Embed0a's Level1c is ignored
+ Level1d int // annihilated by Embed0a's Level1d
+ Level1e int `json:"x"` // annihilated by Embed0a.Level1e
+}
+
+type Embed0a struct {
+ Level1a int `json:"Level1a,omitempty"`
+ Level1b int `json:"LEVEL1B,omitempty"`
+ Level1c int `json:"-"`
+ Level1d int // annihilated by Embed0's Level1d
+ Level1f int `json:"x"` // annihilated by Embed0's Level1e
+}
+
+type Embed0b Embed0
+
+type Embed0c Embed0
+
+type Embed0p struct {
+ image.Point
+}
+
+type Embed0q struct {
+ Point
+}
+
+// embed is unexported but its exported field Q must still decode.
+type embed struct {
+ Q int
+}
+
+// Loop embeds a pointer to itself, testing cycle handling in the
+// field-collection code.
+type Loop struct {
+ Loop1 int `json:",omitempty"`
+ Loop2 int `json:",omitempty"`
+ *Loop
+}
+
+// From reflect test:
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// From reflect test:
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+// unmarshalTest is one table entry: input JSON, a destination pointer,
+// the expected decoded value, the expected error (if any), and whether
+// to enable UseNumber for the run.
+type unmarshalTest struct {
+ in string
+ ptr interface{}
+ out interface{}
+ err error
+ useNumber bool
+}
+
+type XYZ struct {
+ X interface{}
+ Y interface{}
+ Z interface{}
+}
+
+// sliceAddr/mapAddr return addressable copies for overwrite tests.
+func sliceAddr(x []int) *[]int { return &x }
+func mapAddr(x map[string]int) *map[string]int { return &x }
+
+var unmarshalTests = []unmarshalTest{
+ // basic types
+ {in: `true`, ptr: new(bool), out: true},
+ {in: `1`, ptr: new(int), out: 1},
+ {in: `1.2`, ptr: new(float64), out: 1.2},
+ {in: `-5`, ptr: new(int16), out: int16(-5)},
+ {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true},
+ {in: `2`, ptr: new(Number), out: Number("2")},
+ {in: `2`, ptr: new(interface{}), out: float64(2.0)},
+ {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true},
+ {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"},
+ {in: `"http:\/\/"`, ptr: new(string), out: "http://"},
+ {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"},
+ {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"},
+ {in: "null", ptr: new(interface{}), out: nil},
+ {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}},
+ {in: `{"x": 1}`, ptr: new(tx), out: tx{}},
+ {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}},
+ {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true},
+ {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64},
+ {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true},
+
+ // raw values with whitespace
+ {in: "\n true ", ptr: new(bool), out: true},
+ {in: "\t 1 ", ptr: new(int), out: 1},
+ {in: "\r 1.2 ", ptr: new(float64), out: 1.2},
+ {in: "\t -5 \n", ptr: new(int16), out: int16(-5)},
+ {in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"},
+
+ // Z has a "-" tag.
+ {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}},
+
+ {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+ {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+ {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}},
+
+ // syntax errors
+ {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}},
+ {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}},
+ {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true},
+
+ // raw value errors
+ {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}},
+ {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}},
+ {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}},
+ {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}},
+
+ // array tests
+ {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}},
+ {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}},
+ {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}},
+
+ // empty array to interface test
+ {in: `[]`, ptr: new([]interface{}), out: []interface{}{}},
+ {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)},
+ {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}},
+ {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}},
+
+ // composite tests
+ {in: allValueIndent, ptr: new(All), out: allValue},
+ {in: allValueCompact, ptr: new(All), out: allValue},
+ {in: allValueIndent, ptr: new(*All), out: &allValue},
+ {in: allValueCompact, ptr: new(*All), out: &allValue},
+ {in: pallValueIndent, ptr: new(All), out: pallValue},
+ {in: pallValueCompact, ptr: new(All), out: pallValue},
+ {in: pallValueIndent, ptr: new(*All), out: &pallValue},
+ {in: pallValueCompact, ptr: new(*All), out: &pallValue},
+
+ // unmarshal interface test
+ {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called
+ {in: `{"T":false}`, ptr: &ump, out: &umtrue},
+ {in: `[{"T":false}]`, ptr: &umslice, out: umslice},
+ {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice},
+ {in: `{"M":{"T":false}}`, ptr: &umstruct, out: umstruct},
+
+ // UnmarshalText interface test
+ {in: `"X"`, ptr: &um0T, out: umtrueT}, // use "false" so test will fail if custom unmarshaler is not called
+ {in: `"X"`, ptr: &umpT, out: &umtrueT},
+ {in: `["X"]`, ptr: &umsliceT, out: umsliceT},
+ {in: `["X"]`, ptr: &umslicepT, out: &umsliceT},
+ {in: `{"M":"X"}`, ptr: &umstructT, out: umstructT},
+
+ // Overwriting of data.
+ // This is different from package xml, but it's what we've always done.
+ // Now documented and tested.
+ {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}},
+ {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}},
+
+ {
+ in: `{
+ "Level0": 1,
+ "Level1b": 2,
+ "Level1c": 3,
+ "x": 4,
+ "Level1a": 5,
+ "LEVEL1B": 6,
+ "e": {
+ "Level1a": 8,
+ "Level1b": 9,
+ "Level1c": 10,
+ "Level1d": 11,
+ "x": 12
+ },
+ "Loop1": 13,
+ "Loop2": 14,
+ "X": 15,
+ "Y": 16,
+ "Z": 17,
+ "Q": 18
+ }`,
+ ptr: new(Top),
+ out: Top{
+ Level0: 1,
+ Embed0: Embed0{
+ Level1b: 2,
+ Level1c: 3,
+ },
+ Embed0a: &Embed0a{
+ Level1a: 5,
+ Level1b: 6,
+ },
+ Embed0b: &Embed0b{
+ Level1a: 8,
+ Level1b: 9,
+ Level1c: 10,
+ Level1d: 11,
+ Level1e: 12,
+ },
+ Loop: Loop{
+ Loop1: 13,
+ Loop2: 14,
+ },
+ Embed0p: Embed0p{
+ Point: image.Point{X: 15, Y: 16},
+ },
+ Embed0q: Embed0q{
+ Point: Point{Z: 17},
+ },
+ embed: embed{
+ Q: 18,
+ },
+ },
+ },
+ {
+ in: `{"X": 1,"Y":2}`,
+ ptr: new(S5),
+ out: S5{S8: S8{S9: S9{Y: 2}}},
+ },
+ {
+ in: `{"X": 1,"Y":2}`,
+ ptr: new(S10),
+ out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}},
+ },
+
+ // invalid UTF-8 is coerced to valid UTF-8.
+ {
+ in: "\"hello\xffworld\"",
+ ptr: new(string),
+ out: "hello\ufffdworld",
+ },
+ {
+ in: "\"hello\xc2\xc2world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\xc2\xffworld\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld",
+ },
+
+ // issue 8305
+ {
+ in: `{"2009-11-10T23:00:00Z": "hello world"}`,
+ ptr: &map[time.Time]string{},
+ err: &UnmarshalTypeError{"object", reflect.TypeOf(map[time.Time]string{}), 1},
+ },
+}
+
+// TestMarshal encodes the package-level allValue and pallValue fixtures
+// and compares the output byte-for-byte against the precomputed compact
+// JSON strings (allValueCompact / pallValueCompact).
+func TestMarshal(t *testing.T) {
+	b, err := Marshal(allValue)
+	if err != nil {
+		t.Fatalf("Marshal allValue: %v", err)
+	}
+	if string(b) != allValueCompact {
+		t.Errorf("Marshal allValueCompact")
+		diff(t, b, []byte(allValueCompact))
+		return
+	}
+
+	b, err = Marshal(pallValue)
+	if err != nil {
+		t.Fatalf("Marshal pallValue: %v", err)
+	}
+	if string(b) != pallValueCompact {
+		t.Errorf("Marshal pallValueCompact")
+		diff(t, b, []byte(pallValueCompact))
+		return
+	}
+}
+
+// badUTF8 pairs strings containing invalid UTF-8 bytes with the JSON
+// encoding expected after each bad byte is replaced with U+FFFD (\ufffd).
+var badUTF8 = []struct {
+	in, out string
+}{
+	{"hello\xffworld", `"hello\ufffdworld"`},
+	{"", `""`},
+	{"\xff", `"\ufffd"`},
+	{"\xff\xff", `"\ufffd\ufffd"`},
+	{"a\xffb", `"a\ufffdb"`},
+	{"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`},
+}
+
+// TestMarshalBadUTF8 checks that Marshal coerces invalid UTF-8 string
+// input to valid JSON via the Unicode replacement rune instead of failing.
+func TestMarshalBadUTF8(t *testing.T) {
+	for _, tt := range badUTF8 {
+		b, err := Marshal(tt.in)
+		if string(b) != tt.out || err != nil {
+			t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out)
+		}
+	}
+}
+
+// TestMarshalNumberZeroVal verifies that the zero value of Number (the
+// empty string) marshals as "0" rather than as an invalid empty token.
+func TestMarshalNumberZeroVal(t *testing.T) {
+	var n Number
+	out, err := Marshal(n)
+	if err != nil {
+		t.Fatal(err)
+	}
+	outStr := string(out)
+	if outStr != "0" {
+		t.Fatalf("Invalid zero val for Number: %q", outStr)
+	}
+}
+
+// TestMarshalEmbeds encodes a fully populated Top value — covering
+// embedded structs (by value and by pointer), renamed/tagged fields, and
+// the unexported embedded struct with an exported field (Q) — and
+// compares the exact flattened JSON output.
+func TestMarshalEmbeds(t *testing.T) {
+	top := &Top{
+		Level0: 1,
+		Embed0: Embed0{
+			Level1b: 2,
+			Level1c: 3,
+		},
+		Embed0a: &Embed0a{
+			Level1a: 5,
+			Level1b: 6,
+		},
+		Embed0b: &Embed0b{
+			Level1a: 8,
+			Level1b: 9,
+			Level1c: 10,
+			Level1d: 11,
+			Level1e: 12,
+		},
+		Loop: Loop{
+			Loop1: 13,
+			Loop2: 14,
+		},
+		Embed0p: Embed0p{
+			Point: image.Point{X: 15, Y: 16},
+		},
+		Embed0q: Embed0q{
+			Point: Point{Z: 17},
+		},
+		embed: embed{
+			Q: 18,
+		},
+	}
+	b, err := Marshal(top)
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}"
+	if string(b) != want {
+		t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want)
+	}
+}
+
+// TestUnmarshal drives the unmarshalTests table: each entry is first
+// syntax-checked with checkValid, then decoded into a fresh value of the
+// target type and compared against the expected result; cases that
+// decoded cleanly are additionally round-tripped through Marshal and a
+// second Decode to check the encoding is stable.
+func TestUnmarshal(t *testing.T) {
+	for i, tt := range unmarshalTests {
+		var scan scanner
+		in := []byte(tt.in)
+		if err := checkValid(in, &scan); err != nil {
+			// A syntax error is only acceptable if the table expects it.
+			if !reflect.DeepEqual(err, tt.err) {
+				t.Errorf("#%d: checkValid: %#v", i, err)
+				continue
+			}
+		}
+		if tt.ptr == nil {
+			continue
+		}
+
+		// v = new(right-type)
+		v := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+		dec := NewDecoder(bytes.NewReader(in))
+		if tt.useNumber {
+			dec.UseNumber()
+		}
+		if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) {
+			t.Errorf("#%d: %v, want %v", i, err, tt.err)
+			continue
+		} else if err != nil {
+			continue
+		}
+		if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
+			t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
+			data, _ := Marshal(v.Elem().Interface())
+			println(string(data))
+			data, _ = Marshal(tt.out)
+			println(string(data))
+			continue
+		}
+
+		// Check round trip.
+		if tt.err == nil {
+			enc, err := Marshal(v.Interface())
+			if err != nil {
+				t.Errorf("#%d: error re-marshaling: %v", i, err)
+				continue
+			}
+			vv := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+			dec = NewDecoder(bytes.NewReader(enc))
+			if tt.useNumber {
+				dec.UseNumber()
+			}
+			if err := dec.Decode(vv.Interface()); err != nil {
+				t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err)
+				continue
+			}
+			if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) {
+				t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface())
+				t.Errorf(" In: %q", strings.Map(noSpace, string(in)))
+				t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc)))
+				continue
+			}
+		}
+	}
+}
+
+// TestUnmarshalMarshal decodes the large jsonBig fixture into an untyped
+// interface{} and re-encodes it, requiring the output to be byte-identical
+// to the input.
+func TestUnmarshalMarshal(t *testing.T) {
+	initBig()
+	var v interface{}
+	if err := Unmarshal(jsonBig, &v); err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+	b, err := Marshal(v)
+	if err != nil {
+		t.Fatalf("Marshal: %v", err)
+	}
+	if !bytes.Equal(jsonBig, b) {
+		t.Errorf("Marshal jsonBig")
+		diff(t, b, jsonBig)
+		return
+	}
+}
+
+// numberTests lists Number literals with the expected Int64/Float64
+// results, or the exact strconv error messages when conversion fails.
+var numberTests = []struct {
+	in       string
+	i        int64
+	intErr   string
+	f        float64
+	floatErr string
+}{
+	{in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1},
+	{in: "-12", i: -12, f: -12.0},
+	{in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"},
+}
+
+// Independent of Decode, basic coverage of the accessors in Number
+func TestNumberAccessors(t *testing.T) {
+	for _, tt := range numberTests {
+		n := Number(tt.in)
+		if s := n.String(); s != tt.in {
+			t.Errorf("Number(%q).String() is %q", tt.in, s)
+		}
+		if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i {
+			t.Errorf("Number(%q).Int64() is %d", tt.in, i)
+		} else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) {
+			t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err)
+		}
+		if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f {
+			t.Errorf("Number(%q).Float64() is %g", tt.in, f)
+		} else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) {
+			t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err)
+		}
+	}
+}
+
+// TestLargeByteSlice round-trips a 2000-byte slice through Marshal
+// (which base64-encodes []byte) and Unmarshal, checking it survives intact.
+func TestLargeByteSlice(t *testing.T) {
+	s0 := make([]byte, 2000)
+	for i := range s0 {
+		s0[i] = byte(i)
+	}
+	b, err := Marshal(s0)
+	if err != nil {
+		t.Fatalf("Marshal: %v", err)
+	}
+	var s1 []byte
+	if err := Unmarshal(b, &s1); err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+	if !bytes.Equal(s0, s1) {
+		t.Errorf("Marshal large byte slice")
+		diff(t, s0, s1)
+	}
+}
+
+// Xint is a one-field target used by the interface/pointer-indirection tests.
+type Xint struct {
+	X int
+}
+
+// TestUnmarshalInterface decodes into an interface{} that already holds a
+// *Xint, checking that Unmarshal writes through the stored pointer.
+func TestUnmarshalInterface(t *testing.T) {
+	var xint Xint
+	var i interface{} = &xint
+	if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+	if xint.X != 1 {
+		t.Fatalf("Did not write to xint")
+	}
+}
+
+// TestUnmarshalPtrPtr decodes through a **Xint, checking that double
+// pointer indirection still reaches the underlying struct.
+func TestUnmarshalPtrPtr(t *testing.T) {
+	var xint Xint
+	pxint := &xint
+	if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+	if xint.X != 1 {
+		t.Fatalf("Did not write to xint")
+	}
+}
+
+// TestEscape verifies that Marshal escapes the HTML-sensitive characters
+// '<' and '>' (to \u003c and \u003e) as well as the quotes, and leaves the
+// already-escaped JS line separators U+2028/U+2029 in \uXXXX form.
+func TestEscape(t *testing.T) {
+	// The input must contain "<html>"; the expected output below encodes
+	// it as \u003chtml\u003e. (The substring had been dropped here.)
+	const input = `"foobar"<html>` + " [\u2028 \u2029]"
+	const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"`
+	b, err := Marshal(input)
+	if err != nil {
+		t.Fatalf("Marshal error: %v", err)
+	}
+	if s := string(b); s != expected {
+		t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected)
+	}
+}
+
+// WrongString is a struct that's misusing the ,string modifier.
+type WrongString struct {
+	Message string `json:"result,string"`
+}
+
+// wrongStringTest pairs an input document with the exact error message
+// the decoder must produce for the misused ,string tag.
+type wrongStringTest struct {
+	in, err string
+}
+
+var wrongStringTests = []wrongStringTest{
+	{`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`},
+	{`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`},
+	{`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`},
+	{`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`},
+}
+
+// If people misuse the ,string modifier, the error message should be
+// helpful, telling the user that they're doing it wrong.
+func TestErrorMessageFromMisusedString(t *testing.T) {
+	for n, tt := range wrongStringTests {
+		r := strings.NewReader(tt.in)
+		var s WrongString
+		err := NewDecoder(r).Decode(&s)
+		got := fmt.Sprintf("%v", err)
+		if got != tt.err {
+			t.Errorf("%d. got err = %q, want %q", n, got, tt.err)
+		}
+	}
+}
+
+// noSpace is a strings.Map filter that deletes ASCII whitespace; it is
+// used to derive the compact JSON fixtures from the indented ones.
+func noSpace(c rune) rune {
+	if isSpace(byte(c)) { //only used for ascii
+		return -1
+	}
+	return c
+}
+
+// All exercises every kind the codec handles: scalar types, renamed and
+// option-tagged fields, the ,string modifier, pointers to scalars, maps,
+// slices (plus empty/nil variants), nested structs at several pointer
+// depths, interfaces, and an unexported field that must be ignored.
+type All struct {
+	Bool    bool
+	Int     int
+	Int8    int8
+	Int16   int16
+	Int32   int32
+	Int64   int64
+	Uint    uint
+	Uint8   uint8
+	Uint16  uint16
+	Uint32  uint32
+	Uint64  uint64
+	Uintptr uintptr
+	Float32 float32
+	Float64 float64
+
+	Foo  string `json:"bar"`
+	Foo2 string `json:"bar2,dummyopt"`
+
+	IntStr int64 `json:",string"`
+
+	PBool    *bool
+	PInt     *int
+	PInt8    *int8
+	PInt16   *int16
+	PInt32   *int32
+	PInt64   *int64
+	PUint    *uint
+	PUint8   *uint8
+	PUint16  *uint16
+	PUint32  *uint32
+	PUint64  *uint64
+	PUintptr *uintptr
+	PFloat32 *float32
+	PFloat64 *float64
+
+	String  string
+	PString *string
+
+	Map   map[string]Small
+	MapP  map[string]*Small
+	PMap  *map[string]Small
+	PMapP *map[string]*Small
+
+	EmptyMap map[string]Small
+	NilMap   map[string]Small
+
+	Slice   []Small
+	SliceP  []*Small
+	PSlice  *[]Small
+	PSliceP *[]*Small
+
+	EmptySlice []Small
+	NilSlice   []Small
+
+	StringSlice []string
+	ByteSlice   []byte
+
+	Small   Small
+	PSmall  *Small
+	PPSmall **Small
+
+	Interface  interface{}
+	PInterface *interface{}
+
+	unexported int
+}
+
+// Small is the leaf struct nested throughout All.
+type Small struct {
+	Tag string
+}
+
+// allValue populates All's value-typed fields; its JSON form is the
+// allValueIndent/allValueCompact fixtures below.
+var allValue = All{
+	Bool:    true,
+	Int:     2,
+	Int8:    3,
+	Int16:   4,
+	Int32:   5,
+	Int64:   6,
+	Uint:    7,
+	Uint8:   8,
+	Uint16:  9,
+	Uint32:  10,
+	Uint64:  11,
+	Uintptr: 12,
+	Float32: 14.1,
+	Float64: 15.1,
+	Foo:     "foo",
+	Foo2:    "foo2",
+	IntStr:  42,
+	String:  "16",
+	Map: map[string]Small{
+		"17": {Tag: "tag17"},
+		"18": {Tag: "tag18"},
+	},
+	MapP: map[string]*Small{
+		"19": {Tag: "tag19"},
+		"20": nil,
+	},
+	EmptyMap:    map[string]Small{},
+	Slice:       []Small{{Tag: "tag20"}, {Tag: "tag21"}},
+	SliceP:      []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}},
+	EmptySlice:  []Small{},
+	StringSlice: []string{"str24", "str25", "str26"},
+	ByteSlice:   []byte{27, 28, 29},
+	Small:       Small{Tag: "tag30"},
+	PSmall:      &Small{Tag: "tag31"},
+	Interface:   5.2,
+}
+
+// pallValue mirrors allValue through the pointer-typed fields (each one
+// aims at the corresponding allValue field); its JSON form is
+// pallValueIndent/pallValueCompact below.
+var pallValue = All{
+	PBool:      &allValue.Bool,
+	PInt:       &allValue.Int,
+	PInt8:      &allValue.Int8,
+	PInt16:     &allValue.Int16,
+	PInt32:     &allValue.Int32,
+	PInt64:     &allValue.Int64,
+	PUint:      &allValue.Uint,
+	PUint8:     &allValue.Uint8,
+	PUint16:    &allValue.Uint16,
+	PUint32:    &allValue.Uint32,
+	PUint64:    &allValue.Uint64,
+	PUintptr:   &allValue.Uintptr,
+	PFloat32:   &allValue.Float32,
+	PFloat64:   &allValue.Float64,
+	PString:    &allValue.String,
+	PMap:       &allValue.Map,
+	PMapP:      &allValue.MapP,
+	PSlice:     &allValue.Slice,
+	PSliceP:    &allValue.SliceP,
+	PPSmall:    &allValue.PSmall,
+	PInterface: &allValue.Interface,
+}
+
+// allValueIndent is the indented JSON encoding of allValue; nil pointer
+// fields appear as null and []byte as base64 ("Gxwd" is bytes 27,28,29).
+var allValueIndent = `{
+	"Bool": true,
+	"Int": 2,
+	"Int8": 3,
+	"Int16": 4,
+	"Int32": 5,
+	"Int64": 6,
+	"Uint": 7,
+	"Uint8": 8,
+	"Uint16": 9,
+	"Uint32": 10,
+	"Uint64": 11,
+	"Uintptr": 12,
+	"Float32": 14.1,
+	"Float64": 15.1,
+	"bar": "foo",
+	"bar2": "foo2",
+	"IntStr": "42",
+	"PBool": null,
+	"PInt": null,
+	"PInt8": null,
+	"PInt16": null,
+	"PInt32": null,
+	"PInt64": null,
+	"PUint": null,
+	"PUint8": null,
+	"PUint16": null,
+	"PUint32": null,
+	"PUint64": null,
+	"PUintptr": null,
+	"PFloat32": null,
+	"PFloat64": null,
+	"String": "16",
+	"PString": null,
+	"Map": {
+		"17": {
+			"Tag": "tag17"
+		},
+		"18": {
+			"Tag": "tag18"
+		}
+	},
+	"MapP": {
+		"19": {
+			"Tag": "tag19"
+		},
+		"20": null
+	},
+	"PMap": null,
+	"PMapP": null,
+	"EmptyMap": {},
+	"NilMap": null,
+	"Slice": [
+		{
+			"Tag": "tag20"
+		},
+		{
+			"Tag": "tag21"
+		}
+	],
+	"SliceP": [
+		{
+			"Tag": "tag22"
+		},
+		null,
+		{
+			"Tag": "tag23"
+		}
+	],
+	"PSlice": null,
+	"PSliceP": null,
+	"EmptySlice": [],
+	"NilSlice": null,
+	"StringSlice": [
+		"str24",
+		"str25",
+		"str26"
+	],
+	"ByteSlice": "Gxwd",
+	"Small": {
+		"Tag": "tag30"
+	},
+	"PSmall": {
+		"Tag": "tag31"
+	},
+	"PPSmall": null,
+	"Interface": 5.2,
+	"PInterface": null
+}`
+
+// allValueCompact is allValueIndent with all whitespace stripped.
+var allValueCompact = strings.Map(noSpace, allValueIndent)
+
+// pallValueIndent is the indented JSON encoding of pallValue: the value
+// fields are at their zero values and the pointer fields carry the data.
+var pallValueIndent = `{
+	"Bool": false,
+	"Int": 0,
+	"Int8": 0,
+	"Int16": 0,
+	"Int32": 0,
+	"Int64": 0,
+	"Uint": 0,
+	"Uint8": 0,
+	"Uint16": 0,
+	"Uint32": 0,
+	"Uint64": 0,
+	"Uintptr": 0,
+	"Float32": 0,
+	"Float64": 0,
+	"bar": "",
+	"bar2": "",
+	"IntStr": "0",
+	"PBool": true,
+	"PInt": 2,
+	"PInt8": 3,
+	"PInt16": 4,
+	"PInt32": 5,
+	"PInt64": 6,
+	"PUint": 7,
+	"PUint8": 8,
+	"PUint16": 9,
+	"PUint32": 10,
+	"PUint64": 11,
+	"PUintptr": 12,
+	"PFloat32": 14.1,
+	"PFloat64": 15.1,
+	"String": "",
+	"PString": "16",
+	"Map": null,
+	"MapP": null,
+	"PMap": {
+		"17": {
+			"Tag": "tag17"
+		},
+		"18": {
+			"Tag": "tag18"
+		}
+	},
+	"PMapP": {
+		"19": {
+			"Tag": "tag19"
+		},
+		"20": null
+	},
+	"EmptyMap": null,
+	"NilMap": null,
+	"Slice": null,
+	"SliceP": null,
+	"PSlice": [
+		{
+			"Tag": "tag20"
+		},
+		{
+			"Tag": "tag21"
+		}
+	],
+	"PSliceP": [
+		{
+			"Tag": "tag22"
+		},
+		null,
+		{
+			"Tag": "tag23"
+		}
+	],
+	"EmptySlice": null,
+	"NilSlice": null,
+	"StringSlice": null,
+	"ByteSlice": null,
+	"Small": {
+		"Tag": ""
+	},
+	"PSmall": null,
+	"PPSmall": {
+		"Tag": "tag31"
+	},
+	"Interface": null,
+	"PInterface": 5.2
+}`
+
+// pallValueCompact is pallValueIndent with all whitespace stripped.
+var pallValueCompact = strings.Map(noSpace, pallValueIndent)
+
+// TestRefUnmarshal checks that custom UnmarshalJSON (Ref) and
+// UnmarshalText (RefText) implementations are honored both for value
+// fields and for nil pointer fields that must be allocated first.
+func TestRefUnmarshal(t *testing.T) {
+	type S struct {
+		// Ref is defined in encode_test.go.
+		R0 Ref
+		R1 *Ref
+		R2 RefText
+		R3 *RefText
+	}
+	want := S{
+		R0: 12,
+		R1: new(Ref),
+		R2: 13,
+		R3: new(RefText),
+	}
+	*want.R1 = 12
+	*want.R3 = 13
+
+	var got S
+	if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+}
+
+// Test that the empty string doesn't panic decoding when ,string is specified
+// Issue 3450
+func TestEmptyString(t *testing.T) {
+	type T2 struct {
+		Number1 int `json:",string"`
+		Number2 int `json:",string"`
+	}
+	data := `{"Number1":"1", "Number2":""}`
+	dec := NewDecoder(strings.NewReader(data))
+	var t2 T2
+	err := dec.Decode(&t2)
+	// The empty quoted value must yield an error, not a panic...
+	if err == nil {
+		t.Fatal("Decode: did not return error")
+	}
+	// ...and the field decoded before the error must have been set.
+	if t2.Number1 != 1 {
+		t.Fatal("Decode: did not set Number1")
+	}
+}
+
+// Test that a null for ,string is not replaced with the previous quoted string (issue 7046).
+// It should also not be an error (issue 2540, issue 8587).
+func TestNullString(t *testing.T) {
+	type T struct {
+		A int  `json:",string"`
+		B int  `json:",string"`
+		C *int `json:",string"`
+	}
+	data := []byte(`{"A": "1", "B": null, "C": null}`)
+	var s T
+	// Pre-fill B and C so the test can tell whether null overwrote them.
+	s.B = 1
+	s.C = new(int)
+	*s.C = 2
+	err := Unmarshal(data, &s)
+	if err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+	// null must leave the int B untouched but nil out the pointer C.
+	if s.B != 1 || s.C != nil {
+		t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C)
+	}
+}
+
+// intp returns a pointer to a fresh int holding x.
+func intp(x int) *int {
+	p := new(int)
+	*p = x
+	return p
+}
+
+// intpp returns a pointer to a fresh *int holding x.
+func intpp(x *int) **int {
+	pp := new(*int)
+	*pp = x
+	return pp
+}
+
+// interfaceSetTests lists pre-existing values stored in an interface
+// field, the JSON decoded on top of them, and the expected value after.
+var interfaceSetTests = []struct {
+	pre  interface{}
+	json string
+	post interface{}
+}{
+	{"foo", `"bar"`, "bar"},
+	{"foo", `2`, 2.0},
+	{"foo", `true`, true},
+	{"foo", `null`, nil},
+
+	{nil, `null`, nil},
+	{new(int), `null`, nil},
+	{(*int)(nil), `null`, nil},
+	{new(*int), `null`, new(*int)},
+	{(**int)(nil), `null`, nil},
+	{intp(1), `null`, nil},
+	{intpp(nil), `null`, intpp(nil)},
+	{intpp(intp(1)), `null`, intpp(nil)},
+}
+
+// TestInterfaceSet decodes each case into a struct whose X field already
+// holds tt.pre and checks the resulting value against tt.post.
+func TestInterfaceSet(t *testing.T) {
+	for _, tt := range interfaceSetTests {
+		b := struct{ X interface{} }{tt.pre}
+		blob := `{"X":` + tt.json + `}`
+		if err := Unmarshal([]byte(blob), &b); err != nil {
+			t.Errorf("Unmarshal %#q: %v", blob, err)
+			continue
+		}
+		if !reflect.DeepEqual(b.X, tt.post) {
+			t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post)
+		}
+	}
+}
+
+// JSON null values should be ignored for primitives and string values instead of resulting in an error.
+// Issue 2540
+func TestUnmarshalNulls(t *testing.T) {
+	jsonData := []byte(`{
+				"Bool"    : null,
+				"Int"     : null,
+				"Int8"    : null,
+				"Int16"   : null,
+				"Int32"   : null,
+				"Int64"   : null,
+				"Uint"    : null,
+				"Uint8"   : null,
+				"Uint16"  : null,
+				"Uint32"  : null,
+				"Uint64"  : null,
+				"Float32" : null,
+				"Float64" : null,
+				"String"  : null}`)
+
+	// Every field is pre-filled with a non-zero value; nulls must not
+	// reset any of them.
+	nulls := All{
+		Bool:    true,
+		Int:     2,
+		Int8:    3,
+		Int16:   4,
+		Int32:   5,
+		Int64:   6,
+		Uint:    7,
+		Uint8:   8,
+		Uint16:  9,
+		Uint32:  10,
+		Uint64:  11,
+		Float32: 12.1,
+		Float64: 13.1,
+		String:  "14"}
+
+	err := Unmarshal(jsonData, &nulls)
+	if err != nil {
+		t.Errorf("Unmarshal of null values failed: %v", err)
+	}
+	if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 ||
+		nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 ||
+		nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" {
+
+		t.Errorf("Unmarshal of null values affected primitives")
+	}
+}
+
+// TestStringKind round-trips a map keyed by a named string type, checking
+// that string-kind (not just string-type) keys are supported.
+func TestStringKind(t *testing.T) {
+	type stringKind string
+
+	var m1, m2 map[stringKind]int
+	m1 = map[stringKind]int{
+		"foo": 42,
+	}
+
+	data, err := Marshal(m1)
+	if err != nil {
+		t.Errorf("Unexpected error marshaling: %v", err)
+	}
+
+	err = Unmarshal(data, &m2)
+	if err != nil {
+		t.Errorf("Unexpected error unmarshaling: %v", err)
+	}
+
+	if !reflect.DeepEqual(m1, m2) {
+		t.Error("Items should be equal after encoding and then decoding")
+	}
+}
+
+// Custom types with []byte as underlying type could not be marshalled
+// and then unmarshalled.
+// Issue 8962.
+func TestByteKind(t *testing.T) {
+	type byteKind []byte
+
+	a := byteKind("hello")
+
+	data, err := Marshal(a)
+	if err != nil {
+		t.Error(err)
+	}
+	var b byteKind
+	err = Unmarshal(data, &b)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(a, b) {
+		t.Errorf("expected %v == %v", a, b)
+	}
+}
+
+// The fix for issue 8962 introduced a regression.
+// Issue 12921.
+//
+// TestSliceOfCustomByte round-trips a slice of a named uint8 type, which
+// must behave like []byte (base64) rather than a JSON array.
+func TestSliceOfCustomByte(t *testing.T) {
+	type Uint8 uint8
+
+	a := []Uint8("hello")
+
+	data, err := Marshal(a)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var b []Uint8
+	err = Unmarshal(data, &b)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(a, b) {
+		// Fatalf, not Fatal: Fatal does not interpret format verbs, so the
+		// original call would have printed the verbs literally (go vet flags it).
+		t.Fatalf("expected %v == %v", a, b)
+	}
+}
+
+// decodeTypeErrorTests lists target/input pairs whose types cannot match;
+// each must produce an *UnmarshalTypeError.
+var decodeTypeErrorTests = []struct {
+	dest interface{}
+	src  string
+}{
+	{new(string), `{"user": "name"}`}, // issue 4628.
+	{new(error), `{}`},                // issue 4222
+	{new(error), `[]`},
+	{new(error), `""`},
+	{new(error), `123`},
+	{new(error), `true`},
+}
+
+// TestUnmarshalTypeError checks that type mismatches surface as
+// *UnmarshalTypeError rather than some other error kind.
+func TestUnmarshalTypeError(t *testing.T) {
+	for _, item := range decodeTypeErrorTests {
+		err := Unmarshal([]byte(item.src), item.dest)
+		if _, ok := err.(*UnmarshalTypeError); !ok {
+			t.Errorf("expected type error for Unmarshal(%q, type %T): got %T",
+				item.src, item.dest, err)
+		}
+	}
+}
+
+// unmarshalSyntaxTests lists truncated/malformed documents that must
+// produce a *SyntaxError.
+var unmarshalSyntaxTests = []string{
+	"tru",
+	"fals",
+	"nul",
+	"123e",
+	`"hello`,
+	`[1,2,3`,
+	`{"key":1`,
+	`{"key":1,`,
+}
+
+// TestUnmarshalSyntax checks that malformed input surfaces as *SyntaxError.
+func TestUnmarshalSyntax(t *testing.T) {
+	var x interface{}
+	for _, src := range unmarshalSyntaxTests {
+		err := Unmarshal([]byte(src), &x)
+		if _, ok := err.(*SyntaxError); !ok {
+			t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err)
+		}
+	}
+}
+
+// Test handling of unexported fields that should be ignored.
+// Issue 4660
+type unexportedFields struct {
+	Name string
+	m    map[string]interface{} `json:"-"`
+	m2   map[string]interface{} `json:"abcd"`
+}
+
+// TestUnmarshalUnexported checks that unexported fields stay nil even
+// when the input contains matching keys (including via a json tag).
+func TestUnmarshalUnexported(t *testing.T) {
+	input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}`
+	want := &unexportedFields{Name: "Bob"}
+
+	out := &unexportedFields{}
+	err := Unmarshal([]byte(input), out)
+	if err != nil {
+		t.Errorf("got error %v, expected nil", err)
+	}
+	if !reflect.DeepEqual(out, want) {
+		t.Errorf("got %q, want %q", out, want)
+	}
+}
+
+// Time3339 is a time.Time which encodes to and from JSON
+// as an RFC 3339 time in UTC.
+type Time3339 time.Time
+
+// UnmarshalJSON requires a quoted RFC 3339 string; any other form is
+// rejected, and time.Parse errors are returned unchanged.
+func (t *Time3339) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b)
+	}
+	// Strip the surrounding quotes before parsing.
+	tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1]))
+	if err != nil {
+		return err
+	}
+	*t = Time3339(tm)
+	return nil
+}
+
+// TestUnmarshalJSONLiteralError checks that an error returned by a custom
+// UnmarshalJSON (here time.Parse's out-of-range error) reaches the caller.
+func TestUnmarshalJSONLiteralError(t *testing.T) {
+	var t3 Time3339
+	err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3)
+	if err == nil {
+		t.Fatalf("expected error; got time %v", time.Time(t3))
+	}
+	if !strings.Contains(err.Error(), "range") {
+		t.Errorf("got err = %v; want out of range error", err)
+	}
+}
+
+// Test that extra object elements in an array do not result in a
+// "data changing underfoot" error.
+// Issue 3717
+func TestSkipArrayObjects(t *testing.T) {
+	json := `[{}]`
+	// A zero-length array target forces the decoder to skip the element.
+	var dest [0]interface{}
+
+	err := Unmarshal([]byte(json), &dest)
+	if err != nil {
+		t.Errorf("got error %q, want nil", err)
+	}
+}
+
+// Test semantics of pre-filled struct fields and pre-filled map fields.
+// Issue 4900.
+func TestPrefilled(t *testing.T) {
+	ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m }
+
+	// Values here change, cannot reuse table across runs.
+	var prefillTests = []struct {
+		in  string
+		ptr interface{}
+		out interface{}
+	}{
+		{
+			in:  `{"X": 1, "Y": 2}`,
+			ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5},
+			out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5},
+		},
+		{
+			in:  `{"X": 1, "Y": 2}`,
+			ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}),
+			out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}),
+		},
+	}
+
+	for _, tt := range prefillTests {
+		// Capture the stringified target before Unmarshal mutates it.
+		ptrstr := fmt.Sprintf("%v", tt.ptr)
+		err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here
+		if err != nil {
+			t.Errorf("Unmarshal: %v", err)
+		}
+		if !reflect.DeepEqual(tt.ptr, tt.out) {
+			t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out)
+		}
+	}
+}
+
+// invalidUnmarshalTests lists targets Unmarshal must reject outright
+// (nil, non-pointer, nil pointer) with the exact error message.
+var invalidUnmarshalTests = []struct {
+	v    interface{}
+	want string
+}{
+	{nil, "json: Unmarshal(nil)"},
+	{struct{}{}, "json: Unmarshal(non-pointer struct {})"},
+	{(*int)(nil), "json: Unmarshal(nil *int)"},
+}
+
+// TestInvalidUnmarshal checks the *InvalidUnmarshalError messages for
+// unusable targets against a JSON object input.
+func TestInvalidUnmarshal(t *testing.T) {
+	buf := []byte(`{"a":"1"}`)
+	for _, tt := range invalidUnmarshalTests {
+		err := Unmarshal(buf, tt.v)
+		if err == nil {
+			t.Errorf("Unmarshal expecting error, got nil")
+			continue
+		}
+		if got := err.Error(); got != tt.want {
+			t.Errorf("Unmarshal = %q; want %q", got, tt.want)
+		}
+	}
+}
+
+// invalidUnmarshalTextTests mirrors invalidUnmarshalTests for a number
+// input, adding a TextUnmarshaler target fed a non-string value.
+var invalidUnmarshalTextTests = []struct {
+	v    interface{}
+	want string
+}{
+	{nil, "json: Unmarshal(nil)"},
+	{struct{}{}, "json: Unmarshal(non-pointer struct {})"},
+	{(*int)(nil), "json: Unmarshal(nil *int)"},
+	{new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"},
+}
+
+// TestInvalidUnmarshalText checks the error messages for unusable targets
+// against a bare number input.
+func TestInvalidUnmarshalText(t *testing.T) {
+	buf := []byte(`123`)
+	for _, tt := range invalidUnmarshalTextTests {
+		err := Unmarshal(buf, tt.v)
+		if err == nil {
+			t.Errorf("Unmarshal expecting error, got nil")
+			continue
+		}
+		if got := err.Error(); got != tt.want {
+			t.Errorf("Unmarshal = %q; want %q", got, tt.want)
+		}
+	}
+}
+
+// Test that string option is ignored for invalid types.
+// Issue 9812.
+func TestInvalidStringOption(t *testing.T) {
+	num := 0
+	// ,string is only meaningful on strings, numbers, and bools; every
+	// field below misapplies it and must round-trip without error.
+	item := struct {
+		T time.Time         `json:",string"`
+		M map[string]string `json:",string"`
+		S []string          `json:",string"`
+		A [1]string         `json:",string"`
+		I interface{}       `json:",string"`
+		P *int              `json:",string"`
+	}{M: make(map[string]string), S: make([]string, 0), I: num, P: &num}
+
+	data, err := Marshal(item)
+	if err != nil {
+		t.Fatalf("Marshal: %v", err)
+	}
+
+	err = Unmarshal(data, &item)
+	if err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+}
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go
new file mode 100644
index 00000000..1dae8bb7
--- /dev/null
+++ b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go
@@ -0,0 +1,1197 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+	// encodeState carries the output buffer (e.Bytes below yields the
+	// encoded result); marshal reports the first encoding error, if any.
+	e := &encodeState{}
+	err := e.marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	// Encode compactly first, then re-format the compact output with Indent.
+	b, err := Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	err = Indent(&buf, b, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML