dep: Update containers/image to 1c202c5d85d2ee531acb1e91740144410066d19e

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2017-02-03 11:15:09 -08:00
parent dfa93414c5
commit 67588ca9bf
4 changed files with 84 additions and 57 deletions


@@ -1,5 +1,5 @@
{
"memo": "3b6ae2941d9c2fcab5942f6708186355cd795d594c81cd1c28defb85aadb6a92",
"memo": "54df0475f8948f4d0f416a919b64659031d5da2b3ba1f87141eb855c832b952c",
"projects": [
{
"name": "github.com/BurntSushi/toml",
@@ -48,7 +48,7 @@
{
"name": "github.com/containers/image",
"branch": "master",
"revision": "b9c0864946ae611722007b9eb011b6b5868d1d6d",
"revision": "1c202c5d85d2ee531acb1e91740144410066d19e",
"packages": [
"copy",
"signature",


@@ -3,6 +3,9 @@
"github.com/Sirupsen/logrus": {
"branch": "master"
},
"github.com/containers/image": {
"branch": "master"
},
"github.com/docker/distribution": {
"branch": "master"
},


@@ -37,23 +37,33 @@ const (
manifestURL = "%s/manifests/%s"
blobsURL = "%s/blobs/%s"
blobUploadURL = "%s/blobs/uploads/"
minimumTokenLifetimeSeconds = 60
)
// ErrV1NotSupported is returned when we're trying to talk to a
// docker V1 registry.
var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
type bearerToken struct {
Token string `json:"token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
}
// dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct {
ctx *types.SystemContext
registry string
username string
password string
scheme string // Cache of a value returned by a successful ping() if not empty
client *http.Client
signatureBase signatureStorageBase
challenges []challenge
scope authScope
ctx *types.SystemContext
registry string
username string
password string
scheme string // Cache of a value returned by a successful ping() if not empty
client *http.Client
signatureBase signatureStorageBase
challenges []challenge
scope authScope
token *bearerToken
tokenExpiration time.Time
}
type authScope struct {
@@ -262,26 +272,30 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
realm, ok := challenge.Parameters["realm"]
if !ok {
return errors.Errorf("missing realm in bearer auth challenge")
if c.token == nil || time.Now().After(c.tokenExpiration) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return errors.Errorf("missing realm in bearer auth challenge")
}
service, _ := challenge.Parameters["service"] // Will be "" if not present
scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
token, err := c.getBearerToken(realm, service, scope)
if err != nil {
return err
}
c.token = token
c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
}
service, _ := challenge.Parameters["service"] // Will be "" if not present
scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
token, err := c.getBearerToken(realm, service, scope)
if err != nil {
return err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
return nil
}
return errors.Errorf("no handler for %s authentication", challenge.Scheme)
}
func (c *dockerClient) getBearerToken(realm, service, scope string) (string, error) {
func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToken, error) {
authReq, err := http.NewRequest("GET", realm, nil)
if err != nil {
return "", err
return nil, err
}
getParams := authReq.URL.Query()
if service != "" {
@@ -300,35 +314,33 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, error) {
client := &http.Client{Transport: tr}
res, err := client.Do(authReq)
if err != nil {
return "", err
return nil, err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusUnauthorized:
return "", errors.Errorf("unable to retrieve auth token: 401 unauthorized")
return nil, errors.Errorf("unable to retrieve auth token: 401 unauthorized")
case http.StatusOK:
break
default:
return "", errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
}
tokenBlob, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
return nil, err
}
tokenStruct := struct {
Token string `json:"token"`
}{}
if err := json.Unmarshal(tokenBlob, &tokenStruct); err != nil {
return "", err
var token bearerToken
if err := json.Unmarshal(tokenBlob, &token); err != nil {
return nil, err
}
// TODO(runcom): reuse tokens?
//hostAuthTokens, ok = rb.hostsV2AuthTokens[req.URL.Host]
//if !ok {
//hostAuthTokens = make(map[string]string)
//rb.hostsV2AuthTokens[req.URL.Host] = hostAuthTokens
//}
//hostAuthTokens[repo] = tokenStruct.Token
return tokenStruct.Token, nil
if token.ExpiresIn < minimumTokenLifetimeSeconds {
token.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
}
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
return &token, nil
}
func getAuth(ctx *types.SystemContext, registry string) (string, string, error) {
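Taken together, the registry-client hunks above make dockerClient remember its bearer token (the new bearerToken struct plus the token and tokenExpiration fields) and only re-request one from the auth realm when the cached token has expired, padding very short lifetimes up to minimumTokenLifetimeSeconds and defaulting a missing issued_at to the current time. The following is a minimal, self-contained sketch of that reuse pattern; the cachedToken type, getToken helper, and fetch callback are illustrative stand-ins and are not part of the vendored code.

package main

import (
	"fmt"
	"time"
)

// cachedToken mirrors the idea behind bearerToken plus tokenExpiration:
// remember the token and the instant after which it must be refreshed.
type cachedToken struct {
	value      string
	expiration time.Time
}

// Analogous to minimumTokenLifetimeSeconds in the hunk above.
const minLifetime = 60 * time.Second

// getToken returns the cached token while it is still valid; otherwise it
// calls fetch, pads too-short lifetimes up to minLifetime, and caches the result.
func getToken(cache *cachedToken, fetch func() (string, time.Duration, error)) (string, error) {
	if cache.value != "" && time.Now().Before(cache.expiration) {
		return cache.value, nil // reuse, no extra round trip to the auth realm
	}
	value, lifetime, err := fetch()
	if err != nil {
		return "", err
	}
	if lifetime < minLifetime {
		lifetime = minLifetime
	}
	cache.value = value
	cache.expiration = time.Now().Add(lifetime)
	return cache.value, nil
}

func main() {
	var cache cachedToken
	fetch := func() (string, time.Duration, error) {
		fmt.Println("fetching a fresh token")
		return "example-token", 5 * time.Second, nil // 5s gets padded to 60s
	}
	for i := 0; i < 2; i++ {
		tok, err := getToken(&cache, fetch)
		if err != nil {
			panic(err)
		}
		fmt.Println("using token:", tok) // the second iteration reuses the cache
	}
}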


@@ -131,11 +131,10 @@ func (s storageImageDestination) ShouldCompressLayers() bool {
return false
}
// PutBlob is used to both store filesystem layers and binary data that is part
// of the image. Filesystem layers are assumed to be imported in order, as
// that is required by some of the underlying storage drivers.
func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
blobSize := int64(-1)
// putBlob stores a layer or data blob, optionally enforcing that a digest in
// blobinfo matches the incoming data.
func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) {
blobSize := blobinfo.Size
digest := blobinfo.Digest
errorBlobInfo := types.BlobInfo{
Digest: "",
@@ -207,10 +206,9 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
// Hang on to the new layer's ID.
id = layer.ID
}
blobSize = counter.Count
// Check if the size looks right.
if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size)
if layer != nil {
// Something's wrong; delete the newly-created layer.
s.imageRef.transport.store.DeleteLayer(layer.ID)
@@ -218,14 +216,18 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
return errorBlobInfo, ErrBlobSizeMismatch
}
// If the content digest was specified, verify it.
if digest.Validate() == nil && digest.String() != hash {
logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash)
if layer != nil {
// Something's wrong; delete the newly-created layer.
s.imageRef.transport.store.DeleteLayer(layer.ID)
}
return errorBlobInfo, ErrBlobDigestMismatch
}
// If we didn't get a blob size, return the one we calculated.
if blobSize == -1 {
blobSize = counter.Count
}
// If we didn't get a digest, construct one.
if digest == "" {
digest = ddigest.Digest(hash)
@@ -234,7 +236,7 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
// ended up having. This is a list, in case the same blob is
// being applied more than once.
s.Layers[digest] = append(s.Layers[digest], id)
s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count})
if layer != nil {
logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id)
} else {
@@ -249,25 +251,28 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
if err != nil && err != io.EOF {
return errorBlobInfo, err
}
blobSize = int64(len(blob))
hash = hasher.Digest().String()
if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size {
logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size)
return errorBlobInfo, ErrBlobSizeMismatch
}
// If we were given a digest, verify that the content matches
// it.
if digest.Validate() == nil && digest.String() != hash {
if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
return errorBlobInfo, ErrBlobDigestMismatch
}
// If we didn't get a blob size, return the one we calculated.
if blobSize == -1 {
blobSize = int64(len(blob))
}
// If we didn't get a digest, construct one.
if digest == "" {
digest = ddigest.Digest(hash)
}
// Save the blob for when we Commit().
s.BlobData[digest] = blob
s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))})
logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
}
return types.BlobInfo{
@@ -276,6 +281,13 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
}, nil
}
// PutBlob is used to both store filesystem layers and binary data that is part
// of the image. Filesystem layers are assumed to be imported in order, as
// that is required by some of the underlying storage drivers.
func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
return s.putBlob(stream, blobinfo, true)
}
func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
if blobinfo.Digest == "" {
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
@@ -305,7 +317,7 @@ func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
if err != nil {
return types.BlobInfo{}, err
}
return s.PutBlob(rc, blobinfo)
return s.putBlob(rc, blobinfo, false)
}
func (s *storageImageDestination) Commit() error {
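The storage hunks above split the old PutBlob body into an internal putBlob that takes an enforceDigestAndSize flag: PutBlob keeps enforcing the size and digest given in blobinfo, while ReapplyBlob now passes false, so re-applying an already known blob is no longer rejected when the locally re-read data does not match the original blobinfo byte for byte. Below is a rough sketch of that verify-on-first-write, skip-on-reapply split; writeBlob, its parameters, and the sample digest are illustrative only and do not appear in the vendored code.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"strings"
)

var errDigestMismatch = errors.New("blob digest mismatch")

// writeBlob consumes the stream and, when enforceDigest is true, rejects data
// whose SHA-256 does not match expected. This mirrors the putBlob split:
// the PutBlob path passes true, the ReapplyBlob path passes false.
func writeBlob(stream io.Reader, expected string, enforceDigest bool) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, stream); err != nil {
		return "", err
	}
	actual := hex.EncodeToString(h.Sum(nil))
	if enforceDigest && expected != "" && expected != actual {
		return "", errDigestMismatch
	}
	return actual, nil
}

func main() {
	data := "layer contents"
	digest, _ := writeBlob(strings.NewReader(data), "", true)
	fmt.Println("stored with digest", digest)
	// Re-applying with a stale expected digest still succeeds because the
	// enforcement flag is off, matching ReapplyBlob's putBlob(rc, blobinfo, false).
	if _, err := writeBlob(strings.NewReader(data), "deadbeef", false); err != nil {
		panic(err)
	}
	fmt.Println("reapplied without re-verification")
}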