Merge pull request #355 from mrunalp/update_image_lib

dep: Update containers/image to 1c202c5d85d2ee531acb1e91740144410066d19e
Antonio Murdaca 2017-02-03 23:34:23 +01:00 committed by GitHub
commit f5e5a4b848
4 changed files with 84 additions and 57 deletions
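The vendored update brings in two functional changes from containers/image: the Docker registry client now caches bearer tokens and only refetches once a token's computed expiry has passed (with a 60-second minimum lifetime), and the storage destination's PutBlob is split into an internal putBlob with an enforceDigestAndSize flag so that ReapplyBlob can restore a known blob without re-enforcing the size and digest checks.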

@@ -1,5 +1,5 @@
 {
-    "memo": "3b6ae2941d9c2fcab5942f6708186355cd795d594c81cd1c28defb85aadb6a92",
+    "memo": "54df0475f8948f4d0f416a919b64659031d5da2b3ba1f87141eb855c832b952c",
     "projects": [
         {
             "name": "github.com/BurntSushi/toml",
@@ -48,7 +48,7 @@
         {
             "name": "github.com/containers/image",
             "branch": "master",
-            "revision": "b9c0864946ae611722007b9eb011b6b5868d1d6d",
+            "revision": "1c202c5d85d2ee531acb1e91740144410066d19e",
             "packages": [
                 "copy",
                 "signature",

@ -3,6 +3,9 @@
"github.com/Sirupsen/logrus": { "github.com/Sirupsen/logrus": {
"branch": "master" "branch": "master"
}, },
"github.com/containers/image": {
"branch": "master"
},
"github.com/docker/distribution": { "github.com/docker/distribution": {
"branch": "master" "branch": "master"
}, },

@@ -37,23 +37,33 @@ const (
 	manifestURL   = "%s/manifests/%s"
 	blobsURL      = "%s/blobs/%s"
 	blobUploadURL = "%s/blobs/uploads/"
+
+	minimumTokenLifetimeSeconds = 60
 )
 
 // ErrV1NotSupported is returned when we're trying to talk to a
 // docker V1 registry.
 var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
 
+type bearerToken struct {
+	Token     string    `json:"token"`
+	ExpiresIn int       `json:"expires_in"`
+	IssuedAt  time.Time `json:"issued_at"`
+}
+
 // dockerClient is configuration for dealing with a single Docker registry.
 type dockerClient struct {
 	ctx             *types.SystemContext
 	registry        string
 	username        string
 	password        string
 	scheme          string // Cache of a value returned by a successful ping() if not empty
 	client          *http.Client
 	signatureBase   signatureStorageBase
 	challenges      []challenge
 	scope           authScope
+	token           *bearerToken
+	tokenExpiration time.Time
 }
 
 type authScope struct {
@@ -262,26 +272,30 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
 		req.SetBasicAuth(c.username, c.password)
 		return nil
 	case "bearer":
-		realm, ok := challenge.Parameters["realm"]
-		if !ok {
-			return errors.Errorf("missing realm in bearer auth challenge")
+		if c.token == nil || time.Now().After(c.tokenExpiration) {
+			realm, ok := challenge.Parameters["realm"]
+			if !ok {
+				return errors.Errorf("missing realm in bearer auth challenge")
+			}
+			service, _ := challenge.Parameters["service"] // Will be "" if not present
+			scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
+			token, err := c.getBearerToken(realm, service, scope)
+			if err != nil {
+				return err
+			}
+			c.token = token
+			c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
 		}
-		service, _ := challenge.Parameters["service"] // Will be "" if not present
-		scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
-		token, err := c.getBearerToken(realm, service, scope)
-		if err != nil {
-			return err
-		}
-		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
+		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
 		return nil
 	}
 	return errors.Errorf("no handler for %s authentication", challenge.Scheme)
 }
 
-func (c *dockerClient) getBearerToken(realm, service, scope string) (string, error) {
+func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToken, error) {
 	authReq, err := http.NewRequest("GET", realm, nil)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 	getParams := authReq.URL.Query()
 	if service != "" {
@@ -300,35 +314,33 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, err
 	client := &http.Client{Transport: tr}
 	res, err := client.Do(authReq)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 	defer res.Body.Close()
 	switch res.StatusCode {
 	case http.StatusUnauthorized:
-		return "", errors.Errorf("unable to retrieve auth token: 401 unauthorized")
+		return nil, errors.Errorf("unable to retrieve auth token: 401 unauthorized")
 	case http.StatusOK:
 		break
 	default:
-		return "", errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
+		return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
 	}
 	tokenBlob, err := ioutil.ReadAll(res.Body)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
-	tokenStruct := struct {
-		Token string `json:"token"`
-	}{}
-	if err := json.Unmarshal(tokenBlob, &tokenStruct); err != nil {
-		return "", err
+	var token bearerToken
+	if err := json.Unmarshal(tokenBlob, &token); err != nil {
+		return nil, err
 	}
-	// TODO(runcom): reuse tokens?
-	//hostAuthTokens, ok = rb.hostsV2AuthTokens[req.URL.Host]
-	//if !ok {
-	//hostAuthTokens = make(map[string]string)
-	//rb.hostsV2AuthTokens[req.URL.Host] = hostAuthTokens
-	//}
-	//hostAuthTokens[repo] = tokenStruct.Token
-	return tokenStruct.Token, nil
+	if token.ExpiresIn < minimumTokenLifetimeSeconds {
+		token.ExpiresIn = minimumTokenLifetimeSeconds
+		logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+	}
+	if token.IssuedAt.IsZero() {
+		token.IssuedAt = time.Now().UTC()
+	}
+	return &token, nil
 }
 
 func getAuth(ctx *types.SystemContext, registry string) (string, string, error) {
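Stepping back from the diff: the client previously requested a fresh bearer token for every authenticated request; now it keeps the token plus a computed expiration on the dockerClient and reuses it until the clock passes that deadline. Below is a minimal, self-contained sketch of the same pattern; the names (token, cachedTokenSource, fetch, minimumLifetimeSeconds) are invented for illustration and are not part of the containers/image API.

package main

import (
	"fmt"
	"time"
)

// token mirrors the fields a registry auth endpoint returns.
type token struct {
	Value     string
	ExpiresIn int       // seconds; servers may omit this or return tiny values
	IssuedAt  time.Time // servers may omit this as well
}

// cachedTokenSource wraps a fetch function and reuses its result until
// the token expires, mirroring dockerClient.setupRequestAuth above.
type cachedTokenSource struct {
	fetch      func() (*token, error)
	cached     *token
	expiration time.Time
}

const minimumLifetimeSeconds = 60 // same floor as the vendored code

func (s *cachedTokenSource) get() (string, error) {
	if s.cached == nil || time.Now().After(s.expiration) {
		t, err := s.fetch()
		if err != nil {
			return "", err
		}
		// Normalize missing or too-short fields before computing the
		// expiry, as getBearerToken does above.
		if t.ExpiresIn < minimumLifetimeSeconds {
			t.ExpiresIn = minimumLifetimeSeconds
		}
		if t.IssuedAt.IsZero() {
			t.IssuedAt = time.Now().UTC()
		}
		s.cached = t
		s.expiration = t.IssuedAt.Add(time.Duration(t.ExpiresIn) * time.Second)
	}
	return s.cached.Value, nil
}

func main() {
	calls := 0
	src := &cachedTokenSource{fetch: func() (*token, error) {
		calls++
		return &token{Value: fmt.Sprintf("tok-%d", calls), ExpiresIn: 300}, nil
	}}
	a, _ := src.get()
	b, _ := src.get() // served from cache; fetch is not called again
	fmt.Println(a, b, "fetch calls:", calls) // tok-1 tok-1 fetch calls: 1
}

One consequence worth noting: because the expiration is computed from IssuedAt plus ExpiresIn, a server that reports neither still yields a sane 60-second reuse window instead of a token that is refetched on every request.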

@@ -131,11 +131,10 @@ func (s storageImageDestination) ShouldCompressLayers() bool {
 	return false
 }
 
-// PutBlob is used to both store filesystem layers and binary data that is part
-// of the image. Filesystem layers are assumed to be imported in order, as
-// that is required by some of the underlying storage drivers.
-func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
-	blobSize := int64(-1)
+// putBlob stores a layer or data blob, optionally enforcing that a digest in
+// blobinfo matches the incoming data.
+func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) {
+	blobSize := blobinfo.Size
 	digest := blobinfo.Digest
 	errorBlobInfo := types.BlobInfo{
 		Digest: "",
@@ -207,10 +206,9 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI
 		// Hang on to the new layer's ID.
 		id = layer.ID
 	}
-	blobSize = counter.Count
 	// Check if the size looks right.
-	if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
-		logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
+	if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
+		logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size)
 		if layer != nil {
 			// Something's wrong; delete the newly-created layer.
 			s.imageRef.transport.store.DeleteLayer(layer.ID)
@ -218,14 +216,18 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI
return errorBlobInfo, ErrBlobSizeMismatch return errorBlobInfo, ErrBlobSizeMismatch
} }
// If the content digest was specified, verify it. // If the content digest was specified, verify it.
if digest.Validate() == nil && digest.String() != hash { if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash)
if layer != nil { if layer != nil {
// Something's wrong; delete the newly-created layer. // Something's wrong; delete the newly-created layer.
s.imageRef.transport.store.DeleteLayer(layer.ID) s.imageRef.transport.store.DeleteLayer(layer.ID)
} }
return errorBlobInfo, ErrBlobDigestMismatch return errorBlobInfo, ErrBlobDigestMismatch
} }
// If we didn't get a blob size, return the one we calculated.
if blobSize == -1 {
blobSize = counter.Count
}
// If we didn't get a digest, construct one. // If we didn't get a digest, construct one.
if digest == "" { if digest == "" {
digest = ddigest.Digest(hash) digest = ddigest.Digest(hash)
@@ -234,7 +236,7 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI
 	// ended up having. This is a list, in case the same blob is
 	// being applied more than once.
 	s.Layers[digest] = append(s.Layers[digest], id)
-	s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
+	s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count})
 	if layer != nil {
 		logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id)
 	} else {
@@ -249,25 +251,28 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI
 		if err != nil && err != io.EOF {
 			return errorBlobInfo, err
 		}
-		blobSize = int64(len(blob))
 		hash = hasher.Digest().String()
-		if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
-			logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
+		if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size {
+			logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size)
 			return errorBlobInfo, ErrBlobSizeMismatch
 		}
 		// If we were given a digest, verify that the content matches
 		// it.
-		if digest.Validate() == nil && digest.String() != hash {
+		if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
 			logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
 			return errorBlobInfo, ErrBlobDigestMismatch
 		}
+		// If we didn't get a blob size, return the one we calculated.
+		if blobSize == -1 {
+			blobSize = int64(len(blob))
+		}
 		// If we didn't get a digest, construct one.
 		if digest == "" {
 			digest = ddigest.Digest(hash)
 		}
 		// Save the blob for when we Commit().
 		s.BlobData[digest] = blob
-		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
+		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))})
 		logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
 	}
 	return types.BlobInfo{
@@ -276,6 +281,13 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI
 	}, nil
 }
 
+// PutBlob is used to both store filesystem layers and binary data that is part
+// of the image. Filesystem layers are assumed to be imported in order, as
+// that is required by some of the underlying storage drivers.
+func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
+	return s.putBlob(stream, blobinfo, true)
+}
+
 func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
 	if blobinfo.Digest == "" {
 		return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
@@ -305,7 +317,7 @@ func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.Bl
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
-	return s.PutBlob(rc, blobinfo)
+	return s.putBlob(rc, blobinfo, false)
 }
 
 func (s *storageImageDestination) Commit() error {
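The storage change follows the usual worker-plus-wrappers split: an unexported putBlob takes a policy flag, the exported PutBlob keeps its old verifying behavior by passing true, and ReapplyBlob passes false because it replays a blob whose digest was already validated when it was first written. A compact sketch of that shape follows, assuming hypothetical writeBlob/PutBlob/ReapplyBlob helpers rather than the real storageImageDestination types.

package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
)

var errDigestMismatch = errors.New("blob digest mismatch")

// writeBlob consumes the stream, hashes it, and optionally rejects the
// data when a caller-supplied digest does not match -- the same shape as
// putBlob's enforceDigestAndSize parameter in the diff above.
func writeBlob(stream io.Reader, wantDigest string, enforce bool) (string, int64, error) {
	h := sha256.New()
	size, err := io.Copy(h, stream)
	if err != nil {
		return "", -1, err
	}
	digest := fmt.Sprintf("sha256:%x", h.Sum(nil))
	if enforce && wantDigest != "" && wantDigest != digest {
		return "", -1, errDigestMismatch
	}
	return digest, size, nil
}

// PutBlob verifies incoming data; ReapplyBlob trusts data it stored before.
func PutBlob(stream io.Reader, digest string) (string, int64, error) {
	return writeBlob(stream, digest, true)
}

func ReapplyBlob(stream io.Reader, digest string) (string, int64, error) {
	return writeBlob(stream, digest, false)
}

func main() {
	d, n, err := PutBlob(bytes.NewBufferString("hello"), "")
	fmt.Println(d, n, err) // computed digest, 5, <nil>
}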