registry/client/push.go
Stephen J Day eaadb82e1e Move Manifest type into storage package
This changeset moves the Manifest type into the storage package to make the type
accessible to the client and registry without import cycles. The structure of the
manifest was also changed to accurately reflect the stages of the signing
process. A straw man Manifest.Sign method has been added to start testing this
concept out, but it will probably be accompanied by the more important
SignedManifest.Verify method as the security model develops.

This is probably the start of a concerted effort to consolidate types across
the client and server portions of the code base, but we may want to see how
such handy types as Manifest and SignedManifest would work in docker core.
2014-11-21 19:37:44 -08:00
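
A rough sketch of the shape this change describes (hypothetical only: the
actual field names and method signatures in the storage package may differ):

package storage

// FSLayer associates a layer with its content-addressable blob sum, as used
// by the push code below.
type FSLayer struct {
	BlobSum string
}

// Manifest is the raw, unsigned stage of the signing process.
type Manifest struct {
	Name     string
	Tag      string
	FSLayers []FSLayer
}

// SignedManifest is the post-signing stage; a Verify method checking its
// signatures would be the natural counterpart as the security model develops.
type SignedManifest struct {
	Manifest
	Signature []byte
}

// Sign is a straw-man signing step: a real implementation would produce a
// signature over the serialized manifest using the given key.
func (m Manifest) Sign(key []byte) (SignedManifest, error) {
	return SignedManifest{Manifest: m, Signature: nil}, nil
}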

141 lines
3.4 KiB
Go

package client

import (
	"bytes"
	"io"
	"io/ioutil"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker-registry/storage"
)

// simultaneousLayerPushWindow is the size of the parallel layer push window.
// A layer may not be pushed until the layer preceding it by the length of the
// push window has been successfully pushed.
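// For example, with a window of 4, the push of the fifth layer may begin only
// once the first layer has been pushed successfully.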
const simultaneousLayerPushWindow = 4

type pushFunction func(fsLayer storage.FSLayer) error

// Push implements a client push workflow for the image defined by the given
// name and tag pair, using the given ObjectStore for local manifest and layer
// storage.
func Push(c Client, objectStore ObjectStore, name, tag string) error {
	manifest, err := objectStore.Manifest(name, tag)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"name":  name,
			"tag":   tag,
		}).Info("No image found")
		return err
	}

	errChans := make([]chan error, len(manifest.FSLayers))
	for i := range manifest.FSLayers {
		errChans[i] = make(chan error)
	}
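
	// Each layer push reports its result on its own unbuffered channel; the
	// send in each goroutine is paired with cancelCh so that in-flight pushes
	// can exit once the overall push is aborted.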
	cancelCh := make(chan struct{})

	// Iterate over each layer in the manifest, simultaneously pushing no more
	// than simultaneousLayerPushWindow layers at a time. If an error is
	// received from a layer push, we abort the push.
	for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPushWindow; i++ {
		dependentLayer := i - simultaneousLayerPushWindow
		if dependentLayer >= 0 {
			err := <-errChans[dependentLayer]
			if err != nil {
				log.WithField("error", err).Warn("Push aborted")
				close(cancelCh)
				return err
			}
		}

		if i < len(manifest.FSLayers) {
			go func(i int) {
				select {
				case errChans[i] <- pushLayer(c, objectStore, name, manifest.FSLayers[i]):
				case <-cancelCh: // recv broadcast notification of cancellation
				}
			}(i)
		}
	}
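
	// By this point every layer has been pushed and acknowledged, so the
	// manifest can be uploaded to complete the push.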
	err = c.PutImageManifest(name, tag, manifest)
	if err != nil {
		log.WithFields(log.Fields{
			"error":    err,
			"manifest": manifest,
		}).Warn("Unable to upload manifest")
		return err
	}

	return nil
}

func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer storage.FSLayer) error {
	log.WithField("layer", fsLayer).Info("Pushing layer")

	layer, err := objectStore.Layer(fsLayer.BlobSum)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to read local layer")
		return err
	}

	layerReader, err := layer.Reader()
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to read local layer")
		return err
	}
	defer layerReader.Close()
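
	// Buffer the entire layer in memory so its exact size is known before
	// calling UploadBlob, which takes an explicit content length.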
	layerBuffer := new(bytes.Buffer)
	layerSize, err := io.Copy(layerBuffer, layerReader)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to read local layer")
		return err
	}
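
	// BlobLength reports a non-negative length when the registry already has
	// the blob, in which case the upload can be skipped entirely.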
	length, err := c.BlobLength(name, fsLayer.BlobSum)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to check existence of remote layer")
		return err
	}

	if length >= 0 {
		log.WithField("layer", fsLayer).Info("Layer already exists")
		return nil
	}
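
	// Uploading is two-phase: InitiateBlobUpload reserves an upload location
	// on the registry, and UploadBlob then sends the blob content there.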
	location, err := c.InitiateBlobUpload(name)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to upload layer")
		return err
	}

	err = c.UploadBlob(location, ioutil.NopCloser(layerBuffer), int(layerSize), fsLayer.BlobSum)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"layer": fsLayer,
		}).Warn("Unable to upload layer")
		return err
	}

	return nil
}
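
For reference, a minimal standalone sketch of the windowed pipelining pattern
that Push uses above. The work function, numItems, and window names are
illustrative stand-ins for pushLayer, the manifest's layer count, and
simultaneousLayerPushWindow:

package main

import (
	"fmt"
	"time"
)

// window mirrors simultaneousLayerPushWindow: item i may start only after
// item i-window has finished successfully.
const window = 4

// work stands in for pushLayer.
func work(i int) error {
	time.Sleep(10 * time.Millisecond)
	fmt.Println("finished item", i)
	return nil
}

func main() {
	const numItems = 8

	errChans := make([]chan error, numItems)
	for i := range errChans {
		errChans[i] = make(chan error)
	}
	cancelCh := make(chan struct{})

	for i := 0; i < numItems+window; i++ {
		// Wait for the item one window behind before starting a new one.
		if dep := i - window; dep >= 0 {
			if err := <-errChans[dep]; err != nil {
				close(cancelCh) // broadcast cancellation to running workers
				return
			}
		}
		if i < numItems {
			go func(i int) {
				select {
				case errChans[i] <- work(i):
				case <-cancelCh:
				}
			}(i)
		}
	}
	fmt.Println("all items complete")
}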