c062a85782
After implementing pull, a few changes are required to the content store interface to make sure that the implementation works smoothly. Specifically, we work to make sure the predeclaration path for digests works the same between remote and local writers. Before, we were hesitant to require the size and digest up front, but it became clear that having them provided a significant benefit. There are also several cleanups related to naming. We now call the expected digest `Expected` consistently across the board and `Total` is used to mark the expected size. This whole effort comes together to provide a very smooth status reporting workflow for image pull and push. This will be more obvious when the bulk of the pull code lands. There are a few other changes to make `content.WriteBlob` more broadly useful. In accordance with the addition of predeclaring the expected size when getting a `Writer`, `WriteBlob` now supports this fully. It will also resume downloads if provided an `io.Seeker` or `io.ReaderAt`. Coupled with the `httpReadSeeker` from `docker/distribution`, we should be only a few lines of code away from resumable downloads.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
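As a rough sketch of the new calling convention (an added illustration, not part of this change; the helper name, the caller sitting inside this package, and the `bytes`, `context`, and go-digest imports are all assumed), predeclaring the size and digest and handing `WriteBlob` a resumable reader might look like this:

func writeKnownBlob(ctx context.Context, cs Ingester, ref string, p []byte) error {
	// writeKnownBlob is a hypothetical helper: the size and digest are known
	// before the write begins, so both are predeclared when the writer is
	// requested through WriteBlob.
	expected := digest.FromBytes(p)
	size := int64(len(p))

	// A *bytes.Reader implements both io.Seeker and io.ReaderAt, so a
	// partially written ref can be resumed from its current offset.
	return WriteBlob(ctx, cs, ref, bytes.NewReader(p), size, expected)
}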
package content

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// WriteBlob writes data with the expected digest into the content store. If
// expected already exists, the method returns immediately and the reader will
// not be consumed.
//
// This is useful when the digest and size are known beforehand.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size int64, expected digest.Digest) error {
	cw, err := cs.Writer(ctx, ref, size, expected)
	if err != nil {
		if !IsExists(err) {
			return err
		}

		return nil // already present
	}

	ws, err := cw.Status()
	if err != nil {
		return err
	}

	if ws.Offset > 0 {
		// A partial ingest already exists for this ref: seek the reader
		// forward so the write resumes from the current offset.
		r, err = seekReader(r, ws.Offset, size)
		if err != nil {
			return errors.Wrapf(err, "unable to resume write to %v", ref)
		}
	}

	buf := BufPool.Get().([]byte)
	defer BufPool.Put(buf)

	if _, err := io.CopyBuffer(cw, r, buf); err != nil {
		return err
	}

	if err := cw.Commit(size, expected); err != nil {
		if !IsExists(err) {
			return err
		}
	}

	return nil
}

// seekReader attempts to seek the reader to the given offset, either by
// resolving `io.Seeker` or by detecting `io.ReaderAt`.
func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
	// attempt to resolve r as a seeker and setup the offset.
	seeker, ok := r.(io.Seeker)
	if ok {
		nn, err := seeker.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}

		if nn != offset {
			return nil, fmt.Errorf("failed to seek to offset %v", offset)
		}

		return r, nil
	}

	// ok, let's try io.ReaderAt!
	readerAt, ok := r.(io.ReaderAt)
	if ok && size > offset {
		// expose only the remaining bytes of the blob.
		sr := io.NewSectionReader(readerAt, offset, size-offset)
		return sr, nil
	}

	return nil, errors.Errorf("cannot seek to offset %v", offset)
}
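
// Illustration (an added sketch, not part of the original file): a
// *bytes.Reader satisfies io.Seeker, so resuming at offset 2 of a 5-byte
// blob takes the seek path above:
//
//	r, _ := seekReader(bytes.NewReader([]byte("hello")), 2, 5)
//	io.Copy(ioutil.Discard, r) // copies the remaining "llo"
//
// A reader that only implements io.ReaderAt falls through to the section
// reader branch, which exposes just the remaining size-offset bytes.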

// readFileString returns the contents of the file at path as a string.
func readFileString(path string) (string, error) {
	p, err := ioutil.ReadFile(path)
	return string(p), err
}