package storage

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"testing"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/storagedriver"
	"github.com/docker/distribution/storagedriver/inmemory"
	"github.com/docker/distribution/testutil"
)

// TestSimpleLayerUpload covers the layer upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleLayerUpload(t *testing.T) {
	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random reader: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	imageName := "foo/bar"

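	// Build a layerStore backed by the in-memory driver; the pathMapper places
	// test content under /storage/testing.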
	ls := &layerStore{
		driver: inmemory.New(),
		pathMapper: &pathMapper{
			root:    "/storage/testing",
			version: storagePathVersion,
		},
	}

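	// Tee the upload data through a sha256 hash so the stored layer can be
	// verified once the upload is finished.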
	h := sha256.New()
	rd := io.TeeReader(randomDataReader, h)

	layerUpload, err := ls.Upload(imageName)
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Cancel the upload then restart it
	if err := layerUpload.Cancel(); err != nil {
		t.Fatalf("unexpected error during upload cancellation: %v", err)
	}

	// Do a resume, get unknown upload
	layerUpload, err = ls.Resume(layerUpload.Name(), layerUpload.UUID())
	if err != ErrLayerUploadUnknown {
		t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
	}

	// Restart!
	layerUpload, err = ls.Upload(imageName)
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Get the size of our random tarfile
	randomDataSize, err := seekerSize(randomDataReader)
	if err != nil {
		t.Fatalf("error getting seeker size of random data: %v", err)
	}

	nn, err := io.Copy(layerUpload, rd)
	if err != nil {
		t.Fatalf("unexpected error uploading layer data: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("layer data write incomplete")
	}
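
	// The upload behaves as an io.WriteSeeker: a relative seek of zero reports
	// the current write offset, which should match the bytes copied above.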
	offset, err := layerUpload.Seek(0, os.SEEK_CUR)
	if err != nil {
		t.Fatalf("unexpected error seeking layer upload: %v", err)
	}

	if offset != nn {
		t.Fatalf("layerUpload not updated with correct offset: %v != %v", offset, nn)
	}
	layerUpload.Close()

	// Do a resume, for good fun
	layerUpload, err = ls.Resume(layerUpload.Name(), layerUpload.UUID())
	if err != nil {
		t.Fatalf("unexpected error resuming upload: %v", err)
	}

	sha256Digest := digest.NewDigest("sha256", h)

	layer, err := layerUpload.Finish(dgst)
	if err != nil {
		t.Fatalf("unexpected error finishing layer upload: %v", err)
	}

	// After finishing an upload, it should no longer exist.
	if _, err := ls.Resume(layerUpload.Name(), layerUpload.UUID()); err != ErrLayerUploadUnknown {
		t.Fatalf("expected layer upload to be unknown, got %v", err)
	}

	// Test for existence.
	exists, err := ls.Exists(layer.Name(), layer.Digest())
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v", err)
	}

	if !exists {
		t.Fatalf("layer should now exist")
	}

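	// Re-read the stored layer and re-hash it to confirm the content
	// round-tripped intact.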
	h.Reset()
	nn, err = io.Copy(h, layer)
	if err != nil {
		t.Fatalf("error reading layer: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("incorrect read length")
	}

	if digest.NewDigest("sha256", h) != sha256Digest {
		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
	}
}

// TestSimpleLayerRead just creates a simple layer file and ensures that basic
// open, read, seek, read works. More specific edge cases should be covered in
// other tests.
func TestSimpleLayerRead(t *testing.T) {
	imageName := "foo/bar"
	driver := inmemory.New()
	ls := &layerStore{
		driver: driver,
		pathMapper: &pathMapper{
			root:    "/storage/testing",
			version: storagePathVersion,
		},
	}

	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random data: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	// Test for existence.
	exists, err := ls.Exists(imageName, dgst)
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v", err)
	}

	if exists {
		t.Fatalf("layer should not exist")
	}

	// Try to get the layer and make sure we get a not found error
	layer, err := ls.Fetch(imageName, dgst)
	if err == nil {
		t.Fatalf("expected error fetching unknown layer")
	}

	switch err.(type) {
	case ErrUnknownLayer:
		err = nil
	default:
		t.Fatalf("unexpected error fetching non-existent layer: %v", err)
	}

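	// Write the layer directly through the driver rather than the upload path,
	// so there is content for the subsequent Fetch to find.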
	randomLayerDigest, err := writeTestLayer(driver, ls.pathMapper, imageName, dgst, randomLayerReader)
	if err != nil {
		t.Fatalf("unexpected error writing test layer: %v", err)
	}

	randomLayerSize, err := seekerSize(randomLayerReader)
	if err != nil {
		t.Fatalf("error getting seeker size for random layer: %v", err)
	}

	layer, err = ls.Fetch(imageName, dgst)
	if err != nil {
		t.Fatal(err)
	}
	defer layer.Close()

	// Now check the sha digest and ensure it's the same
	h := sha256.New()
	nn, err := io.Copy(h, layer)
	if err != nil && err != io.EOF {
		t.Fatalf("unexpected error copying to hash: %v", err)
	}

	if nn != randomLayerSize {
		t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	if sha256Digest != randomLayerDigest {
		t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, randomLayerDigest)
	}

	// Now seek back the layer, read the whole thing and check against randomLayerData
	offset, err := layer.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error seeking layer: %v", err)
	}

	if offset != 0 {
		t.Fatalf("seek failed: expected 0 offset, got %d", offset)
	}

	p, err := ioutil.ReadAll(layer)
	if err != nil {
		t.Fatalf("error reading all of layer: %v", err)
	}

	if len(p) != int(randomLayerSize) {
		t.Fatalf("layer data read has different length: %v != %v", len(p), randomLayerSize)
	}

	// Reset the randomLayerReader and read back the buffer
	_, err = randomLayerReader.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error resetting layer reader: %v", err)
	}

	randomLayerData, err := ioutil.ReadAll(randomLayerReader)
	if err != nil {
		t.Fatalf("random layer read failed: %v", err)
	}

	if !bytes.Equal(p, randomLayerData) {
		t.Fatalf("layer data not equal")
	}
}

// writeRandomLayer creates a random layer under name, using the given driver
// and pathMapper. An io.ReadSeeker with the layer data is returned, along
// with its tarsum and sha256 digests.
func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum digest.Digest, sha256digest digest.Digest, err error) {
	reader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		return nil, "", "", err
	}

	tarSum = digest.Digest(tarSumStr)

	// Now, actually create the layer.
	randomLayerDigest, err := writeTestLayer(driver, pathMapper, name, tarSum, ioutil.NopCloser(reader))
	if err != nil {
		return nil, "", "", err
	}

	if _, err := reader.Seek(0, os.SEEK_SET); err != nil {
		return nil, "", "", err
	}

	return reader, tarSum, randomLayerDigest, nil
}

// seekerSize seeks to the end of seeker, checks the size and returns it to
// the original state, returning the size. The state of the seeker should be
// treated as unknown if an error is returned.
func seekerSize(seeker io.ReadSeeker) (int64, error) {
	current, err := seeker.Seek(0, os.SEEK_CUR)
	if err != nil {
		return 0, err
	}

	end, err := seeker.Seek(0, os.SEEK_END)
	if err != nil {
		return 0, err
	}

	resumed, err := seeker.Seek(current, os.SEEK_SET)
	if err != nil {
		return 0, err
	}

	if resumed != current {
		return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
	}

	return end, nil
}

// writeTestLayer creates a simple test layer in the provided driver under
// tarsum dgst, returning the sha256 digest location. This is implemented
// piecemeal and should probably be replaced by the uploader when it's ready.
func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
	h := sha256.New()
	rd := io.TeeReader(content, h)

	p, err := ioutil.ReadAll(rd)
	if err != nil {
		return "", err
	}

	blobDigestSHA := digest.NewDigest("sha256", h)

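	// Write the blob content under its blob path, then link it into the named
	// repository so the layer store can resolve it.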
	blobPath, err := pathMapper.path(blobPathSpec{
		digest: dgst,
	})
	if err != nil {
		return "", err
	}

	if err := driver.PutContent(blobPath, p); err != nil {
		return "", err
	}

	layerLinkPath, err := pathMapper.path(layerLinkPathSpec{
		name:   name,
		digest: dgst,
	})
	if err != nil {
		return "", err
	}

	if err := driver.PutContent(layerLinkPath, []byte(dgst)); err != nil {
		return "", err
	}

	return blobDigestSHA, nil
}