Use "Size" field to describe blobs over "Length"

After consideration, we've changed the main descriptor field name for the number
of bytes to "size" to match convention. While this may be a subjective
argument, commonly we refer to files by their "size" rather than their
"length". This will match other conventions, like `(FileInfo).Size()` and
methods on `io.SizeReaderAt`. Under broader analysis, this argument doesn't
necessarily hold up. If anything, "size" is shorter than "length".

Signed-off-by: Stephen J Day <stephen.day@docker.com>
This commit is contained in:
Stephen J Day 2015-07-17 17:07:11 -07:00
parent c958371f4b
commit 249ad3b76d
9 changed files with 38 additions and 35 deletions

View file

@ -343,7 +343,7 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea
return nil, err return nil, err
} }
return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Length), nil return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil
} }
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
@ -366,7 +366,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
desc := distribution.Descriptor{ desc := distribution.Descriptor{
MediaType: mediaType, MediaType: mediaType,
Length: int64(len(p)), Size: int64(len(p)),
Digest: dgstr.Digest(), Digest: dgstr.Digest(),
} }
@ -435,7 +435,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
return distribution.Descriptor{ return distribution.Descriptor{
MediaType: resp.Header.Get("Content-Type"), MediaType: resp.Header.Get("Content-Type"),
Length: length, Size: length,
Digest: dgst, Digest: dgst,
}, nil }, nil
case http.StatusNotFound: case http.StatusNotFound:

View file

@ -127,8 +127,8 @@ func TestBlobExists(t *testing.T) {
t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1)
} }
if stat.Length != int64(len(b1)) { if stat.Size != int64(len(b1)) {
t.Fatalf("Unexpected length: %d, expected %d", stat.Length, len(b1)) t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1))
} }
// TODO(dmcgowan): Test error cases and ErrBlobUnknown case // TODO(dmcgowan): Test error cases and ErrBlobUnknown case
@ -244,14 +244,14 @@ func TestBlobUploadChunked(t *testing.T) {
blob, err := upload.Commit(ctx, distribution.Descriptor{ blob, err := upload.Commit(ctx, distribution.Descriptor{
Digest: dgst, Digest: dgst,
Length: int64(len(b1)), Size: int64(len(b1)),
}) })
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if blob.Length != int64(len(b1)) { if blob.Size != int64(len(b1)) {
t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1)) t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1))
} }
} }
@ -352,14 +352,14 @@ func TestBlobUploadMonolithic(t *testing.T) {
blob, err := upload.Commit(ctx, distribution.Descriptor{ blob, err := upload.Commit(ctx, distribution.Descriptor{
Digest: dgst, Digest: dgst,
Length: int64(len(b1)), Size: int64(len(b1)),
}) })
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if blob.Length != int64(len(b1)) { if blob.Size != int64(len(b1)) {
t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1)) t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1))
} }
} }

View file

@ -178,7 +178,7 @@ func TestSimpleBlobRead(t *testing.T) {
t.Fatalf("error getting seeker size for random layer: %v", err) t.Fatalf("error getting seeker size for random layer: %v", err)
} }
descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Length: randomLayerSize} descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize}
t.Logf("desc: %v", descBefore) t.Logf("desc: %v", descBefore)
desc, err = addBlob(ctx, bs, descBefore, randomLayerReader) desc, err = addBlob(ctx, bs, descBefore, randomLayerReader)
@ -186,8 +186,8 @@ func TestSimpleBlobRead(t *testing.T) {
t.Fatalf("error adding blob to blobservice: %v", err) t.Fatalf("error adding blob to blobservice: %v", err)
} }
if desc.Length != randomLayerSize { if desc.Size != randomLayerSize {
t.Fatalf("committed blob has incorrect length: %v != %v", desc.Length, randomLayerSize) t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
} }
rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
@ -330,8 +330,8 @@ func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distributio
if nn, err := io.Copy(wr, rd); err != nil { if nn, err := io.Copy(wr, rd); err != nil {
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} else if nn != desc.Length { } else if nn != desc.Size {
return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Length) return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size)
} }
return wr.Commit(ctx, desc) return wr.Commit(ctx, desc)

View file

@ -41,7 +41,7 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h
http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
case driver.ErrUnsupportedMethod: case driver.ErrUnsupportedMethod:
// Fallback to serving the content directly. // Fallback to serving the content directly.
br, err := newFileReader(ctx, bs.driver, path, desc.Length) br, err := newFileReader(ctx, bs.driver, path, desc.Size)
if err != nil { if err != nil {
return err return err
} }
@ -61,7 +61,7 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h
if w.Header().Get("Content-Length") == "" { if w.Header().Get("Content-Length") == "" {
// Set the content length if not already set. // Set the content length if not already set.
w.Header().Set("Content-Length", fmt.Sprint(desc.Length)) w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
} }
http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)

View file

@ -50,7 +50,7 @@ func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution
return nil, err return nil, err
} }
return newFileReader(ctx, bs.driver, path, desc.Length) return newFileReader(ctx, bs.driver, path, desc.Size)
} }
// Put stores the content p in the blob store, calculating the digest. If the // Put stores the content p in the blob store, calculating the digest. If the
@ -81,7 +81,7 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr
// TODO(stevvooe): Write out mediatype here, as well. // TODO(stevvooe): Write out mediatype here, as well.
return distribution.Descriptor{ return distribution.Descriptor{
Length: int64(len(p)), Size: int64(len(p)),
// NOTE(stevvooe): The central blob store firewalls media types from // NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value // other users. The caller should look this up and override the value
@ -179,7 +179,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
// mediatype that overrides the main one. // mediatype that overrides the main one.
return distribution.Descriptor{ return distribution.Descriptor{
Length: fi.Size(), Size: fi.Size(),
// NOTE(stevvooe): The central blob store firewalls media types from // NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value // other users. The caller should look this up and override the value

View file

@ -148,7 +148,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
// NOTE(stevvooe): We really don't care if the file is // NOTE(stevvooe): We really don't care if the file is
// not actually present for the reader. We now assume // not actually present for the reader. We now assume
// that the desc length is zero. // that the desc length is zero.
desc.Length = 0 desc.Size = 0
default: default:
// Any other error we want propagated up the stack. // Any other error we want propagated up the stack.
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
@ -161,14 +161,14 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
bw.size = fi.Size() bw.size = fi.Size()
} }
if desc.Length > 0 { if desc.Size > 0 {
if desc.Length != bw.size { if desc.Size != bw.size {
return distribution.Descriptor{}, distribution.ErrBlobInvalidLength return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
} }
} else { } else {
// if provided 0 or negative length, we can assume caller doesn't know or // if provided 0 or negative length, we can assume caller doesn't know or
// care about length. // care about length.
desc.Length = bw.size desc.Size = bw.size
} }
// TODO(stevvooe): This section is very meandering. Need to be broken down // TODO(stevvooe): This section is very meandering. Need to be broken down
@ -216,7 +216,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
} }
// Read the file from the backend driver and validate it. // Read the file from the backend driver and validate it.
fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Length) fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size)
if err != nil { if err != nil {
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} }

View file

@ -23,8 +23,8 @@ func ValidateDescriptor(desc distribution.Descriptor) error {
return err return err
} }
if desc.Length < 0 { if desc.Size < 0 {
return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Length) return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
} }
if desc.MediaType == "" { if desc.MediaType == "" {

View file

@ -66,17 +66,20 @@ func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Di
// stat provides an internal stat call that takes a connection parameter. This // stat provides an internal stat call that takes a connection parameter. This
// allows some internal management of the connection scope. // allows some internal management of the connection scope.
func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) {
reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype")) reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype"))
if err != nil { if err != nil {
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} }
if len(reply) < 2 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil // NOTE(stevvooe): The "size" field used to be "length". We treat a
// missing "size" field here as an unknown blob, which causes a cache
// miss, effectively migrating the field.
if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil
return distribution.Descriptor{}, distribution.ErrBlobUnknown return distribution.Descriptor{}, distribution.ErrBlobUnknown
} }
var desc distribution.Descriptor var desc distribution.Descriptor
if _, err := redis.Scan(reply, &desc.Digest, &desc.Length, &desc.MediaType); err != nil { if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil {
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} }
@ -104,7 +107,7 @@ func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst
func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst),
"digest", desc.Digest, "digest", desc.Digest,
"length", desc.Length); err != nil { "size", desc.Size); err != nil {
return err return err
} }

View file

@ -35,14 +35,14 @@ func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context,
if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
Digest: "sha384:abc", Digest: "sha384:abc",
Length: 10, Size: 10,
MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
t.Fatalf("expected error with invalid digest: %v", err) t.Fatalf("expected error with invalid digest: %v", err)
} }
if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{ if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{
Digest: "", Digest: "",
Length: 10, Size: 10,
MediaType: "application/octet-stream"}); err == nil { MediaType: "application/octet-stream"}); err == nil {
t.Fatalf("expected error setting value on invalid descriptor") t.Fatalf("expected error setting value on invalid descriptor")
} }
@ -60,7 +60,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
localDigest := digest.Digest("sha384:abc") localDigest := digest.Digest("sha384:abc")
expected := distribution.Descriptor{ expected := distribution.Descriptor{
Digest: "sha256:abc", Digest: "sha256:abc",
Length: 10, Size: 10,
MediaType: "application/octet-stream"} MediaType: "application/octet-stream"}
cache, err := provider.RepositoryScoped("foo/bar") cache, err := provider.RepositoryScoped("foo/bar")