content: cleanup service and interfaces
After implementing pull, a few changes are required to the content store interface to make sure that the implementation works smoothly. Specifically, we work to make sure the predeclaration path for digests works the same between remote and local writers. Before, we were hesitant to require the size and digest up front, but it became clear that having them provides significant benefit.

There are also several cleanups related to naming. We now call the expected digest `Expected` consistently across the board, and `Total` is used to mark the expected size. This whole effort comes together to provide a very smooth status-reporting workflow for image pull and push. This will be more obvious when the bulk of the pull code lands.

There are a few other changes to make `content.WriteBlob` more broadly useful. In accordance with the addition of predeclaring the expected size when getting a `Writer`, `WriteBlob` now supports this fully. It will also resume downloads if provided an `io.Seeker` or `io.ReaderAt`. Coupled with the `httpReadSeeker` from `docker/distribution`, we should be only a few lines of code away from resumable downloads.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
parent 0a5544d8c4
commit c062a85782
15 changed files with 568 additions and 337 deletions
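For orientation before the diff: the reshaped ingest path threads the expected size and digest through `Ingester.Writer` and `content.WriteBlob`. Below is a minimal sketch of a caller under the new signatures; the treat-exists-as-success policy is illustrative, not part of the commit.

```go
package main

import (
	"context"
	"io"

	"github.com/docker/containerd/content"
	digest "github.com/opencontainers/go-digest"
)

// ingest writes a blob whose size and digest are known up front. This is
// the predeclaration path this commit unifies for local and remote writers.
func ingest(ctx context.Context, ing content.Ingester, ref string, r io.Reader, size int64, dgst digest.Digest) error {
	err := content.WriteBlob(ctx, ing, ref, r, size, dgst)
	if err != nil && content.IsExists(err) {
		// The store already has this digest; treat as success.
		return nil
	}
	return err
}
```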
@@ -165,25 +165,26 @@ type WriteRequest struct {
 	Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.v1.WriteAction" json:"action,omitempty"`
 	// Ref identifies the pre-commit object to write to.
 	Ref string `protobuf:"bytes,2,opt,name=ref,proto3" json:"ref,omitempty"`
-	// ExpectedSize can be set to have the service validate the total size of
-	// the of committed content.
+	// Total can be set to have the service validate the total size of the
+	// committed content.
 	//
 	// The latest value before or with the commit action message will be use to
-	// validate the content. It is only required on one message for the write.
+	// validate the content. If the offset overflows total, the service may
+	// report an error. It is only required on one message for the write.
 	//
 	// If the value is zero or less, no validation of the final content will be
 	// performed.
-	ExpectedSize int64 `protobuf:"varint,3,opt,name=expected_size,json=expectedSize,proto3" json:"expected_size,omitempty"`
-	// ExpectedDigest can be set to have the service validate the final content
-	// against the provided digest.
+	Total int64 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"`
+	// Expected can be set to have the service validate the final content against
+	// the provided digest.
 	//
-	// If the digest is already present in the object store, an AlreadyPresent
+	// If the digest is already present in the object store, an AlreadyExists
 	// error will be returned.
 	//
 	// Only the latest version will be used to check the content against the
 	// digest. It is only required to include it on a single message, before or
 	// with the commit action message.
-	ExpectedDigest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,4,opt,name=expected_digest,json=expectedDigest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected_digest"`
+	Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,4,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"`
 	// Offset specifies the number of bytes from the start at which to begin
 	// the write. If zero or less, the write will be from the start. This uses
 	// standard zero-indexed semantics.
@@ -204,17 +205,30 @@ type WriteResponse struct {
 	// Action contains the action for the final message of the stream. A writer
 	// should confirm that they match the intended result.
 	Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.v1.WriteAction" json:"action,omitempty"`
-	// Offset provides the current "committed" size for the Write.
-	Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+	// StartedAt provides the time at which the write began.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	StartedAt time.Time `protobuf:"bytes,2,opt,name=started_at,json=startedAt,stdtime" json:"started_at"`
+	// UpdatedAt provides the last time of a successful write.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	UpdatedAt time.Time `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	// Offset is the current committed size for the write.
+	Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Total provides the current, expected total size of the write.
+	//
+	// We include this to provide consistency with the Status structure on the
+	// client writer.
+	//
+	// This is only valid on the Stat and Commit response.
+	Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
 	// Digest, if present, includes the digest up to the currently committed
 	// bytes. If action is commit, this field will be set. It is implementation
 	// defined if this is set for other actions, except abort. On abort, this
 	// will be empty.
-	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,3,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
-	// StartedAt is the time at which the write first started.
-	StartedAt time.Time `protobuf:"bytes,4,opt,name=started_at,json=startedAt,stdtime" json:"started_at"`
-	// UpdatedAt is the time the write was last updated.
-	UpdatedAt time.Time `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
 }
 
 func (m *WriteResponse) Reset()      { *m = WriteResponse{} }
@@ -231,10 +245,11 @@ func (*StatusRequest) ProtoMessage() {}
 func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{6} }
 
 type StatusResponse struct {
-	Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
-	Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
-	StartedAt time.Time `protobuf:"bytes,3,opt,name=started_at,json=startedAt,stdtime" json:"started_at"`
-	UpdatedAt time.Time `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	StartedAt time.Time `protobuf:"bytes,1,opt,name=started_at,json=startedAt,stdtime" json:"started_at"`
+	UpdatedAt time.Time `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
 }
 
 func (m *StatusResponse) Reset()      { *m = StatusResponse{} }
@@ -717,16 +732,16 @@ func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
 		i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
 		i += copy(dAtA[i:], m.Ref)
 	}
-	if m.ExpectedSize != 0 {
+	if m.Total != 0 {
 		dAtA[i] = 0x18
 		i++
-		i = encodeVarintContent(dAtA, i, uint64(m.ExpectedSize))
+		i = encodeVarintContent(dAtA, i, uint64(m.Total))
 	}
-	if len(m.ExpectedDigest) > 0 {
+	if len(m.Expected) > 0 {
 		dAtA[i] = 0x22
 		i++
-		i = encodeVarintContent(dAtA, i, uint64(len(m.ExpectedDigest)))
-		i += copy(dAtA[i:], m.ExpectedDigest)
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Expected)))
+		i += copy(dAtA[i:], m.Expected)
 	}
 	if m.Offset != 0 {
 		dAtA[i] = 0x28
@@ -762,18 +777,7 @@ func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) {
 		i++
 		i = encodeVarintContent(dAtA, i, uint64(m.Action))
 	}
-	if m.Offset != 0 {
-		dAtA[i] = 0x10
-		i++
-		i = encodeVarintContent(dAtA, i, uint64(m.Offset))
-	}
-	if len(m.Digest) > 0 {
-		dAtA[i] = 0x1a
-		i++
-		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
-		i += copy(dAtA[i:], m.Digest)
-	}
-	dAtA[i] = 0x22
+	dAtA[i] = 0x12
 	i++
 	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)))
 	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
@@ -781,7 +785,7 @@ func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) {
 		return 0, err
 	}
 	i += n2
-	dAtA[i] = 0x2a
+	dAtA[i] = 0x1a
 	i++
 	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
 	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
@@ -789,6 +793,22 @@ func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) {
 		return 0, err
 	}
 	i += n3
+	if m.Offset != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+	}
+	if m.Total != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Total))
+	}
+	if len(m.Digest) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+		i += copy(dAtA[i:], m.Digest)
+	}
 	return i, nil
 }
 
@@ -855,26 +875,15 @@ func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
-	if len(m.Ref) > 0 {
 	dAtA[i] = 0xa
 	i++
-		i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
-		i += copy(dAtA[i:], m.Ref)
-	}
-	if m.Offset != 0 {
-		dAtA[i] = 0x10
-		i++
-		i = encodeVarintContent(dAtA, i, uint64(m.Offset))
-	}
-	dAtA[i] = 0x1a
-	i++
 	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)))
 	n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n4
-	dAtA[i] = 0x22
+	dAtA[i] = 0x12
 	i++
 	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
 	n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
@@ -882,6 +891,22 @@ func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
 		return 0, err
 	}
 	i += n5
+	if len(m.Ref) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
+		i += copy(dAtA[i:], m.Ref)
+	}
+	if m.Offset != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+	}
+	if m.Total != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Total))
+	}
 	return i, nil
 }
 
@@ -976,10 +1001,10 @@ func (m *WriteRequest) Size() (n int) {
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
-	if m.ExpectedSize != 0 {
-		n += 1 + sovContent(uint64(m.ExpectedSize))
+	if m.Total != 0 {
+		n += 1 + sovContent(uint64(m.Total))
 	}
-	l = len(m.ExpectedDigest)
+	l = len(m.Expected)
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
@@ -999,17 +1024,20 @@ func (m *WriteResponse) Size() (n int) {
 	if m.Action != 0 {
 		n += 1 + sovContent(uint64(m.Action))
 	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)
+	n += 1 + l + sovContent(uint64(l))
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
+	n += 1 + l + sovContent(uint64(l))
 	if m.Offset != 0 {
 		n += 1 + sovContent(uint64(m.Offset))
 	}
+	if m.Total != 0 {
+		n += 1 + sovContent(uint64(m.Total))
+	}
 	l = len(m.Digest)
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
-	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)
-	n += 1 + l + sovContent(uint64(l))
-	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
-	n += 1 + l + sovContent(uint64(l))
 	return n
 }
 
@@ -1034,6 +1062,10 @@ func (m *StatusRequest) Size() (n int) {
 func (m *StatusResponse) Size() (n int) {
 	var l int
 	_ = l
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)
+	n += 1 + l + sovContent(uint64(l))
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
+	n += 1 + l + sovContent(uint64(l))
 	l = len(m.Ref)
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
@@ -1041,10 +1073,9 @@ func (m *StatusResponse) Size() (n int) {
 	if m.Offset != 0 {
 		n += 1 + sovContent(uint64(m.Offset))
 	}
-	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)
-	n += 1 + l + sovContent(uint64(l))
-	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
-	n += 1 + l + sovContent(uint64(l))
+	if m.Total != 0 {
+		n += 1 + sovContent(uint64(m.Total))
+	}
 	return n
 }
 
@@ -1113,8 +1144,8 @@ func (this *WriteRequest) String() string {
 	s := strings.Join([]string{`&WriteRequest{`,
 		`Action:` + fmt.Sprintf("%v", this.Action) + `,`,
 		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
-		`ExpectedSize:` + fmt.Sprintf("%v", this.ExpectedSize) + `,`,
-		`ExpectedDigest:` + fmt.Sprintf("%v", this.ExpectedDigest) + `,`,
+		`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
+		`Expected:` + fmt.Sprintf("%v", this.Expected) + `,`,
		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
 		`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
 		`}`,
@@ -1127,10 +1158,11 @@ func (this *WriteResponse) String() string {
 	}
 	s := strings.Join([]string{`&WriteResponse{`,
 		`Action:` + fmt.Sprintf("%v", this.Action) + `,`,
-		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
-		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
 		`StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
 		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
+		`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
+		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1151,10 +1183,11 @@ func (this *StatusResponse) String() string {
 		return "nil"
 	}
 	s := strings.Join([]string{`&StatusResponse{`,
-		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
-		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
 		`StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
 		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
+		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
+		`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1670,9 +1703,9 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 			iNdEx = postIndex
 		case 3:
 			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ExpectedSize", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
 			}
-			m.ExpectedSize = 0
+			m.Total = 0
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowContent
@@ -1682,14 +1715,14 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ExpectedSize |= (int64(b) & 0x7F) << shift
+				m.Total |= (int64(b) & 0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
 		case 4:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ExpectedDigest", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -1714,7 +1747,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ExpectedDigest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 5:
 			if wireType != 0 {
@@ -1836,54 +1869,6 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error {
 				}
 			}
 		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
-			}
-			m.Offset = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContent
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Offset |= (int64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContent
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthContent
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 4:
 			if wireType != 2 {
 				return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
 			}
@@ -1913,7 +1898,7 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
-		case 5:
+		case 3:
 			if wireType != 2 {
 				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
 			}
@@ -1943,6 +1928,73 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			m.Offset = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Offset |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
+			}
+			m.Total = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Total |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipContent(dAtA[iNdEx:])
@@ -2102,54 +2154,6 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error {
 			}
 		}
 		switch fieldNum {
 		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContent
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthContent
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Ref = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
-			}
-			m.Offset = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContent
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Offset |= (int64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
 			if wireType != 2 {
 				return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
 			}
@@ -2179,7 +2183,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
-		case 4:
+		case 2:
 			if wireType != 2 {
 				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
 			}
@@ -2209,6 +2213,73 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ref = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			m.Offset = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Offset |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
+			}
+			m.Total = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Total |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
 		default:
 			iNdEx = preIndex
 			skippy, err := skipContent(dAtA[iNdEx:])
@@ -2340,51 +2411,51 @@ func init() {
 }
 
 var fileDescriptorContent = []byte{
-	// 733 bytes of a gzipped FileDescriptorProto
+	// 734 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xb1, 0x6f, 0xd3, 0x4e,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xd3, 0x4a,
-	0x14, 0xc7, 0x73, 0x89, 0x93, 0xdf, 0x2f, 0x2f, 0x49, 0x1b, 0xae, 0x05, 0x45, 0x6e, 0xeb, 0x84,
+	0x14, 0xcd, 0xe4, 0xc3, 0xaf, 0xb9, 0x49, 0xfb, 0xf2, 0xa6, 0x7d, 0x4f, 0x91, 0xdb, 0x3a, 0x79,
-	0xb0, 0x44, 0x95, 0xb0, 0x4b, 0xd8, 0x60, 0xa8, 0x9c, 0x14, 0xaa, 0x22, 0x55, 0x95, 0xdc, 0x48,
+	0x59, 0x45, 0x95, 0xb0, 0x4b, 0xd8, 0xc1, 0xa2, 0x72, 0x02, 0x54, 0x45, 0x2a, 0x95, 0xdc, 0x48,
-	0x15, 0x62, 0x40, 0x4e, 0x72, 0x31, 0x16, 0xc4, 0x67, 0xec, 0x4b, 0x55, 0x31, 0x21, 0x24, 0x24,
+	0x15, 0x2b, 0xe4, 0xc4, 0x13, 0x63, 0xd1, 0x78, 0x8c, 0x3d, 0xa9, 0x2a, 0x56, 0x6c, 0x90, 0x50,
-	0xd4, 0x89, 0x7f, 0xa0, 0x2c, 0xb0, 0x23, 0x26, 0x24, 0x66, 0x86, 0x8e, 0x8c, 0x88, 0xa1, 0xd0,
+	0x57, 0xfc, 0x81, 0xb2, 0x81, 0x3d, 0x4b, 0x24, 0xfe, 0x00, 0x5d, 0xb2, 0x44, 0x5d, 0x14, 0x9a,
-	0xfc, 0x23, 0x20, 0x9f, 0xcf, 0x89, 0x9b, 0xa6, 0x43, 0x4b, 0xc9, 0xe2, 0x67, 0xbf, 0xf7, 0xbd,
+	0x05, 0x7f, 0x03, 0xe4, 0xf1, 0x38, 0x71, 0xda, 0xb0, 0x68, 0x28, 0xdd, 0xf8, 0x8e, 0xef, 0xbd,
-	0x79, 0xf7, 0xf1, 0x37, 0x2f, 0xb0, 0x6a, 0xd9, 0xec, 0xc9, 0xa0, 0xad, 0x76, 0x68, 0x5f, 0xeb,
+	0xa7, 0xf7, 0x1c, 0x9f, 0xb9, 0x81, 0x0d, 0xdb, 0x61, 0x4f, 0x06, 0x1d, 0xb5, 0x4b, 0xfb, 0x9a,
-	0xd2, 0xce, 0x53, 0xe2, 0x69, 0x1d, 0xea, 0x30, 0xd3, 0x76, 0x88, 0xd7, 0xd5, 0x4c, 0xd7, 0xd6,
+	0x45, 0xbb, 0x4f, 0x89, 0xaf, 0x75, 0xa9, 0xcb, 0x4c, 0xc7, 0x25, 0xbe, 0xa5, 0x99, 0x9e, 0xa3,
-	0x7c, 0xe2, 0xed, 0xda, 0x1d, 0xe2, 0xf3, 0xe7, 0xc4, 0x61, 0xd1, 0x55, 0x75, 0x3d, 0xca, 0x28,
+	0x05, 0xc4, 0x3f, 0x70, 0xba, 0x24, 0xe0, 0xef, 0x89, 0xcb, 0xe2, 0xa7, 0xea, 0xf9, 0x94, 0x51,
-	0x2e, 0x8c, 0xcb, 0xd5, 0xdd, 0x5b, 0xf2, 0xbc, 0x45, 0x2d, 0xca, 0x33, 0x5a, 0x10, 0x85, 0x45,
+	0x3c, 0x3f, 0x2e, 0x57, 0x0f, 0x6e, 0xca, 0x4b, 0x36, 0xb5, 0x29, 0xcf, 0x68, 0x61, 0x14, 0x15,
-	0x72, 0xd9, 0xa2, 0xd4, 0x7a, 0x46, 0x34, 0x7e, 0xd7, 0x1e, 0xf4, 0x34, 0x66, 0xf7, 0x89, 0xcf,
+	0xc9, 0x15, 0x9b, 0x52, 0x7b, 0x9f, 0x68, 0xfc, 0xd4, 0x19, 0xf4, 0x34, 0xe6, 0xf4, 0x49, 0xc0,
-	0xcc, 0xbe, 0x1b, 0x16, 0x54, 0x1f, 0x42, 0x6e, 0xc3, 0xe9, 0x51, 0x83, 0x3c, 0x1f, 0x10, 0x9f,
+	0xcc, 0xbe, 0x17, 0x15, 0xd4, 0x1e, 0x41, 0x61, 0xcb, 0xed, 0x51, 0x83, 0x3c, 0x1b, 0x90, 0x80,
-	0xe1, 0x07, 0x90, 0xe9, 0xda, 0x16, 0xf1, 0x59, 0x09, 0x55, 0x50, 0x2d, 0xdb, 0xa8, 0x1f, 0x1e,
+	0xe1, 0x07, 0x20, 0x59, 0x8e, 0x4d, 0x02, 0x56, 0x46, 0x55, 0x54, 0xcf, 0x37, 0x1b, 0x27, 0x67,
-	0x95, 0x13, 0x3f, 0x8e, 0xca, 0xcb, 0xb1, 0x69, 0xa9, 0x4b, 0x9c, 0xd1, 0x77, 0xfb, 0x9a, 0x45,
+	0x95, 0xd4, 0xe9, 0x59, 0x65, 0x2d, 0x31, 0x2d, 0xf5, 0x88, 0x3b, 0xfa, 0xdf, 0x81, 0x66, 0xd3,
-	0x6f, 0x86, 0x2d, 0xea, 0x1a, 0xbf, 0x18, 0x42, 0xa1, 0xfa, 0x19, 0x41, 0x3e, 0xd4, 0xf6, 0x5d,
+	0x1b, 0x51, 0x8b, 0x7a, 0x97, 0x3f, 0x0c, 0x81, 0x50, 0xfb, 0x80, 0xa0, 0x18, 0x61, 0x07, 0x1e,
-	0xea, 0xf8, 0xe4, 0x32, 0xc5, 0x31, 0x06, 0xc9, 0xb7, 0x5f, 0x90, 0x52, 0xb2, 0x82, 0x6a, 0x29,
+	0x75, 0x03, 0x72, 0x9d, 0xe0, 0x18, 0x43, 0x36, 0x70, 0x9e, 0x93, 0x72, 0xba, 0x8a, 0xea, 0x19,
-	0x83, 0xc7, 0x78, 0x1d, 0xf2, 0x1d, 0xda, 0xef, 0xdb, 0x8c, 0x91, 0xee, 0x63, 0x93, 0x95, 0x52,
+	0x83, 0xc7, 0x78, 0x13, 0x8a, 0x5d, 0xda, 0xef, 0x3b, 0x8c, 0x11, 0xeb, 0xb1, 0xc9, 0xca, 0x99,
-	0x15, 0x54, 0xcb, 0xd5, 0x65, 0x35, 0x64, 0xa0, 0x46, 0x0c, 0xd4, 0x56, 0xc4, 0xa0, 0xf1, 0x7f,
+	0x2a, 0xaa, 0x17, 0x1a, 0xb2, 0x1a, 0x69, 0xa0, 0xc6, 0x1a, 0xa8, 0xed, 0x58, 0x83, 0xe6, 0x5c,
-	0x30, 0xc1, 0xdb, 0x9f, 0x65, 0x64, 0xe4, 0x46, 0x9d, 0x3a, 0xab, 0xbe, 0x46, 0x90, 0x33, 0x88,
+	0x38, 0xc1, 0xeb, 0xaf, 0x15, 0x64, 0x14, 0x46, 0x9d, 0x3a, 0xab, 0xbd, 0x44, 0x50, 0x30, 0x88,
-	0xd9, 0xfd, 0x07, 0x54, 0xf0, 0x35, 0xc8, 0xd0, 0x5e, 0xcf, 0x27, 0x4c, 0x8c, 0x2e, 0xee, 0x46,
+	0x69, 0xfd, 0x01, 0x55, 0xf0, 0x7f, 0x20, 0xd1, 0x5e, 0x2f, 0x20, 0x4c, 0x8c, 0x2e, 0x4e, 0x23,
-	0x07, 0x4a, 0x8d, 0x0f, 0x54, 0xbd, 0x03, 0xf9, 0x70, 0x0c, 0x01, 0x70, 0xdc, 0x8b, 0x26, 0x7b,
+	0x42, 0x99, 0x31, 0xa1, 0xda, 0x6d, 0x28, 0x46, 0x63, 0x08, 0x01, 0xc7, 0xbd, 0xe8, 0x62, 0xaf,
-	0xbb, 0x26, 0x33, 0xb9, 0x62, 0xde, 0xe0, 0x71, 0xf5, 0x55, 0x12, 0xf2, 0x3b, 0x9e, 0xcd, 0x48,
+	0x65, 0x32, 0x93, 0x23, 0x16, 0x0d, 0x1e, 0xd7, 0xbe, 0x23, 0x28, 0xee, 0xf9, 0x0e, 0x23, 0x31,
-	0x74, 0x88, 0x3a, 0x64, 0xcc, 0x0e, 0xb3, 0xa9, 0xc3, 0x9b, 0x67, 0xea, 0xb2, 0x7a, 0xc2, 0x40,
+	0x89, 0x06, 0x48, 0x66, 0x97, 0x39, 0xd4, 0xe5, 0xcd, 0x0b, 0x0d, 0x59, 0x9d, 0x30, 0x90, 0xca,
-	0x2a, 0x2f, 0xd6, 0x79, 0x85, 0x21, 0x2a, 0x71, 0x11, 0x52, 0x1e, 0xe9, 0x71, 0xdd, 0xac, 0x11,
+	0x8b, 0x75, 0x5e, 0x61, 0x88, 0x4a, 0x5c, 0x82, 0x8c, 0x4f, 0x7a, 0x1c, 0x37, 0x6f, 0x84, 0x21,
-	0x84, 0xf8, 0x06, 0x14, 0xc8, 0x9e, 0x4b, 0x3a, 0x01, 0xe2, 0xd8, 0xbc, 0xf9, 0xe8, 0xe1, 0x76,
+	0x5e, 0x82, 0x1c, 0xa3, 0xcc, 0xdc, 0x17, 0x73, 0x46, 0x07, 0xfc, 0x10, 0xe6, 0xc8, 0xa1, 0x47,
-	0xf0, 0x22, 0x1e, 0xc1, 0xec, 0xa8, 0x48, 0x80, 0x93, 0x2e, 0x0c, 0x6e, 0x26, 0x92, 0x5a, 0x9b,
+	0xba, 0x8c, 0x58, 0xe5, 0xec, 0xcc, 0x12, 0x8d, 0x30, 0x12, 0x44, 0x73, 0x53, 0x89, 0x4a, 0x09,
-	0x04, 0x98, 0x9e, 0x0a, 0x21, 0x13, 0x83, 0xf0, 0x29, 0x09, 0x05, 0x01, 0x41, 0x20, 0xbc, 0x08,
+	0xa2, 0x9f, 0xd2, 0x30, 0x2f, 0x88, 0x0a, 0x99, 0x66, 0x61, 0xda, 0x02, 0x08, 0x98, 0xe9, 0x0b,
-	0x85, 0xb3, 0x5e, 0xd9, 0xd8, 0x16, 0xa9, 0xbf, 0xb6, 0x45, 0x13, 0xc0, 0x67, 0xa6, 0x27, 0x9c,
+	0xe7, 0xa4, 0xaf, 0xe0, 0x9c, 0xbc, 0xe8, 0xd3, 0x59, 0x08, 0x32, 0xf0, 0x2c, 0x73, 0x06, 0xfb,
-	0x2b, 0x9d, 0xc3, 0xb9, 0x59, 0xd1, 0xa7, 0x73, 0x91, 0x81, 0xdb, 0x35, 0x85, 0x48, 0xfa, 0x3c,
+	0xe5, 0x45, 0x9f, 0x9e, 0x34, 0x48, 0x76, 0x82, 0xfb, 0x48, 0xf9, 0x5c, 0x52, 0xf9, 0xb1, 0x35,
-	0x22, 0xa2, 0x4f, 0x67, 0xd5, 0xbb, 0x50, 0xd8, 0x66, 0x26, 0x1b, 0xf8, 0x91, 0x71, 0x30, 0x48,
+	0xa5, 0xdf, 0xbe, 0xb0, 0x77, 0x60, 0x7e, 0x97, 0x99, 0x6c, 0x10, 0xc4, 0x96, 0xc1, 0x90, 0xf5,
-	0x1e, 0xe9, 0xf9, 0x25, 0x54, 0x49, 0xd5, 0xb2, 0x06, 0x8f, 0x03, 0x24, 0xae, 0x47, 0x7a, 0xf6,
+	0x49, 0x2f, 0x28, 0xa3, 0x6a, 0xa6, 0x9e, 0x37, 0x78, 0x1c, 0x8e, 0xe7, 0xf9, 0xa4, 0xe7, 0x1c,
-	0x5e, 0x29, 0xc9, 0x9f, 0x8a, 0xbb, 0xea, 0x57, 0x04, 0x33, 0x51, 0xb7, 0x20, 0x2e, 0x3c, 0x84,
+	0x96, 0xd3, 0xfc, 0xad, 0x38, 0xd5, 0x4e, 0x11, 0x2c, 0xc4, 0xdd, 0xe2, 0x3b, 0x4c, 0x6a, 0x8a,
-	0xc6, 0x1e, 0x3a, 0x8b, 0xe7, 0x49, 0x06, 0xa9, 0xcb, 0x60, 0x20, 0x5d, 0x88, 0xc1, 0xf2, 0x47,
+	0xae, 0x43, 0xd3, 0xf4, 0x6c, 0x9a, 0x0a, 0x1f, 0x67, 0xc6, 0x3e, 0xbe, 0x92, 0xca, 0x6b, 0xef,
-	0x04, 0xb9, 0x98, 0x13, 0xf0, 0x12, 0x48, 0xdb, 0x2d, 0xbd, 0x55, 0x4c, 0xc8, 0x73, 0xfb, 0x07,
+	0x11, 0x14, 0x12, 0xae, 0xc1, 0xab, 0x90, 0xdd, 0x6d, 0xeb, 0xed, 0x52, 0x4a, 0x5e, 0x3c, 0x3a,
-	0x95, 0xd9, 0x58, 0x2a, 0x38, 0x2c, 0x2e, 0x43, 0x7a, 0xc7, 0xd8, 0x68, 0xdd, 0x2b, 0x22, 0x79,
+	0xae, 0xfe, 0x9d, 0x48, 0x85, 0x12, 0xe0, 0x0a, 0xe4, 0xf6, 0x8c, 0xad, 0xf6, 0xbd, 0x12, 0x92,
-	0x7e, 0xff, 0xa0, 0x52, 0x8c, 0xe5, 0x79, 0x88, 0xaf, 0x43, 0xa6, 0xb9, 0xb5, 0xb9, 0xb9, 0xd1,
+	0x97, 0x8e, 0x8e, 0xab, 0xa5, 0x44, 0x9e, 0x87, 0xf8, 0x7f, 0x90, 0x5a, 0x3b, 0xdb, 0xdb, 0x5b,
-	0x2a, 0x26, 0xe5, 0xab, 0xfb, 0x07, 0x95, 0x2b, 0xb1, 0x8a, 0x26, 0x5f, 0x3c, 0xb8, 0x06, 0x69,
+	0xed, 0x52, 0x5a, 0xfe, 0xf7, 0xe8, 0xb8, 0xfa, 0x4f, 0xa2, 0xa2, 0xc5, 0x17, 0x11, 0xae, 0x43,
-	0xbd, 0xb1, 0x65, 0xb4, 0x8a, 0xbf, 0xa3, 0xcf, 0x69, 0x31, 0xbd, 0x4d, 0x3d, 0x26, 0xcf, 0xbd,
+	0x4e, 0x6f, 0xee, 0x18, 0xed, 0xd2, 0x8f, 0xf8, 0xef, 0x32, 0x98, 0xde, 0xa1, 0x3e, 0x93, 0x17,
-	0x79, 0xaf, 0x24, 0xbe, 0x7c, 0x50, 0xe2, 0x13, 0xd6, 0xdf, 0x25, 0xe1, 0xbf, 0x66, 0xf8, 0xff,
+	0x5f, 0xbd, 0x55, 0x52, 0x1f, 0xdf, 0x29, 0xc9, 0x09, 0x1b, 0x6f, 0xd2, 0xf0, 0x57, 0x2b, 0xfa,
-	0x80, 0x57, 0x41, 0x0a, 0xf6, 0x2e, 0x9e, 0xf4, 0x76, 0x6c, 0xd1, 0xcb, 0x0b, 0x53, 0x73, 0xe2,
+	0xbd, 0xc0, 0x1b, 0x90, 0x0d, 0xf7, 0x30, 0xbe, 0x78, 0x0f, 0x12, 0x8b, 0x5f, 0x5e, 0x9e, 0x9a,
-	0x95, 0xe9, 0x20, 0x05, 0x7b, 0xe7, 0x94, 0x40, 0x6c, 0x27, 0x9e, 0x12, 0x88, 0x2f, 0xaa, 0x15,
+	0x13, 0x1f, 0x52, 0x87, 0x6c, 0xb8, 0x87, 0x2e, 0x01, 0x24, 0x76, 0xe4, 0x25, 0x80, 0xe4, 0xe2,
-	0x84, 0xd7, 0x21, 0x13, 0xfa, 0x00, 0x2f, 0x4e, 0x14, 0x9e, 0x30, 0x97, 0xbc, 0x74, 0x46, 0x76,
+	0x5a, 0x47, 0x78, 0x13, 0xa4, 0xc8, 0x1d, 0x78, 0xe5, 0x42, 0xe1, 0x84, 0xe5, 0xe4, 0xd5, 0x5f,
-	0x24, 0x74, 0x1f, 0xd2, 0x21, 0xc3, 0x85, 0x69, 0xbf, 0xd4, 0x48, 0x66, 0x71, 0x7a, 0x32, 0x54,
+	0x64, 0x47, 0x40, 0xf7, 0x21, 0x17, 0x69, 0xb8, 0x3c, 0xed, 0x56, 0xc7, 0x30, 0x2b, 0xd3, 0x93,
-	0xa9, 0xa1, 0x15, 0xd4, 0x28, 0x1d, 0x1e, 0x2b, 0x89, 0xef, 0xc7, 0x4a, 0xe2, 0xe5, 0x50, 0x41,
+	0x11, 0x4a, 0x1d, 0xad, 0xa3, 0x66, 0xf9, 0xe4, 0x5c, 0x49, 0x7d, 0x39, 0x57, 0x52, 0x2f, 0x86,
-	0x87, 0x43, 0x05, 0x7d, 0x1b, 0x2a, 0xe8, 0xd7, 0x50, 0x41, 0xed, 0x0c, 0x77, 0xc5, 0xed, 0x3f,
+	0x0a, 0x3a, 0x19, 0x2a, 0xe8, 0xf3, 0x50, 0x41, 0xdf, 0x86, 0x0a, 0xea, 0x48, 0xdc, 0x55, 0xb7,
-	0x01, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xe6, 0x59, 0x92, 0x92, 0x07, 0x00, 0x00,
+	0x7e, 0x06, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x7a, 0xfb, 0xa8, 0xa2, 0x07, 0x00, 0x00,
 }
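A note on the magic bytes in the generated marshal code above (`0x12`, `0x1a`, `0x20`, `0x28`, `0x32`): each is a protobuf field key, computed as `(field_number << 3) | wire_type`. The renumbered fields are visible directly in those constants; a quick check in Go:

```go
package main

import "fmt"

// fieldKey computes the protobuf tag byte that prefixes each field on the
// wire: the field number shifted left three bits, OR'd with the wire type
// (0 = varint, 2 = length-delimited).
func fieldKey(fieldNumber, wireType uint) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("%#x\n", fieldKey(2, 2)) // 0x12: started_at, field 2
	fmt.Printf("%#x\n", fieldKey(3, 2)) // 0x1a: updated_at, field 3
	fmt.Printf("%#x\n", fieldKey(4, 0)) // 0x20: offset, field 4
	fmt.Printf("%#x\n", fieldKey(5, 0)) // 0x28: total, field 5
	fmt.Printf("%#x\n", fieldKey(6, 2)) // 0x32: digest, field 6
}
```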
@@ -133,26 +133,27 @@ message WriteRequest {
 	// Ref identifies the pre-commit object to write to.
 	string ref = 2;
 
-	// ExpectedSize can be set to have the service validate the total size of
-	// the of committed content.
+	// Total can be set to have the service validate the total size of the
+	// committed content.
 	//
 	// The latest value before or with the commit action message will be use to
-	// validate the content. It is only required on one message for the write.
+	// validate the content. If the offset overflows total, the service may
+	// report an error. It is only required on one message for the write.
 	//
 	// If the value is zero or less, no validation of the final content will be
 	// performed.
-	int64 expected_size = 3;
+	int64 total = 3;
 
-	// ExpectedDigest can be set to have the service validate the final content
-	// against the provided digest.
+	// Expected can be set to have the service validate the final content against
+	// the provided digest.
 	//
-	// If the digest is already present in the object store, an AlreadyPresent
+	// If the digest is already present in the object store, an AlreadyExists
 	// error will be returned.
 	//
 	// Only the latest version will be used to check the content against the
 	// digest. It is only required to include it on a single message, before or
 	// with the commit action message.
-	string expected_digest = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 
 	// Offset specifies the number of bytes from the start at which to begin
 	// the write. If zero or less, the write will be from the start. This uses
@@ -172,20 +173,34 @@ message WriteResponse {
 	// should confirm that they match the intended result.
 	WriteAction action = 1;
 
-	// Offset provides the current "committed" size for the Write.
-	int64 offset = 2;
+	// StartedAt provides the time at which the write began.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// UpdatedAt provides the last time of a successful write.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// Offset is the current committed size for the write.
+	int64 offset = 4;
+
+	// Total provides the current, expected total size of the write.
+	//
+	// We include this to provide consistency with the Status structure on the
+	// client writer.
+	//
+	// This is only valid on the Stat and Commit response.
+	int64 total = 5;
+
 	// Digest, if present, includes the digest up to the currently committed
 	// bytes. If action is commit, this field will be set. It is implementation
 	// defined if this is set for other actions, except abort. On abort, this
 	// will be empty.
-	string digest = 3 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-
-	// StartedAt is the time at which the write first started.
-	google.protobuf.Timestamp started_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-
-	// UpdatedAt is the time the write was last updated.
-	google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 }
 
 message StatusRequest {
@@ -194,8 +209,9 @@ message StatusRequest {
 }
 
 message StatusResponse {
-	string ref = 1;
-	int64 offset = 2;
-	google.protobuf.Timestamp started_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-	google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	string ref = 3;
+	int64 offset = 4;
+	int64 total = 5;
 }
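The `(gogoproto.stdtime) = true, (gogoproto.nullable) = false` options on the timestamp fields are what produce the value-typed `time.Time` fields seen in the generated structs earlier, rather than `*google_protobuf1.Timestamp` pointers. A sketch of the resulting Go shape, for illustration only:

```go
package content

import "time"

// With (gogoproto.stdtime) = true and (gogoproto.nullable) = false, the
// generator emits plain time.Time values instead of *types.Timestamp
// pointers, so callers never need to nil-check timestamps.
type StatusResponse struct {
	StartedAt time.Time
	UpdatedAt time.Time
	Ref       string
	Offset    int64
	Total     int64
}
```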
cmd/dist/common.go (2 changes)
@@ -11,7 +11,7 @@ import (
 )
 
 func resolveContentStore(context *cli.Context) (*content.Store, error) {
-	root := context.GlobalString("root")
+	root := filepath.Join(context.GlobalString("root"), "content")
 	if !filepath.IsAbs(root) {
 		var err error
 		root, err = filepath.Abs(root)
cmd/dist/fetch.go (12 changes)
@@ -100,8 +100,8 @@ var fetchCommand = cli.Command{
 
 // getResolver prepares the resolver from the environment and options.
 func getResolver(ctx contextpkg.Context) (remotes.Resolver, error) {
-	return remotes.ResolverFunc(func(ctx contextpkg.Context, locator string) (remotes.Remote, error) {
-		if !strings.HasPrefix(locator, "docker.io") {
+	return remotes.ResolverFunc(func(ctx contextpkg.Context, locator string) (remotes.Fetcher, error) {
+		if !strings.HasPrefix(locator, "docker.io") && !strings.HasPrefix(locator, "localhost:5000") {
 			return nil, errors.Errorf("unsupported locator: %q", locator)
 		}
 
@@ -113,12 +113,18 @@ func getResolver(ctx contextpkg.Context) (remotes.Resolver, error) {
 			prefix = strings.TrimPrefix(locator, "docker.io/")
 		)
 
+		if strings.HasPrefix(locator, "localhost:5000") {
+			base.Scheme = "http"
+			base.Host = "localhost:5000"
+			prefix = strings.TrimPrefix(locator, "localhost:5000/")
+		}
+
 		token, err := getToken(ctx, "repository:"+prefix+":pull")
 		if err != nil {
 			return nil, err
 		}
 
-		return remotes.RemoteFunc(func(ctx contextpkg.Context, object string, hints ...string) (io.ReadCloser, error) {
+		return remotes.FetcherFunc(func(ctx contextpkg.Context, object string, hints ...string) (io.ReadCloser, error) {
 			ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
 				logrus.Fields{
 					"prefix": prefix, // or repo?
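The rename from `remotes.Remote`/`RemoteFunc` to `remotes.Fetcher`/`FetcherFunc` keeps the same shape: a function from an object name (plus optional hints) to a byte stream. Below is a hedged sketch of a trivial fetcher against the insecure local registry wired up above; the `/v2/<name>/manifests/<object>` route is the standard registry layout and is assumed here, not taken from this diff, as is the assumption that `FetcherFunc` adapts a plain function to the `Fetcher` interface the way its use in `getResolver` suggests.

```go
package main

import (
	"context"
	"io"
	"net/http"

	"github.com/docker/containerd/remotes"
)

// newLocalFetcher returns a toy Fetcher in the shape getResolver produces:
// object name in, byte stream out. The caller owns closing the body.
func newLocalFetcher(name string) remotes.Fetcher {
	return remotes.FetcherFunc(func(ctx context.Context, object string, hints ...string) (io.ReadCloser, error) {
		req, err := http.NewRequest("GET", "http://localhost:5000/v2/"+name+"/manifests/"+object, nil)
		if err != nil {
			return nil, err
		}
		resp, err := http.DefaultClient.Do(req.WithContext(ctx))
		if err != nil {
			return nil, err
		}
		return resp.Body, nil
	})
}
```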
cmd/dist/ingest.go (2 changes)
@@ -56,6 +56,6 @@ var ingestCommand = cli.Command{
 		// TODO(stevvooe): Allow ingest to be reentrant. Currently, we expect
 		// all data to be written in a single invocation. Allow multiple writes
 		// to the same transaction key followed by a commit.
-		return content.WriteBlob(ctx, ingester, os.Stdin, ref, expectedSize, expectedDigest)
+		return content.WriteBlob(ctx, ingester, ref, os.Stdin, expectedSize, expectedDigest)
 	},
 }
cmd/dist/main.go (4 changes)
@@ -38,9 +38,11 @@ distribution tool
 			EnvVar: "CONTAINERD_FETCH_TIMEOUT",
 		},
 		cli.StringFlag{
+			// TODO(stevvooe): for now, we allow circumventing the GRPC. Once
+			// we have clear separation, this will likely go away.
 			Name:  "root",
 			Usage: "path to content store root",
-			Value: "/tmp/content", // TODO(stevvooe): for now, just use the PWD/.content
+			Value: "/var/lib/containerd",
 		},
 		cli.StringFlag{
 			Name: "socket, s",
@@ -4,6 +4,9 @@ import (
 	"context"
 	"io"
 
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+
 	contentapi "github.com/docker/containerd/api/services/content"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
@@ -83,10 +86,10 @@ type remoteIngester struct {
 	client contentapi.ContentClient
 }
 
-func (ri *remoteIngester) Writer(ctx context.Context, ref string) (Writer, error) {
-	wrclient, offset, err := ri.negotiate(ctx, ref)
+func (ri *remoteIngester) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (Writer, error) {
+	wrclient, offset, err := ri.negotiate(ctx, ref, size, expected)
 	if err != nil {
-		return nil, err
+		return nil, rewriteGRPCError(err)
 	}
 
 	return &remoteWriter{
@@ -95,7 +98,7 @@ func (ri *remoteIngester) Writer(ctx context.Context, ref string) (Writer, error) {
 	}, nil
 }
 
-func (ri *remoteIngester) negotiate(ctx context.Context, ref string) (contentapi.Content_WriteClient, int64, error) {
+func (ri *remoteIngester) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
 	wrclient, err := ri.client.Write(ctx)
 	if err != nil {
 		return nil, 0, err
@@ -104,6 +107,8 @@ func (ri *remoteIngester) negotiate(ctx context.Context, ref string) (contentapi.Content_WriteClient, int64, error) {
 	if err := wrclient.Send(&contentapi.WriteRequest{
 		Action: contentapi.WriteActionStat,
 		Ref: ref,
+		Total: size,
+		Expected: expected,
 	}); err != nil {
 		return nil, 0, err
 	}
@@ -193,11 +198,11 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
 func (rw *remoteWriter) Commit(size int64, expected digest.Digest) error {
 	resp, err := rw.send(&contentapi.WriteRequest{
 		Action: contentapi.WriteActionCommit,
-		ExpectedSize: size,
-		ExpectedDigest: expected,
+		Total: size,
+		Expected: expected,
 	})
 	if err != nil {
-		return err
+		return rewriteGRPCError(err)
 	}
 
 	if size != 0 && resp.Offset != size {
@@ -205,7 +210,7 @@ func (rw *remoteWriter) Commit(size int64, expected digest.Digest) error {
 	}
 
 	if expected != "" && resp.Digest != expected {
-		return errors.New("unexpected digest")
+		return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
 	}
 
 	return nil
@@ -214,3 +219,14 @@ func (rw *remoteWriter) Commit(size int64, expected digest.Digest) error {
 func (rw *remoteWriter) Close() error {
 	return rw.client.CloseSend()
 }
+
+func rewriteGRPCError(err error) error {
+	switch grpc.Code(errors.Cause(err)) {
+	case codes.AlreadyExists:
+		return errExists
+	case codes.NotFound:
+		return errNotFound
+	}
+
+	return err
+}
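`rewriteGRPCError` is what lets the remote ingester behave like the local store: gRPC status codes come back as the package's sentinel errors, so one check works against either implementation. A sketch of the calling pattern this enables; the skip-on-exists policy is illustrative:

```go
package main

import (
	"context"

	"github.com/docker/containerd/content"
	digest "github.com/opencontainers/go-digest"
)

// openWriter shows the error symmetry rewriteGRPCError buys: AlreadyExists
// from the remote service surfaces as the same sentinel the local store
// uses, so content.IsExists works for both ingesters.
func openWriter(ctx context.Context, ing content.Ingester, ref string, size int64, dgst digest.Digest) (content.Writer, error) {
	w, err := ing.Writer(ctx, ref, size, dgst)
	if err != nil {
		if content.IsExists(err) {
			// Blob already committed; the caller can skip this transfer.
			return nil, nil
		}
		return nil, err
	}
	return w, nil
}
```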
@@ -12,6 +12,7 @@ import (
 
 var (
 	errNotFound = errors.New("content: not found")
+	errExists   = errors.New("content: exists")
 
 	BufPool = sync.Pool{
 		New: func() interface{} {
@@ -33,6 +34,7 @@ type Provider interface {
 type Status struct {
 	Ref       string
 	Offset    int64
+	Total     int64
 	StartedAt time.Time
 	UpdatedAt time.Time
 }
@@ -45,9 +47,13 @@ type Writer interface {
 }
 
 type Ingester interface {
-	Writer(ctx context.Context, ref string) (Writer, error)
+	Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (Writer, error)
 }
 
 func IsNotFound(err error) bool {
 	return errors.Cause(err) == errNotFound
 }
+
+func IsExists(err error) bool {
+	return errors.Cause(err) == errExists
+}
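Note: the `Total` field on `Status`, combined with the widened `Ingester.Writer` signature, is what makes status reporting self-contained: once the expected size is predeclared, a single `Status` call is enough to compute completion for an in-flight ingest. A sketch under that assumption (the helper is illustrative):

    package example

    import (
        "fmt"

        "github.com/docker/containerd/content"
    )

    // reportProgress prints a progress line for an in-flight ingest.
    func reportProgress(wr content.Writer) error {
        ws, err := wr.Status()
        if err != nil {
            return err
        }

        if ws.Total > 0 {
            fmt.Printf("%s: %d/%d bytes (%.1f%%)\n",
                ws.Ref, ws.Offset, ws.Total,
                float64(ws.Offset)/float64(ws.Total)*100)
        } else {
            // no total was declared; the offset is all we know.
            fmt.Printf("%s: %d bytes so far\n", ws.Ref, ws.Offset)
        }
        return nil
    }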
@@ -30,7 +30,7 @@ func TestContentWriter(t *testing.T) {
 		t.Fatal("ingest dir should be created", err)
 	}
 
-	cw, err := cs.Writer(ctx, "myref")
+	cw, err := cs.Writer(ctx, "myref", 0, "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -39,13 +39,13 @@ func TestContentWriter(t *testing.T) {
 	}
 
 	// reopen, so we can test things
-	cw, err = cs.Writer(ctx, "myref")
+	cw, err = cs.Writer(ctx, "myref", 0, "")
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	// make sure that second resume also fails
-	if _, err = cs.Writer(ctx, "myref"); err == nil {
+	if _, err = cs.Writer(ctx, "myref", 0, ""); err == nil {
 		// TODO(stevvooe): This also works across processes. Need to find a way
 		// to test that, as well.
 		t.Fatal("no error on second resume")
@@ -88,7 +88,7 @@ func TestContentWriter(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	cw, err = cs.Writer(ctx, "aref")
+	cw, err = cs.Writer(ctx, "aref", 0, "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -269,7 +269,7 @@ func checkBlobPath(t *testing.T, cs *Store, dgst digest.Digest) string {
 	}
 
 func checkWrite(t checker, ctx context.Context, cs *Store, dgst digest.Digest, p []byte) digest.Digest {
-	if err := WriteBlob(ctx, cs, bytes.NewReader(p), dgst.String(), int64(len(p)), dgst); err != nil {
+	if err := WriteBlob(ctx, cs, dgst.String(), bytes.NewReader(p), int64(len(p)), dgst); err != nil {
 		t.Fatal(err)
 	}
@@ -2,6 +2,7 @@ package content
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"io/ioutil"
 
@@ -16,42 +17,72 @@ import (
 // This is useful when the digest and size are known beforehand.
 //
 // Copy is buffered, so no need to wrap reader in buffered io.
-func WriteBlob(ctx context.Context, cs Ingester, r io.Reader, ref string, size int64, expected digest.Digest) error {
-	cw, err := cs.Writer(ctx, ref)
+func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size int64, expected digest.Digest) error {
+	cw, err := cs.Writer(ctx, ref, size, expected)
 	if err != nil {
+		if !IsExists(err) {
 			return err
 		}
 
+		return nil // already present
+	}
 
 	ws, err := cw.Status()
 	if err != nil {
 		return err
 	}
 
 	if ws.Offset > 0 {
-		// Arbitrary limitation for now. We can detect io.Seeker on r and
-		// resume.
-		return errors.Errorf("cannot resume already started write")
+		r, err = seekReader(r, ws.Offset, size)
+		if err != nil {
+			return errors.Wrapf(err, "unable to resume write to %v", ref)
+		}
 	}
 
 	buf := BufPool.Get().([]byte)
 	defer BufPool.Put(buf)
 
-	nn, err := io.CopyBuffer(cw, r, buf)
-	if err != nil {
+	if _, err := io.CopyBuffer(cw, r, buf); err != nil {
 		return err
 	}
 
-	if size > 0 && nn != size {
-		return errors.Errorf("failed size verification: %v != %v", nn, size)
-	}
-
 	if err := cw.Commit(size, expected); err != nil {
+		if !IsExists(err) {
 			return err
 		}
+	}
 
 	return nil
 }
+
+// seekReader attempts to seek the reader to the given offset, either by
+// resolving `io.Seeker` or by detecting `io.ReaderAt`.
+func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
+	// attempt to resolve r as a seeker and setup the offset.
+	seeker, ok := r.(io.Seeker)
+	if ok {
+		nn, err := seeker.Seek(offset, io.SeekStart)
+		if nn != offset {
+			return nil, fmt.Errorf("failed to seek to offset %v", offset)
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		return r, nil
+	}
+
+	// ok, let's try io.ReaderAt!
+	readerAt, ok := r.(io.ReaderAt)
+	if ok && size > offset {
+		sr := io.NewSectionReader(readerAt, offset, size)
+		return sr, nil
+	}
+
+	return nil, errors.Errorf("cannot seek to offset %v", offset)
+}
 
 func readFileString(path string) (string, error) {
 	p, err := ioutil.ReadFile(path)
 	return string(p), err
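Note: with `seekReader` in place, resumption comes for free whenever the source supports it. `*os.File` implements both `io.Seeker` and `io.ReaderAt`, so retrying an interrupted `WriteBlob` under the same ref fast-forwards the source to the recorded offset instead of failing. A sketch, with hypothetical paths and wiring:

    package example

    import (
        "context"
        "os"

        "github.com/docker/containerd/content"
        digest "github.com/opencontainers/go-digest"
    )

    // resumableWrite retries a blob write from a local file. Reusing
    // the digest as the ref means a retry after a crash picks up the
    // same ingest transaction and resumes mid-stream.
    func resumableWrite(ctx context.Context, cs content.Ingester, path string, size int64, dgst digest.Digest) error {
        fp, err := os.Open(path)
        if err != nil {
            return err
        }
        defer fp.Close()

        return content.WriteBlob(ctx, cs, dgst.String(), fp, size, dgst)
    }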
@@ -2,10 +2,12 @@ package content
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"strconv"
 	"syscall"
 	"time"
 
@@ -165,17 +167,34 @@ func (s *Store) status(ingestPath string) (Status, error) {
 	return Status{
 		Ref:       ref,
 		Offset:    fi.Size(),
+		Total:     s.total(ingestPath),
 		UpdatedAt: fi.ModTime(),
 		StartedAt: startedAt,
 	}, nil
 }
 
+// total attempts to resolve the total expected size for the write.
+func (s *Store) total(ingestPath string) int64 {
+	totalS, err := readFileString(filepath.Join(ingestPath, "total"))
+	if err != nil {
+		return 0
+	}
+
+	total, err := strconv.ParseInt(totalS, 10, 64)
+	if err != nil {
+		// represents a corrupted file, should probably remove.
+		return 0
+	}
+
+	return total
+}
+
 // Writer begins or resumes the active writer identified by ref. If the writer
 // is already in use, an error is returned. Only one writer may be in use per
 // ref at a time.
 //
 // The argument `ref` is used to uniquely identify a long-lived writer transaction.
-func (s *Store) Writer(ctx context.Context, ref string) (Writer, error) {
+func (s *Store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (Writer, error) {
 	path, refp, data, lock, err := s.ingestPaths(ref)
 	if err != nil {
 		return nil, err
@@ -202,16 +221,19 @@ func (s *Store) Writer(ctx context.Context, ref string) (Writer, error) {
 		return nil, err
 	}
 
-	// validate that we have no collision for the ref.
-	refraw, err := readFileString(refp)
+	status, err := s.status(path)
 	if err != nil {
-		return nil, errors.Wrap(err, "could not read ref")
+		return nil, errors.Wrap(err, "failed reading status of resume write")
 	}
 
-	if ref != refraw {
+	if ref != status.Ref {
 		// NOTE(stevvooe): This is fairly catastrophic. Either we have some
 		// layout corruption or a hash collision for the ref key.
-		return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, refraw)
+		return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
+	}
+
+	if total > 0 && status.Total > 0 && total != status.Total {
+		return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
 	}
 
 	// slow slow slow!!, send to goroutine or use resumable hashes
@@ -229,18 +251,9 @@ func (s *Store) Writer(ctx context.Context, ref string) (Writer, error) {
 		return nil, err
 	}
 
-		fi, err := os.Stat(data)
-		if err != nil {
-			return nil, err
-		}
-
-		updatedAt = fi.ModTime()
-
-		if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-			startedAt = time.Unix(st.Ctim.Sec, st.Ctim.Nsec)
-		} else {
-			startedAt = updatedAt
-		}
+		updatedAt = status.UpdatedAt
+		startedAt = status.StartedAt
+		total = status.Total
 	} else {
 		// the ingest is new, we need to setup the target location.
 		// write the ref to a file for later use
@@ -248,6 +261,12 @@ func (s *Store) Writer(ctx context.Context, ref string) (Writer, error) {
 			return nil, err
 		}
 
+		if total > 0 {
+			if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
+				return nil, err
+			}
+		}
+
 		startedAt = time.Now()
 		updatedAt = startedAt
 	}
@@ -264,6 +283,7 @@ func (s *Store) Writer(ctx context.Context, ref string) (Writer, error) {
 		ref:       ref,
 		path:      path,
 		offset:    offset,
+		total:     total,
 		digester:  digester,
 		startedAt: startedAt,
 		updatedAt: updatedAt,
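Note: the `total` file written into the ingest directory is what lets the declared size survive process restarts: `status` reads it back via `total()`, and `Writer` rejects a conflicting redeclaration. A sketch of that round trip, assuming `cs` is an open `*content.Store` and that `Close` (unlike `Abort`) leaves the ingest in place for resumption:

    package example

    import (
        "context"
        "fmt"

        "github.com/docker/containerd/content"
    )

    // demonstrateTotalCheck declares a total, releases the writer, then
    // reopens the same ref with a conflicting total. Illustrative only.
    func demonstrateTotalCheck(ctx context.Context, cs *content.Store) {
        w, err := cs.Writer(ctx, "demo-ref", 1024, "")
        if err != nil {
            panic(err)
        }
        w.Close() // releases the lock but keeps the partial ingest

        // Reopening with a different non-zero total now fails:
        // "provided total differs from status: 2048 != 1024"
        if _, err := cs.Writer(ctx, "demo-ref", 2048, ""); err != nil {
            fmt.Println(err)
        }
    }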
@@ -1,11 +1,11 @@
 package content
 
 import (
-	"log"
 	"os"
 	"path/filepath"
 	"time"
 
+	"github.com/docker/containerd/log"
 	"github.com/nightlyone/lockfile"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
@@ -19,6 +19,7 @@ type writer struct {
 	path      string // path to writer dir
 	ref       string // ref key
 	offset    int64
+	total     int64
 	digester  digest.Digester
 	startedAt time.Time
 	updatedAt time.Time
@@ -28,6 +29,7 @@ func (w *writer) Status() (Status, error) {
 	return Status{
 		Ref:       w.ref,
 		Offset:    w.offset,
+		Total:     w.total,
 		StartedAt: w.startedAt,
 		UpdatedAt: w.updatedAt,
 	}, nil
@@ -52,12 +54,12 @@ func (w *writer) Write(p []byte) (n int, err error) {
 	return n, err
 }
 
-func (cw *writer) Commit(size int64, expected digest.Digest) error {
-	if err := cw.fp.Sync(); err != nil {
+func (w *writer) Commit(size int64, expected digest.Digest) error {
+	if err := w.fp.Sync(); err != nil {
 		return errors.Wrap(err, "sync failed")
 	}
 
-	fi, err := cw.fp.Stat()
+	fi, err := w.fp.Stat()
 	if err != nil {
 		return errors.Wrap(err, "stat on ingest file failed")
 	}
@@ -67,7 +69,7 @@ func (cw *writer) Commit(size int64, expected digest.Digest) error {
 	// only allowing reads honoring the umask on creation.
 	//
 	// This removes write and exec, only allowing read per the creation umask.
-	if err := cw.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
+	if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
 		return errors.Wrap(err, "failed to change ingest file permissions")
 	}
 
@@ -75,18 +77,18 @@ func (cw *writer) Commit(size int64, expected digest.Digest) error {
 		return errors.Errorf("failed size validation: %v != %v", fi.Size(), size)
 	}
 
-	if err := cw.fp.Close(); err != nil {
+	if err := w.fp.Close(); err != nil {
 		return errors.Wrap(err, "failed closing ingest")
 	}
 
-	dgst := cw.digester.Digest()
+	dgst := w.digester.Digest()
 	if expected != "" && expected != dgst {
 		return errors.Errorf("unexpected digest: %v != %v", dgst, expected)
 	}
 
 	var (
-		ingest = filepath.Join(cw.path, "data")
-		target = cw.s.blobPath(dgst)
+		ingest = filepath.Join(w.path, "data")
+		target = w.s.blobPath(dgst)
 	)
 
 	// make sure parent directories of blob exist
@@ -95,18 +97,18 @@ func (cw *writer) Commit(size int64, expected digest.Digest) error {
 	}
 
 	// clean up!!
-	defer os.RemoveAll(cw.path)
+	defer os.RemoveAll(w.path)
 
 	if err := os.Rename(ingest, target); err != nil {
 		if os.IsExist(err) {
 			// collision with the target file!
-			return nil
+			return errExists
 		}
 		return err
 	}
 
-	unlock(cw.lock)
-	cw.fp = nil
+	unlock(w.lock)
+	w.fp = nil
 	return nil
 }
 
@@ -121,7 +123,7 @@ func (cw *writer) Commit(size int64, expected digest.Digest) error {
 // clean up the associated resources.
 func (cw *writer) Close() (err error) {
 	if err := unlock(cw.lock); err != nil {
-		log.Printf("unlock failed: %v", err)
+		log.L.Debug("unlock failed: %v", err)
 	}
 
 	if cw.fp != nil {
@@ -5,7 +5,7 @@ import (
 	"io"
 )
 
-type Remote interface {
+type Fetcher interface {
 	// Fetch the resource identified by id. The id is opaque to the remote, but
 	// may typically be a tag or a digest.
 	//
@@ -32,8 +32,10 @@ type Remote interface {
 	Fetch(ctx context.Context, id string, hints ...string) (io.ReadCloser, error)
 }
 
-type RemoteFunc func(context.Context, string, ...string) (io.ReadCloser, error)
+// FetcherFunc allows package users to implement a Fetcher with just a
+// function.
+type FetcherFunc func(context.Context, string, ...string) (io.ReadCloser, error)
 
-func (fn RemoteFunc) Fetch(ctx context.Context, object string, hints ...string) (io.ReadCloser, error) {
+func (fn FetcherFunc) Fetch(ctx context.Context, object string, hints ...string) (io.ReadCloser, error) {
 	return fn(ctx, object, hints...)
 }
@@ -9,11 +9,11 @@ type Resolver interface {
 	// A locator is a scheme-less URI representing the remote. Structurally, it
 	// has a host and path. The "host" can be used to directly reference a
 	// specific host or be matched against a specific handler.
-	Resolve(ctx context.Context, locator string) (Remote, error)
+	Resolve(ctx context.Context, locator string) (Fetcher, error)
 }
 
-type ResolverFunc func(context.Context, string) (Remote, error)
+type ResolverFunc func(context.Context, string) (Fetcher, error)
 
-func (fn ResolverFunc) Resolve(ctx context.Context, locator string) (Remote, error) {
+func (fn ResolverFunc) Resolve(ctx context.Context, locator string) (Fetcher, error) {
 	return fn(ctx, locator)
 }
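Note: the `Remote` → `Fetcher` rename keeps the function adapters, so a fetcher is still one closure away. A sketch of an HTTP-backed fetcher built from a bare function; the declarations are repeated locally so the example stands alone, and the URL layout and use of the standard `context` package are assumptions for illustration:

    package example

    import (
        "context"
        "io"
        "net/http"
    )

    // These mirror the interface and adapter from the diff above.
    type Fetcher interface {
        Fetch(ctx context.Context, id string, hints ...string) (io.ReadCloser, error)
    }

    type FetcherFunc func(context.Context, string, ...string) (io.ReadCloser, error)

    func (fn FetcherFunc) Fetch(ctx context.Context, object string, hints ...string) (io.ReadCloser, error) {
        return fn(ctx, object, hints...)
    }

    // httpFetcher turns a locator into a Fetcher that issues plain
    // HTTP GETs. The URL scheme here is a placeholder, not a registry.
    func httpFetcher(locator string) Fetcher {
        return FetcherFunc(func(ctx context.Context, id string, hints ...string) (io.ReadCloser, error) {
            resp, err := http.Get("https://" + locator + "/" + id)
            if err != nil {
                return nil, err
            }
            return resp.Body, nil
        })
    }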
@@ -1,12 +1,15 @@
 package content
 
 import (
-	"errors"
 	"io"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/containerd"
 	api "github.com/docker/containerd/api/services/content"
 	"github.com/docker/containerd/content"
+	"github.com/docker/containerd/log"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -128,14 +131,25 @@ func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
 
 func (s *Service) Write(session api.Content_WriteServer) (err error) {
 	var (
-		ref string
+		ctx = session.Context()
 		msg api.WriteResponse
 		req *api.WriteRequest
+
+		ref      string
+		total    int64
+		expected digest.Digest
 	)
 
 	defer func(msg *api.WriteResponse) {
 		// pump through the last message if no error was encountered
 		if err != nil {
+			// TODO(stevvooe): Really need a log line here to track which
+			// errors are actually causing failure on the server side. May want
+			// to configure the service with an interceptor to make this work
+			// identically across all GRPC methods.
+			//
+			// This is pretty noisy, so we can remove it but leave it for now.
+			log.G(ctx).WithError(err).Error("(*Service).Write failed")
+
 			return
 		}
 
@@ -149,47 +163,88 @@ func (s *Service) Write(session api.Content_WriteServer) (err error) {
 	}
 
 	ref = req.Ref
 
 	if ref == "" {
 		return grpc.Errorf(codes.InvalidArgument, "first message must have a reference")
 	}
 
+	fields := logrus.Fields{
+		"ref": ref,
+	}
+	total = req.Total
+	expected = req.Expected
+	if total > 0 {
+		fields["total"] = total
+	}
+
+	if expected != "" {
+		fields["expected"] = expected
+	}
+
+	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
+
 	// this action locks the writer for the session.
-	wr, err := s.store.Writer(session.Context(), ref)
+	wr, err := s.store.Writer(ctx, ref, total, expected)
 	if err != nil {
 		return err
 	}
 	defer wr.Close()
 
 	for {
-		// TODO(stevvooe): We need to study this behavior in containerd a
-		// little better to decide where to put this. We may be able to make
-		// this determination elsewhere and avoid even creating the writer.
-		//
-		// Ideally, we just use the expected digest on commit to abandon the
-		// cost of the move when they collide.
-		if req.ExpectedDigest != "" {
-			if _, err := s.store.Info(req.ExpectedDigest); err != nil {
-				if !content.IsNotFound(err) {
-					return err
-				}
-
-				return grpc.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.ExpectedDigest)
-			}
-		}
-
 		msg.Action = req.Action
 		ws, err := wr.Status()
 		if err != nil {
 			return err
 		}
 
-		msg.Offset = ws.Offset
-		msg.StartedAt = ws.StartedAt
-		msg.UpdatedAt = ws.UpdatedAt
+		msg.Offset = ws.Offset // always set the offset.
+
+		// NOTE(stevvooe): In general, there are two cases under which a remote
+		// writer is used.
+		//
+		// For pull, we almost always have this before fetching large content,
+		// through descriptors. We allow predeclaration of the expected size
+		// and digest.
+		//
+		// For push, it is more complex. If we want to cut through content into
+		// storage, we may have no expectation until we are done processing the
+		// content. The case here is the following:
+		//
+		// 	1. Start writing content.
+		// 	2. Compress inline.
+		// 	3. Validate digest and size (maybe).
+		//
+		// Supporting these two paths is quite awkward but it lets both API
+		// users use the same writer style for each with a minimum of overhead.
+		if req.Expected != "" {
+			if expected != "" && expected != req.Expected {
+				return grpc.Errorf(codes.InvalidArgument, "inconsistent digest provided: %v != %v", req.Expected, expected)
+			}
+			expected = req.Expected
+
+			if _, err := s.store.Info(req.Expected); err == nil {
+				if err := s.store.Abort(ref); err != nil {
+					log.G(ctx).WithError(err).Error("failed to abort write")
+				}
+
+				return grpc.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
+			}
+		}
+
+		if req.Total > 0 {
+			// Update the expected total. Typically, this could be seen at
+			// negotiation time or on a commit message.
+			if total > 0 && req.Total != total {
+				return grpc.Errorf(codes.InvalidArgument, "inconsistent total provided: %v != %v", req.Total, total)
+			}
+			total = req.Total
+		}
 
 		switch req.Action {
 		case api.WriteActionStat:
 			msg.Digest = wr.Digest()
+			msg.StartedAt = ws.StartedAt
+			msg.UpdatedAt = ws.UpdatedAt
+			msg.Total = total
 		case api.WriteActionWrite, api.WriteActionCommit:
 			if req.Offset > 0 {
 				// validate the offset if provided
@@ -217,7 +272,11 @@ func (s *Service) Write(session api.Content_WriteServer) (err error) {
 			}
 
 			if req.Action == api.WriteActionCommit {
-				return wr.Commit(req.ExpectedSize, req.ExpectedDigest)
+				if err := wr.Commit(total, expected); err != nil {
+					return err
+				}
+
+				msg.Digest = wr.Digest()
 			}
 		case api.WriteActionAbort:
 			return s.store.Abort(ref)
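Note: on the wire, the service now expects a stat message to open the session, optional write messages, and a commit that revalidates against the predeclared values. A sketch of that flow from the client side, using only the fields visible in this change (`Action`, `Ref`, `Total`, `Expected`, and the returned `Offset`); the `ContentClient` name follows the usual gRPC codegen and is an assumption, and data streaming and teardown are elided:

    package example

    import (
        contentapi "github.com/docker/containerd/api/services/content"
        digest "github.com/opencontainers/go-digest"
        "golang.org/x/net/context"
    )

    func negotiateAndCommit(ctx context.Context, client contentapi.ContentClient, ref string, size int64, expected digest.Digest) error {
        session, err := client.Write(ctx)
        if err != nil {
            return err
        }

        // 1. Stat locks the ref and predeclares size and digest, letting
        //    the service short-circuit with AlreadyExists when possible.
        if err := session.Send(&contentapi.WriteRequest{
            Action:   contentapi.WriteActionStat,
            Ref:      ref,
            Total:    size,
            Expected: expected,
        }); err != nil {
            return err
        }
        resp, err := session.Recv()
        if err != nil {
            return err
        }
        _ = resp.Offset // resume point for the write messages that follow

        // 2. ...WriteActionWrite messages would stream the payload here...

        // 3. Commit revalidates against the predeclared total and digest.
        return session.Send(&contentapi.WriteRequest{
            Action:   contentapi.WriteActionCommit,
            Total:    size,
            Expected: expected,
        })
    }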