Merge pull request #520 from stevvooe/content-service

api/services/content: define the content service
Michael Crosby 2017-02-21 15:03:13 -08:00 committed by GitHub
commit 8ae905b92b
40 changed files with 4055 additions and 1022 deletions

File diff suppressed because it is too large


@@ -0,0 +1,201 @@
syntax = "proto3";
package containerd.v1;
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
// Content provides access to a content addressable storage system.
service Content {
// Info returns information about a committed object.
//
// This call can be used for getting the size of content and checking for
// existence.
rpc Info(InfoRequest) returns (InfoResponse);
// Read allows one to read an object based on the offset into the content.
//
// The requested data may be returned in one or more messages.
rpc Read(ReadRequest) returns (stream ReadResponse);
// Status returns the status of ongoing object ingestions, started via
// Write.
//
// For active ingestions, the status will be streamed until the client
// closes the connection or all matched ingestions are committed.
rpc Status(StatusRequest) returns (stream StatusResponse);
// Write begins or resumes writes to a resource identified by a unique ref.
// Only one active stream may exist at a time for each ref.
//
// Once a write stream has started, it may only write to a single ref, thus
// once a stream is started, the ref may be omitted on subsequent writes.
//
// For any write transaction represented by a ref, only a single write may
// be made to a given offset. If overlapping writes occur, it is an error.
// Writes should be sequential, and an implementation may return an error if
// they are not.
//
// If expected_digest is set and already part of the content store, the
// write will fail.
//
// When completed, the commit flag should be set to true. If expected size
// or digest is set, the content will be validated against those values.
rpc Write(stream WriteRequest) returns (stream WriteResponse);
}
message InfoRequest {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message InfoResponse {
// Digest is the hash identity of the blob.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Size is the total number of bytes in the blob.
int64 size = 2;
// CommittedAt provides the time at which the blob was committed.
google.protobuf.Timestamp committed_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
// ReadRequest defines the fields that make up a request to read a portion of
// data from a stored object.
message ReadRequest {
// Digest is the hash identity to read.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Offset specifies the number of bytes from the start at which to begin
// the read. If zero or less, the read will be from the start. This uses
// standard zero-indexed semantics.
int64 offset = 2;
// Size is the total size of the read. If zero, the entire blob will be
// returned by the service.
int64 size = 3;
}
// ReadResponse carries byte data for a read request.
message ReadResponse {
int64 offset = 1; // offset of the returned data
bytes data = 2; // actual data
}
// WriteAction defines the behavior of a WriteRequest.
enum WriteAction {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "WriteAction";
// WriteActionStat instructs the writer to return the current status while
// holding the lock on the write.
STAT = 0 [(gogoproto.enumvalue_customname)="WriteActionStat"];
// WriteActionWrite sets the action for the write request to write data.
//
// Any data included will be written at the provided offset. The
// transaction will be left open for further writes.
//
// This is the default.
WRITE = 1 [(gogoproto.enumvalue_customname)="WriteActionWrite"];
// WriteActionCommit will write any outstanding data in the message and
// commit the write, storing it under the digest.
//
// This can be used in a single message to send the data, verify it and
// commit it.
//
// This action will always terminate the write.
COMMIT = 2 [(gogoproto.enumvalue_customname)="WriteActionCommit"];
// WriteActionAbort will release any resources associated with the write
// and free up the ref for a completely new set of writes.
//
// This action will always terminate the write.
ABORT = -1 [(gogoproto.enumvalue_customname)="WriteActionAbort"];
}
// WriteRequest writes data to the request ref at offset.
message WriteRequest {
// Action sets the behavior of the write.
//
// When this is a write and the ref is not yet allocated, the ref will be
// allocated and the data will be written at offset.
//
// If the action is write and the ref is allocated, it will accept data to
// an offset that has not yet been written.
//
// If the action is write and there is no data, the current write status
// will be returned. This works differently from status because the stream
// holds a lock.
WriteAction action = 1;
// Ref identifies the pre-commit object to write to.
string ref = 2;
// ExpectedSize can be set to have the service validate the total size of
// the committed content.
//
// The latest value before or with the commit action message will be used to
// validate the content. It is only required on one message for the write.
//
// If the value is zero or less, no validation of the final content will be
// performed.
int64 expected_size = 3;
// ExpectedDigest can be set to have the service validate the final content
// against the provided digest.
//
// If the digest is already present in the object store, an AlreadyPresent
// error will be returned.
//
// Only the latest version will be used to check the content against the
// digest. It is only required to include it on a single message, before or
// with the commit action message.
string expected_digest = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Offset specifies the number of bytes from the start at which to begin
// the write. If zero or less, the write will be from the start. This uses
// standard zero-indexed semantics.
int64 offset = 5;
// Data is the actual bytes to be written.
//
// If this is empty and the message is not a commit, a response will be
// returned with the current write state.
bytes data = 6;
}
// WriteResponse is returned on the culmination of a write call.
message WriteResponse {
// Action contains the action for the final message of the stream. A writer
// should confirm that they match the intended result.
WriteAction action = 1;
// Offset provides the current "committed" size for the Write.
int64 offset = 2;
// Digest, if present, includes the digest up to the currently committed
// bytes. If action is commit, this field will be set. It is implementation
// defined if this is set for other actions, except abort. On abort, this
// will be empty.
string digest = 3 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// StartedAt is the time at which the write first started.
google.protobuf.Timestamp started_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// UpdatedAt is the time the write was last updated.
google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message StatusRequest {
repeated string refs = 1;
repeated string prefix = 2;
}
message StatusResponse {
string ref = 1;
int64 offset = 2;
google.protobuf.Timestamp started_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
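
The comments above pin down the wire protocol, so a short client sketch may make the Write handshake concrete: the first message on the stream must carry the ref, a stat action returns the committed offset, and a single commit message can carry the data plus the expected size and digest. This is an illustrative sketch against the generated Go client, not part of the diff; the socket path, ref, and payload are placeholders, and error handling is kept crude.

package main

import (
	"context"
	"log"
	"net"
	"time"

	contentapi "github.com/docker/containerd/api/services/content"
	digest "github.com/opencontainers/go-digest"
	"google.golang.org/grpc"
)

func main() {
	const socket = "/run/containerd/containerd.sock"
	conn, err := grpc.Dial(socket,
		grpc.WithBlock(), grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", socket, timeout)
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	wr, err := contentapi.NewContentClient(conn).Write(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// First message: carry the ref and ask for status. The response tells
	// us the offset at which a (possibly resumed) write would continue.
	if err := wr.Send(&contentapi.WriteRequest{
		Action: contentapi.WriteActionStat,
		Ref:    "example-ref", // placeholder ref
	}); err != nil {
		log.Fatal(err)
	}
	st, err := wr.Recv()
	if err != nil {
		log.Fatal(err)
	}

	// Second message: send the data and commit in one shot. The service
	// validates the expected size and digest before committing the blob.
	p := []byte("hello, content store") // placeholder payload
	if err := wr.Send(&contentapi.WriteRequest{
		Action:         contentapi.WriteActionCommit,
		Offset:         st.Offset,
		Data:           p,
		ExpectedSize:   int64(len(p)),
		ExpectedDigest: digest.FromBytes(p),
	}); err != nil {
		log.Fatal(err)
	}
	resp, err := wr.Recv()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("committed %v at offset %v", resp.Digest, resp.Offset)
	wr.CloseSend()
}

Because a commit always terminates the write, the stream can be closed once the final response arrives.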


@@ -984,7 +984,24 @@ func (m *User) Unmarshal(dAtA []byte) error {
 			}
 		}
 	case 3:
-		if wireType == 2 {
+		if wireType == 0 {
+			var v uint32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.AdditionalGids = append(m.AdditionalGids, v)
+		} else if wireType == 2 {
 			var packedLen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
@@ -1025,23 +1042,6 @@ func (m *User) Unmarshal(dAtA []byte) error {
 				}
 				m.AdditionalGids = append(m.AdditionalGids, v)
 			}
-		} else if wireType == 0 {
-			var v uint32
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContainer
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= (uint32(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.AdditionalGids = append(m.AdditionalGids, v)
 		} else {
 			return fmt.Errorf("proto: wrong wireType = %d for field AdditionalGids", wireType)
 		}


@@ -7,6 +7,7 @@ import (
 	_ "net/http/pprof"
 	"os"
 	"os/signal"
+	"path/filepath"
 	"syscall"
 	"time"
@@ -15,7 +16,9 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/containerd"
+	contentapi "github.com/docker/containerd/api/services/content"
 	api "github.com/docker/containerd/api/services/execution"
+	"github.com/docker/containerd/content"
 	_ "github.com/docker/containerd/linux"
 	"github.com/docker/containerd/log"
 	"github.com/docker/containerd/services/execution"
@@ -55,10 +58,6 @@ func main() {
 			Name:  "log-level,l",
 			Usage: "set the logging level [debug, info, warn, error, fatal, panic]",
 		},
-		cli.StringFlag{
-			Name:  "root,r",
-			Usage: "containerd root directory",
-		},
 		cli.StringFlag{
 			Name:  "state",
 			Usage: "containerd state directory",
@@ -90,14 +89,27 @@ func main() {
 			return err
 		}
 		serveMetricsAPI()
+		contentStore, err := resolveContentStore(context)
+		if err != nil {
+			return err
+		}
+		contentService := content.NewService(contentStore)
 		// start the GRPC api with the execution service registered
-		server := newGRPCServer(execution.New(supervisor))
+		server := newGRPCServer()
+		api.RegisterContainerServiceServer(server, execution.New(supervisor))
+		contentapi.RegisterContentServer(server, contentService)
+		// start the GRPC api with registered services
 		if err := serveGRPC(server); err != nil {
 			return err
 		}
 		log.G(global).Infof("containerd successfully booted in %fs", time.Now().Sub(start).Seconds())
 		return handleSignals(signals, server)
 	}
 	if err := app.Run(os.Args); err != nil {
 		fmt.Fprintf(os.Stderr, "containerd: %s\n", err)
 		os.Exit(1)
@@ -192,8 +204,13 @@ func serveDebugAPI() error {
 	return nil
 }
+func resolveContentStore(context *cli.Context) (*content.Store, error) {
+	cp := filepath.Join(conf.Root, "content")
+	return content.NewStore(cp)
+}
 func loadRuntimes() (map[string]containerd.Runtime, error) {
-	o := make(map[string]containerd.Runtime)
+	o := map[string]containerd.Runtime{}
 	for _, name := range containerd.Runtimes() {
 		r, err := containerd.NewRuntime(name, conf.State)
 		if err != nil {
@@ -205,9 +222,8 @@ func loadRuntimes() (map[string]containerd.Runtime, error) {
 	return o, nil
 }
-func newGRPCServer(service api.ContainerServiceServer) *grpc.Server {
+func newGRPCServer() *grpc.Server {
 	s := grpc.NewServer(grpc.UnaryInterceptor(interceptor))
-	api.RegisterContainerServiceServer(s, service)
 	return s
 }

cmd/dist/active.go vendored (23 lines changed)

@@ -3,11 +3,9 @@ package main
 import (
 	"fmt"
 	"os"
-	"path/filepath"
 	"text/tabwriter"
 	"time"
-	"github.com/docker/containerd/content"
 	units "github.com/docker/go-units"
 	"github.com/urfave/cli"
 )
@@ -26,24 +24,11 @@ var activeCommand = cli.Command{
 		cli.StringFlag{
 			Name:  "root",
 			Usage: "path to content store root",
-			Value: ".content", // TODO(stevvooe): for now, just use the PWD/.content
+			Value: "/tmp/content", // TODO(stevvooe): for now, just use the PWD/.content
 		},
 	},
 	Action: func(context *cli.Context) error {
-		var (
-			// ctx = contextpkg.Background()
-			root = context.String("root")
-		)
-		if !filepath.IsAbs(root) {
-			var err error
-			root, err = filepath.Abs(root)
-			if err != nil {
-				return err
-			}
-		}
-		cs, err := content.Open(root)
+		cs, err := resolveContentStore(context)
 		if err != nil {
 			return err
 		}
@@ -58,8 +43,8 @@ var activeCommand = cli.Command{
 	for _, active := range active {
 		fmt.Fprintf(tw, "%s\t%s\t%s\n",
 			active.Ref,
-			units.HumanSize(float64(active.Size)),
-			units.HumanDuration(time.Since(active.ModTime)))
+			units.HumanSize(float64(active.Offset)),
+			units.HumanDuration(time.Since(active.StartedAt)))
 	}
 	tw.Flush()

cmd/dist/common.go vendored (new file, 34 lines)

@@ -0,0 +1,34 @@
package main
import (
"net"
"path/filepath"
"time"
"github.com/docker/containerd/content"
"github.com/urfave/cli"
"google.golang.org/grpc"
)
func resolveContentStore(context *cli.Context) (*content.Store, error) {
root := context.GlobalString("root")
if !filepath.IsAbs(root) {
var err error
root, err = filepath.Abs(root)
if err != nil {
return nil, err
}
}
return content.NewStore(root)
}
func connectGRPC(context *cli.Context) (*grpc.ClientConn, error) {
socket := context.GlobalString("socket")
return grpc.Dial(socket,
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", socket, timeout)
}),
)
}

cmd/dist/delete.go vendored (21 lines changed)

@@ -3,9 +3,7 @@ package main
 import (
 	contextpkg "context"
 	"fmt"
-	"path/filepath"
-	"github.com/docker/containerd/content"
 	"github.com/docker/containerd/log"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/urfave/cli"
@@ -18,30 +16,15 @@ var deleteCommand = cli.Command{
 	ArgsUsage: "[flags] [<digest>, ...]",
 	Description: `Delete one or more blobs permanently. Successfully deleted
blobs are printed to stdout.`,
-	Flags: []cli.Flag{
-		cli.StringFlag{
-			Name:  "root",
-			Usage: "path to content store root",
-			Value: ".content", // TODO(stevvooe): for now, just use the PWD/.content
-		},
-	},
+	Flags: []cli.Flag{},
 	Action: func(context *cli.Context) error {
 		var (
 			ctx       = contextpkg.Background()
-			root      = context.String("root")
 			args      = []string(context.Args())
 			exitError error
 		)
-		if !filepath.IsAbs(root) {
-			var err error
-			root, err = filepath.Abs(root)
-			if err != nil {
-				return err
-			}
-		}
-		cs, err := content.Open(root)
+		cs, err := resolveContentStore(context)
 		if err != nil {
 			return err
 		}

cmd/dist/get.go vendored (new file, 47 lines)

@@ -0,0 +1,47 @@
package main
import (
"io"
"os"
contentapi "github.com/docker/containerd/api/services/content"
"github.com/docker/containerd/content"
digest "github.com/opencontainers/go-digest"
"github.com/urfave/cli"
)
var getCommand = cli.Command{
Name: "get",
Usage: "get the data for an object",
ArgsUsage: "[flags] [<digest>, ...]",
Description: `Display the contents of one or more blobs.
Blob data is written directly to stdout.`,
Flags: []cli.Flag{},
Action: func(context *cli.Context) error {
var (
ctx = background
)
dgst, err := digest.Parse(context.Args().First())
if err != nil {
return err
}
conn, err := connectGRPC(context)
if err != nil {
return err
}
cs := content.NewProviderFromClient(contentapi.NewContentClient(conn))
rc, err := cs.Reader(ctx, dgst)
if err != nil {
return err
}
defer rc.Close()
_, err = io.Copy(os.Stdout, rc)
return err
},
}

cmd/dist/ingest.go vendored (55 lines changed)

@@ -4,9 +4,8 @@ import (
 	contextpkg "context"
 	"fmt"
 	"os"
-	"path/filepath"
-	"strings"
+	contentapi "github.com/docker/containerd/api/services/content"
 	"github.com/docker/containerd/content"
 	"github.com/opencontainers/go-digest"
 	"github.com/urfave/cli"
@@ -18,17 +17,6 @@ var ingestCommand = cli.Command{
 	ArgsUsage:   "[flags] <key>",
 	Description: `Ingest objects into the local content store.`,
 	Flags: []cli.Flag{
-		cli.DurationFlag{
-			Name:   "timeout",
-			Usage:  "total timeout for fetch",
-			EnvVar: "CONTAINERD_FETCH_TIMEOUT",
-		},
-		cli.StringFlag{
-			Name:   "path, p",
-			Usage:  "path to content store",
-			Value:  ".content", // TODO(stevvooe): for now, just use the PWD/.content
-			EnvVar: "CONTAINERD_DIST_CONTENT_STORE",
-		},
 		cli.Int64Flag{
 			Name:  "expected-size",
 			Usage: "validate against provided size",
@@ -40,57 +28,34 @@ var ingestCommand = cli.Command{
 	},
 	Action: func(context *cli.Context) error {
 		var (
-			ctx            = contextpkg.Background()
-			timeout        = context.Duration("timeout")
-			root           = context.String("path")
+			ctx            = background
+			cancel         func()
 			ref            = context.Args().First()
 			expectedSize   = context.Int64("expected-size")
 			expectedDigest = digest.Digest(context.String("expected-digest"))
 		)
-		if timeout > 0 {
-			var cancel func()
-			ctx, cancel = contextpkg.WithTimeout(ctx, timeout)
-			defer cancel()
-		}
+		ctx, cancel = contextpkg.WithCancel(ctx)
+		defer cancel()
 		if err := expectedDigest.Validate(); expectedDigest != "" && err != nil {
 			return err
 		}
-		if !filepath.IsAbs(root) {
-			var err error
-			root, err = filepath.Abs(root)
-			if err != nil {
-				return err
-			}
-		}
-		cs, err := content.Open(root)
+		conn, err := connectGRPC(context)
 		if err != nil {
 			return err
 		}
-		if expectedDigest != "" {
-			if ok, err := cs.Exists(expectedDigest); err != nil {
-				return err
-			} else if ok {
-				fmt.Fprintf(os.Stderr, "content with digest %v already exists\n", expectedDigest)
-				return nil
-			}
-		}
 		if ref == "" {
-			if expectedDigest == "" {
-				return fmt.Errorf("must specify a transaction reference or expected digest")
-			}
-			ref = strings.Replace(expectedDigest.String(), ":", "-", -1)
+			return fmt.Errorf("must specify a transaction reference")
 		}
+		ingester := content.NewIngesterFromClient(contentapi.NewContentClient(conn))
 		// TODO(stevvooe): Allow ingest to be reentrant. Currently, we expect
 		// all data to be written in a single invocation. Allow multiple writes
 		// to the same transaction key followed by a commit.
-		return content.WriteBlob(cs, os.Stdin, ref, expectedSize, expectedDigest)
+		return content.WriteBlob(ctx, ingester, os.Stdin, ref, expectedSize, expectedDigest)
 	},
 }

cmd/dist/list.go vendored (17 lines changed)

@@ -4,7 +4,6 @@ import (
 	contextpkg "context"
 	"fmt"
 	"os"
-	"path/filepath"
 	"text/tabwriter"
 	"time"
@@ -22,11 +21,6 @@ var listCommand = cli.Command{
 	ArgsUsage:   "[flags] [<prefix>, ...]",
 	Description: `List blobs in the content store.`,
 	Flags: []cli.Flag{
-		cli.StringFlag{
-			Name:  "root",
-			Usage: "path to content store root",
-			Value: ".content", // TODO(stevvooe): for now, just use the PWD/.content
-		},
 		cli.BoolFlag{
 			Name:  "quiet, q",
 			Usage: "print only the blob digest",
@@ -35,20 +29,11 @@ var listCommand = cli.Command{
 	Action: func(context *cli.Context) error {
 		var (
 			ctx   = contextpkg.Background()
-			root  = context.String("root")
 			quiet = context.Bool("quiet")
 			args  = []string(context.Args())
 		)
-		if !filepath.IsAbs(root) {
-			var err error
-			root, err = filepath.Abs(root)
-			if err != nil {
-				return err
-			}
-		}
-		cs, err := content.Open(root)
+		cs, err := resolveContentStore(context)
 		if err != nil {
 			return err
 		}

cmd/dist/main.go vendored (32 lines changed)

@@ -1,6 +1,7 @@
 package main
 import (
+	contextpkg "context"
 	"fmt"
 	"os"
@@ -9,6 +10,10 @@ import (
 	"github.com/urfave/cli"
 )
+var (
+	background = contextpkg.Background()
+)
 func main() {
 	app := cli.NewApp()
 	app.Name = "dist"
@@ -27,20 +32,43 @@ distribution tool
 			Name:  "debug",
 			Usage: "enable debug output in logs",
 		},
+		cli.DurationFlag{
+			Name:   "timeout",
+			Usage:  "total timeout for fetch",
+			EnvVar: "CONTAINERD_FETCH_TIMEOUT",
+		},
+		cli.StringFlag{
+			Name:  "root",
+			Usage: "path to content store root",
+			Value: "/tmp/content", // TODO(stevvooe): for now, just use the PWD/.content
+		},
+		cli.StringFlag{
+			Name:  "socket, s",
+			Usage: "socket path for containerd's GRPC server",
+			Value: "/run/containerd/containerd.sock",
+		},
 	}
 	app.Commands = []cli.Command{
 		fetchCommand,
 		ingestCommand,
 		activeCommand,
-		pathCommand,
+		getCommand,
 		deleteCommand,
 		listCommand,
 		applyCommand,
 	}
 	app.Before = func(context *cli.Context) error {
-		if context.GlobalBool("debug") {
+		var (
+			debug   = context.GlobalBool("debug")
+			timeout = context.GlobalDuration("timeout")
+		)
+		if debug {
 			logrus.SetLevel(logrus.DebugLevel)
 		}
+		if timeout > 0 {
+			background, _ = contextpkg.WithTimeout(background, timeout)
+		}
 		return nil
 	}
 	if err := app.Run(os.Args); err != nil {

cmd/dist/path.go vendored (deleted file, 89 lines)

@@ -1,89 +0,0 @@
package main
import (
contextpkg "context"
"fmt"
"path/filepath"
"github.com/docker/containerd/content"
"github.com/docker/containerd/log"
digest "github.com/opencontainers/go-digest"
"github.com/urfave/cli"
)
var pathCommand = cli.Command{
Name: "path",
Usage: "print the path to one or more blobs",
ArgsUsage: "[flags] [<digest>, ...]",
Description: `Display the paths to one or more blobs.
Output paths can be used to directly access blobs on disk.`,
Flags: []cli.Flag{
cli.StringFlag{
Name: "root",
Usage: "path to content store root",
Value: ".content", // TODO(stevvooe): for now, just use the PWD/.content
EnvVar: "CONTAINERD_DIST_CONTENT_STORE",
},
cli.BoolFlag{
Name: "quiet, q",
Usage: "elide digests in output",
},
},
Action: func(context *cli.Context) error {
var (
ctx = contextpkg.Background()
root = context.String("root")
args = []string(context.Args())
quiet = context.Bool("quiet")
exitError error
)
if !filepath.IsAbs(root) {
var err error
root, err = filepath.Abs(root)
if err != nil {
return err
}
}
cs, err := content.Open(root)
if err != nil {
return err
}
// TODO(stevvooe): Take the set of paths from stdin.
if len(args) < 1 {
return fmt.Errorf("please specify a blob digest")
}
for _, arg := range args {
dgst, err := digest.Parse(arg)
if err != nil {
log.G(ctx).WithError(err).Errorf("parsing %q as digest failed", arg)
if exitError == nil {
exitError = err
}
continue
}
p, err := cs.GetPath(dgst)
if err != nil {
log.G(ctx).WithError(err).Errorf("getting path for %q failed", dgst)
if exitError == nil {
exitError = err
}
continue
}
if !quiet {
fmt.Println(dgst, p)
} else {
fmt.Println(p)
}
}
return exitError
},
}

content/client.go (new file, 216 lines)

@@ -0,0 +1,216 @@
package content
import (
"context"
"io"
contentapi "github.com/docker/containerd/api/services/content"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
func NewProviderFromClient(client contentapi.ContentClient) Provider {
return &remoteProvider{
client: client,
}
}
type remoteProvider struct {
client contentapi.ContentClient
}
func (rp *remoteProvider) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
client, err := rp.client.Read(ctx, &contentapi.ReadRequest{Digest: dgst})
if err != nil {
return nil, err
}
return &remoteReader{
client: client,
}, nil
}
type remoteReader struct {
client contentapi.Content_ReadClient
extra []byte
}
func (rr *remoteReader) Read(p []byte) (n int, err error) {
n += copy(p, rr.extra)
if n >= len(p) {
if n <= len(rr.extra) {
rr.extra = rr.extra[n:]
} else {
rr.extra = rr.extra[:0]
}
return
}
p = p[n:]
for len(p) > 0 {
var resp *contentapi.ReadResponse
// fill our buffer up until we can fill p.
resp, err = rr.client.Recv()
if err != nil {
return
}
copied := copy(p, resp.Data)
n += copied
p = p[copied:]
if copied < len(p) {
continue
}
rr.extra = append(rr.extra, resp.Data[copied:]...)
}
return
}
func (rr *remoteReader) Close() error {
return rr.client.CloseSend()
}
func NewIngesterFromClient(client contentapi.ContentClient) Ingester {
return &remoteIngester{
client: client,
}
}
type remoteIngester struct {
client contentapi.ContentClient
}
func (ri *remoteIngester) Writer(ctx context.Context, ref string) (Writer, error) {
wrclient, offset, err := ri.negotiate(ctx, ref)
if err != nil {
return nil, err
}
return &remoteWriter{
client: wrclient,
offset: offset,
}, nil
}
func (ri *remoteIngester) negotiate(ctx context.Context, ref string) (contentapi.Content_WriteClient, int64, error) {
wrclient, err := ri.client.Write(ctx)
if err != nil {
return nil, 0, err
}
if err := wrclient.Send(&contentapi.WriteRequest{
Action: contentapi.WriteActionStat,
Ref: ref,
}); err != nil {
return nil, 0, err
}
resp, err := wrclient.Recv()
if err != nil {
return nil, 0, err
}
return wrclient, resp.Offset, nil
}
type remoteWriter struct {
ref string
client contentapi.Content_WriteClient
offset int64
digest digest.Digest
}
func newRemoteWriter(client contentapi.Content_WriteClient, ref string, offset int64) (*remoteWriter, error) {
return &remoteWriter{
ref: ref,
client: client,
offset: offset,
}, nil
}
// send performs a synchronous req-resp cycle on the client.
func (rw *remoteWriter) send(req *contentapi.WriteRequest) (*contentapi.WriteResponse, error) {
if err := rw.client.Send(req); err != nil {
return nil, err
}
resp, err := rw.client.Recv()
if err == nil {
// try to keep these in sync
if resp.Digest != "" {
rw.digest = resp.Digest
}
}
return resp, err
}
func (rw *remoteWriter) Status() (Status, error) {
resp, err := rw.send(&contentapi.WriteRequest{
Action: contentapi.WriteActionStat,
})
if err != nil {
return Status{}, err
}
return Status{
Ref: rw.ref,
Offset: resp.Offset,
StartedAt: resp.StartedAt,
UpdatedAt: resp.UpdatedAt,
}, nil
}
func (rw *remoteWriter) Digest() digest.Digest {
return rw.digest
}
func (rw *remoteWriter) Write(p []byte) (n int, err error) {
offset := rw.offset
resp, err := rw.send(&contentapi.WriteRequest{
Action: contentapi.WriteActionWrite,
Offset: offset,
Data: p,
})
if err != nil {
return 0, err
}
n = int(resp.Offset - offset)
if n < len(p) {
err = io.ErrShortWrite
}
rw.offset += int64(n)
return
}
func (rw *remoteWriter) Commit(size int64, expected digest.Digest) error {
resp, err := rw.send(&contentapi.WriteRequest{
Action: contentapi.WriteActionCommit,
ExpectedSize: size,
ExpectedDigest: expected,
})
if err != nil {
return err
}
if size != 0 && resp.Offset != size {
return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
}
if expected != "" && resp.Digest != expected {
return errors.New("unexpected digest")
}
return nil
}
func (rw *remoteWriter) Close() error {
return rw.client.CloseSend()
}
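
Putting the two constructors together, a round trip through the service looks roughly like this. The sketch below is not part of the commit: it assumes a reachable containerd socket (dialed the same way connectGRPC in cmd/dist/common.go does), and the ref and payload are invented. WriteBlob comes from helpers.go in this package.

package main

import (
	"bytes"
	"context"
	"io"
	"log"
	"net"
	"os"
	"time"

	contentapi "github.com/docker/containerd/api/services/content"
	"github.com/docker/containerd/content"
	digest "github.com/opencontainers/go-digest"
	"google.golang.org/grpc"
)

func main() {
	const socket = "/run/containerd/containerd.sock"
	conn, err := grpc.Dial(socket, grpc.WithBlock(), grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", socket, timeout)
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	var (
		ctx    = context.Background()
		client = contentapi.NewContentClient(conn)
		p      = []byte("hello, content store") // placeholder payload
		dgst   = digest.FromBytes(p)
	)

	// Ingest through the remote ingester; the size and digest are
	// validated server-side on commit.
	ingester := content.NewIngesterFromClient(client)
	if err := content.WriteBlob(ctx, ingester, bytes.NewReader(p), "example-ref", int64(len(p)), dgst); err != nil {
		log.Fatal(err)
	}

	// Read the blob back by digest through the remote provider.
	provider := content.NewProviderFromClient(client)
	rc, err := provider.Reader(ctx, dgst)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}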


@@ -1,364 +1,53 @@
 package content
 import (
+	"context"
 	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
 	"sync"
 	"time"
-	"github.com/docker/containerd/log"
-	"github.com/nightlyone/lockfile"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
 var (
-	ErrBlobNotFound = errors.New("blob not found")
+	errNotFound = errors.New("content: not found")
 	bufPool = sync.Pool{
 		New: func() interface{} {
-			return make([]byte, 32<<10)
+			return make([]byte, 1<<20)
 		},
 	}
 )
-// Store is digest-keyed store for content. All data written into the store is
-// stored under a verifiable digest.
-//
-// Store can generally support multi-reader, single-writer ingest of data,
-// including resumable ingest.
-type Store struct {
-	root string
+type Info struct {
+	Digest      digest.Digest
+	Size        int64
+	CommittedAt time.Time
 }
-func Open(root string) (*Store, error) {
-	if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
-		return nil, err
-	}
-	return &Store{
-		root: root,
-	}, nil
+type Provider interface {
+	Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error)
 }
 type Status struct {
 	Ref       string
-	Size    int64
-	ModTime time.Time
-	Meta    interface{}
+	Offset    int64
+	StartedAt time.Time
+	UpdatedAt time.Time
 }
-func (cs *Store) Exists(dgst digest.Digest) (bool, error) {
-	if _, err := os.Stat(cs.blobPath(dgst)); err != nil {
-		if !os.IsNotExist(err) {
-			return false, err
-		}
-		return false, nil
-	}
-	return true, nil
+type Writer interface {
+	io.WriteCloser
+	Status() (Status, error)
+	Digest() digest.Digest
+	Commit(size int64, expected digest.Digest) error
 }
-func (cs *Store) GetPath(dgst digest.Digest) (string, error) {
-	p := cs.blobPath(dgst)
-	if _, err := os.Stat(p); err != nil {
-		if os.IsNotExist(err) {
-			return "", ErrBlobNotFound
-		}
-		return "", err
-	}
-	return p, nil
+type Ingester interface {
+	Writer(ctx context.Context, ref string) (Writer, error)
 }
-// Delete removes a blob by its digest.
-//
-// While this is safe to do concurrently, safe exist-removal logic must hold
-// some global lock on the store.
-func (cs *Store) Delete(dgst digest.Digest) error {
-	if err := os.RemoveAll(cs.blobPath(dgst)); err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-		return nil
-	}
-	return nil
-}
+func IsNotFound(err error) bool {
+	return errors.Cause(err) == errNotFound
+}
-func (cs *Store) blobPath(dgst digest.Digest) string {
-	return filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
-}
-// Stat returns the current status of a blob by the ingest ref.
-func (cs *Store) Stat(ref string) (Status, error) {
-	dp := filepath.Join(cs.ingestRoot(ref), "data")
-	return cs.stat(dp)
-}
-// stat works like stat above except uses the path to the ingest.
-func (cs *Store) stat(ingestPath string) (Status, error) {
-	dp := filepath.Join(ingestPath, "data")
-	dfi, err := os.Stat(dp)
-	if err != nil {
-		return Status{}, err
-	}
-	ref, err := readFileString(filepath.Join(ingestPath, "ref"))
-	if err != nil {
-		return Status{}, err
-	}
-	return Status{
-		Ref:     ref,
-		Size:    dfi.Size(),
-		ModTime: dfi.ModTime(),
-	}, nil
-}
-func (cs *Store) Active() ([]Status, error) {
-	ip := filepath.Join(cs.root, "ingest")
-	fp, err := os.Open(ip)
-	if err != nil {
-		return nil, err
-	}
-	fis, err := fp.Readdir(-1)
-	if err != nil {
-		return nil, err
-	}
-	var active []Status
-	for _, fi := range fis {
-		p := filepath.Join(ip, fi.Name())
-		stat, err := cs.stat(p)
-		if err != nil {
-			if !os.IsNotExist(err) {
-				return nil, err
-			}
-			// TODO(stevvooe): This is a common error if uploads are being
-			// completed while making this listing. Need to consider taking a
-			// lock on the whole store to coordinate this aspect.
-			//
-			// Another option is to cleanup downloads asynchronously and
-			// coordinate this method with the cleanup process.
-			//
-			// For now, we just skip them, as they really don't exist.
-			continue
-		}
-		active = append(active, stat)
-	}
-	return active, nil
-}
-// TODO(stevvooe): Allow querying the set of blobs in the blob store.
-// WalkFunc defines the callback for a blob walk.
-//
-// TODO(stevvooe): Remove the file info. Just need size and modtime. Perhaps,
-// not a huge deal, considering we have a path, but let's not just let this one
-// go without scrutiny.
-type WalkFunc func(path string, fi os.FileInfo, dgst digest.Digest) error
-func (cs *Store) Walk(fn WalkFunc) error {
-	root := filepath.Join(cs.root, "blobs")
-	var alg digest.Algorithm
-	return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if !fi.IsDir() && !alg.Available() {
-			return nil
-		}
-		// TODO(stevvooe): There are few more cases with subdirs that should be
-		// handled in case the layout gets corrupted. This isn't strict enough
-		// an may spew bad data.
-		if path == root {
-			return nil
-		}
-		if filepath.Dir(path) == root {
-			alg = digest.Algorithm(filepath.Base(path))
-			if !alg.Available() {
-				alg = ""
-				return filepath.SkipDir
-			}
-			// descending into a hash directory
-			return nil
-		}
-		dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
-		if err := dgst.Validate(); err != nil {
-			// log error but don't report
-			log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
-			// if we see this, it could mean some sort of corruption of the
-			// store or extra paths not expected previously.
-		}
-		return fn(path, fi, dgst)
-	})
-}
-// Begin starts a new write transaction against the blob store.
-//
-// The argument `ref` is used to identify the transaction. It must be a valid
-// path component, meaning it has no `/` characters and no `:` (we'll ban
-// others fs characters, as needed).
-func (cs *Store) Begin(ref string) (*Writer, error) {
-	path, refp, data, lock, err := cs.ingestPaths(ref)
-	if err != nil {
-		return nil, err
-	}
-	// use single path mkdir for this to ensure ref is only base path, in
-	// addition to validation above.
-	if err := os.Mkdir(path, 0755); err != nil {
-		return nil, err
-	}
-	if err := tryLock(lock); err != nil {
-		return nil, err
-	}
-	// write the ref to a file for later use
-	if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
-		return nil, err
-	}
-	fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to open data file")
-	}
-	defer fp.Close()
-	// re-open the file in append mode
-	fp, err = os.OpenFile(data, os.O_WRONLY|os.O_APPEND, 0666)
-	if err != nil {
-		return nil, errors.Wrap(err, "error opening for append")
-	}
-	return &Writer{
-		cs:       cs,
-		fp:       fp,
-		lock:     lock,
-		path:     path,
-		digester: digest.Canonical.Digester(),
-	}, nil
-}
-func (cs *Store) Resume(ref string) (*Writer, error) {
-	path, refp, data, lock, err := cs.ingestPaths(ref)
-	if err != nil {
-		return nil, err
-	}
-	if err := tryLock(lock); err != nil {
-		return nil, err
-	}
-	refraw, err := readFileString(refp)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not read ref")
-	}
-	if ref != refraw {
-		// NOTE(stevvooe): This is fairly catastrophic. Either we have some
-		// layout corruption or a hash collision for the ref key.
-		return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, refraw)
-	}
-	digester := digest.Canonical.Digester()
-	// slow slow slow!!, send to goroutine or use resumable hashes
-	fp, err := os.Open(data)
-	if err != nil {
-		return nil, err
-	}
-	defer fp.Close()
-	p := bufPool.Get().([]byte)
-	defer bufPool.Put(p)
-	offset, err := io.CopyBuffer(digester.Hash(), fp, p)
-	if err != nil {
-		return nil, err
-	}
-	fp1, err := os.OpenFile(data, os.O_WRONLY|os.O_APPEND, 0666)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil, errors.Wrap(err, "ingest does not exist")
-		}
-		return nil, errors.Wrap(err, "error opening for append")
-	}
-	return &Writer{
-		cs:       cs,
-		fp:       fp1,
-		lock:     lock,
-		ref:      ref,
-		path:     path,
-		offset:   offset,
-		digester: digester,
-	}, nil
-}
-// Remove an active transaction keyed by ref.
-func (cs *Store) Remove(ref string) error {
-	root := cs.ingestRoot(ref)
-	if err := os.RemoveAll(root); err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		return err
-	}
-	return nil
-}
-func (cs *Store) ingestRoot(ref string) string {
-	dgst := digest.FromString(ref)
-	return filepath.Join(cs.root, "ingest", dgst.Hex())
-}
-// ingestPaths are returned, including the lockfile. The paths are the following:
-//
-// - root: entire ingest directory
-// - ref: name of the starting ref, must be unique
-// - data: file where data is written
-// - lock: lock file location
-//
-func (cs *Store) ingestPaths(ref string) (string, string, string, lockfile.Lockfile, error) {
-	var (
-		fp = cs.ingestRoot(ref)
-		rp = filepath.Join(fp, "ref")
-		lp = filepath.Join(fp, "lock")
-		dp = filepath.Join(fp, "data")
-	)
-	lock, err := lockfile.New(lp)
-	if err != nil {
-		return "", "", "", "", errors.Wrapf(err, "error creating lockfile %v", lp)
-	}
-	return fp, rp, dp, lock, nil
-}
-func readFileString(path string) (string, error) {
-	p, err := ioutil.ReadFile(path)
-	return string(p), err
-}
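
The new surface is deliberately small: a Provider reads by digest, an Ingester hands out a ref-keyed Writer, and the Writer computes the digest as data flows through it. A hedged sketch of a consumer (ingestAll is a hypothetical helper, shown in-package for brevity) that works against any Ingester, local Store or remote client alike:

package content

import (
	"context"
	"io"

	"github.com/opencontainers/go-digest"
)

// ingestAll copies r into the store under ref and commits the result,
// returning the digest the writer computed while consuming the data.
func ingestAll(ctx context.Context, ing Ingester, ref string, r io.Reader) (digest.Digest, error) {
	w, err := ing.Writer(ctx, ref) // holds the ref's lock until Close
	if err != nil {
		return "", err
	}
	defer w.Close()

	if _, err := io.Copy(w, r); err != nil {
		return "", err
	}

	// Zero size and empty digest skip validation on commit (per the proto
	// comments: content is only checked when the expected values are set).
	if err := w.Commit(0, ""); err != nil {
		return "", err
	}
	return w.Digest(), nil
}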


@@ -3,6 +3,7 @@ package content
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"crypto/rand"
 	_ "crypto/sha256" // required for digest package
 	"fmt"
@@ -21,7 +22,7 @@ import (
 )
 func TestContentWriter(t *testing.T) {
-	tmpdir, cs, cleanup := contentStoreEnv(t)
+	ctx, tmpdir, cs, cleanup := contentStoreEnv(t)
 	defer cleanup()
 	defer testutil.DumpDir(t, tmpdir)
@@ -29,7 +30,7 @@ func TestContentWriter(t *testing.T) {
 		t.Fatal("ingest dir should be created", err)
 	}
-	cw, err := cs.Begin("myref")
+	cw, err := cs.Writer(ctx, "myref")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -37,20 +38,14 @@ func TestContentWriter(t *testing.T) {
 		t.Fatal(err)
 	}
-	// try to begin again with same ref, should fail
-	cw, err = cs.Begin("myref")
-	if err == nil {
-		t.Fatal("expected error on repeated begin")
-	}
 	// reopen, so we can test things
-	cw, err = cs.Resume("myref")
+	cw, err = cs.Writer(ctx, "myref")
 	if err != nil {
 		t.Fatal(err)
 	}
 	// make sure that second resume also fails
-	if _, err = cs.Resume("myref"); err == nil {
+	if _, err = cs.Writer(ctx, "myref"); err == nil {
 		// TODO(stevvooe): This also works across processes. Need to find a way
 		// to test that, as well.
 		t.Fatal("no error on second resume")
@@ -64,14 +59,14 @@ func TestContentWriter(t *testing.T) {
 	// clear out the time and meta cause we don't care for this test
 	for i := range ingestions {
-		ingestions[i].Meta = nil
-		ingestions[i].ModTime = time.Time{}
+		ingestions[i].UpdatedAt = time.Time{}
+		ingestions[i].StartedAt = time.Time{}
 	}
 	if !reflect.DeepEqual(ingestions, []Status{
 		{
 			Ref:  "myref",
-			Size: 0,
+			Offset: 0,
 		},
 	}) {
 		t.Fatalf("unexpected ingestion set: %v", ingestions)
@@ -93,7 +88,7 @@ func TestContentWriter(t *testing.T) {
 		t.Fatal(err)
 	}
-	cw, err = cs.Begin("aref")
+	cw, err = cs.Writer(ctx, "aref")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -119,7 +114,7 @@ func TestContentWriter(t *testing.T) {
 }
 func TestWalkBlobs(t *testing.T) {
-	_, cs, cleanup := contentStoreEnv(t)
+	ctx, _, cs, cleanup := contentStoreEnv(t)
 	defer cleanup()
 	const (
@@ -128,7 +123,7 @@ func TestWalkBlobs(t *testing.T) {
 	)
 	var (
-		blobs    = populateBlobStore(t, cs, nblobs, maxsize)
+		blobs    = populateBlobStore(t, ctx, cs, nblobs, maxsize)
 		expected = map[digest.Digest]struct{}{}
 		found    = map[digest.Digest]struct{}{}
 	)
@@ -158,7 +153,7 @@ func TestWalkBlobs(t *testing.T) {
 // for blobs. This seems to be due to the number of syscalls and file io we do
 // coordinating the ingestion.
 func BenchmarkIngests(b *testing.B) {
-	_, cs, cleanup := contentStoreEnv(b)
+	ctx, _, cs, cleanup := contentStoreEnv(b)
 	defer cleanup()
 	for _, size := range []int64{
@@ -181,7 +176,7 @@ func BenchmarkIngests(b *testing.B) {
 			b.StartTimer()
 			for dgst, p := range blobs {
-				checkWrite(b, cs, dgst, p)
+				checkWrite(b, ctx, cs, dgst, p)
 			}
 		})
 	}
@@ -208,17 +203,17 @@ func generateBlobs(t checker, nblobs, maxsize int64) map[digest.Digest][]byte {
 	return blobs
 }
-func populateBlobStore(t checker, cs *Store, nblobs, maxsize int64) map[digest.Digest][]byte {
+func populateBlobStore(t checker, ctx context.Context, cs *Store, nblobs, maxsize int64) map[digest.Digest][]byte {
 	blobs := generateBlobs(t, nblobs, maxsize)
 	for dgst, p := range blobs {
-		checkWrite(t, cs, dgst, p)
+		checkWrite(t, ctx, cs, dgst, p)
 	}
 	return blobs
 }
-func contentStoreEnv(t checker) (string, *Store, func()) {
+func contentStoreEnv(t checker) (context.Context, string, *Store, func()) {
 	pc, _, _, ok := runtime.Caller(1)
 	if !ok {
 		t.Fatal("failed to resolve caller")
@@ -230,13 +225,15 @@ func contentStoreEnv(t checker) (string, *Store, func()) {
 		t.Fatal(err)
 	}
-	cs, err := Open(tmpdir)
+	cs, err := NewStore(tmpdir)
 	if err != nil {
 		os.RemoveAll(tmpdir)
 		t.Fatal(err)
 	}
-	return tmpdir, cs, func() {
+	ctx, cancel := context.WithCancel(context.Background())
+	return ctx, tmpdir, cs, func() {
+		cancel()
 		os.RemoveAll(tmpdir)
 	}
 }
@@ -253,10 +250,8 @@ func checkCopy(t checker, size int64, dst io.Writer, src io.Reader) {
 }
 func checkBlobPath(t *testing.T, cs *Store, dgst digest.Digest) string {
-	path, err := cs.GetPath(dgst)
-	if err != nil {
-		t.Fatal(err, dgst)
-	}
+	path := cs.blobPath(dgst)
 	if path != filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex()) {
 		t.Fatalf("unexpected path: %q", path)
 	}
@@ -273,8 +268,8 @@ func checkBlobPath(t *testing.T, cs *Store, dgst digest.Digest) string {
 	return path
 }
-func checkWrite(t checker, cs *Store, dgst digest.Digest, p []byte) digest.Digest {
-	if err := WriteBlob(cs, bytes.NewReader(p), dgst.String(), int64(len(p)), dgst); err != nil {
+func checkWrite(t checker, ctx context.Context, cs *Store, dgst digest.Digest, p []byte) digest.Digest {
+	if err := WriteBlob(ctx, cs, bytes.NewReader(p), dgst.String(), int64(len(p)), dgst); err != nil {
 		t.Fatal(err)
 	}


@@ -1,37 +1,14 @@
 package content
 import (
+	"context"
 	"io"
-	"os"
+	"io/ioutil"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
-// Provider gives access to blob content by paths.
-//
-// Typically, this is implemented by `*Store`.
-type Provider interface {
-	GetPath(dgst digest.Digest) (string, error)
-}
-// OpenBlob opens the blob for reading identified by dgst.
-//
-// The opened blob may also implement seek. Callers can detect with io.Seeker.
-func OpenBlob(provider Provider, dgst digest.Digest) (io.ReadCloser, error) {
-	path, err := provider.GetPath(dgst)
-	if err != nil {
-		return nil, err
-	}
-	fp, err := os.Open(path)
-	return fp, err
-}
-type Ingester interface {
-	Begin(key string) (*Writer, error)
-}
 // WriteBlob writes data with the expected digest into the content store. If
 // expected already exists, the method returns immediately and the reader will
 // not be consumed.
@@ -39,11 +16,23 @@ type Ingester interface {
 // This is useful when the digest and size are known beforehand.
 //
 // Copy is buffered, so no need to wrap reader in buffered io.
-func WriteBlob(cs Ingester, r io.Reader, ref string, size int64, expected digest.Digest) error {
-	cw, err := cs.Begin(ref)
+func WriteBlob(ctx context.Context, cs Ingester, r io.Reader, ref string, size int64, expected digest.Digest) error {
+	cw, err := cs.Writer(ctx, ref)
 	if err != nil {
 		return err
 	}
+	ws, err := cw.Status()
+	if err != nil {
+		return err
+	}
+	if ws.Offset > 0 {
+		// Arbitrary limitation for now. We can detect io.Seeker on r and
+		// resume.
+		return errors.Errorf("cannot resume already started write")
+	}
 	buf := bufPool.Get().([]byte)
 	defer bufPool.Put(buf)
@@ -62,3 +51,8 @@ func WriteBlob(cs Ingester, r io.Reader, ref string, size int64, expected digest
 	return nil
 }
+func readFileString(path string) (string, error) {
+	p, err := ioutil.ReadFile(path)
+	return string(p), err
+}


@@ -1,10 +1,10 @@
 package content
 import (
-	"errors"
 	"sync"
 	"github.com/nightlyone/lockfile"
+	"github.com/pkg/errors"
 )
 // In addition to providing inter-process locks for content ingest, we also
@@ -16,6 +16,8 @@ import (
 // error reporting.
 var (
+	errLocked = errors.New("key is locked")
 	// locks lets us lock in process, as well as output of process.
 	locks   = map[lockfile.Lockfile]struct{}{}
 	locksMu sync.Mutex
@@ -26,11 +28,15 @@ func tryLock(lock lockfile.Lockfile) error {
 	defer locksMu.Unlock()
 	if _, ok := locks[lock]; ok {
-		return errors.New("file in use")
+		return errLocked
 	}
 	if err := lock.TryLock(); err != nil {
-		return err
+		if errors.Cause(err) == lockfile.ErrBusy {
+			return errLocked
+		}
+		return errors.Wrapf(err, "lock.TryLock() encountered an error")
 	}
 	locks[lock] = struct{}{}

content/service.go (new file, 233 lines)

@@ -0,0 +1,233 @@
package content
import (
"errors"
"io"
contentapi "github.com/docker/containerd/api/services/content"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type Service struct {
store *Store
}
var _ contentapi.ContentServer = &Service{}
func NewService(store *Store) contentapi.ContentServer {
return &Service{store: store}
}
func (s *Service) Info(ctx context.Context, req *contentapi.InfoRequest) (*contentapi.InfoResponse, error) {
if err := req.Digest.Validate(); err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
}
bi, err := s.store.Info(req.Digest)
if err != nil {
return nil, maybeNotFoundGRPC(err, req.Digest.String())
}
return &contentapi.InfoResponse{
Digest: req.Digest,
Size_: bi.Size,
CommittedAt: bi.CommittedAt,
}, nil
}
func (s *Service) Read(req *contentapi.ReadRequest, session contentapi.Content_ReadServer) error {
if err := req.Digest.Validate(); err != nil {
return grpc.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
}
oi, err := s.store.Info(req.Digest)
if err != nil {
return maybeNotFoundGRPC(err, req.Digest.String())
}
rc, err := s.store.Reader(session.Context(), req.Digest)
if err != nil {
return maybeNotFoundGRPC(err, req.Digest.String())
}
defer rc.Close() // TODO(stevvooe): Cache these file descriptors for performance.
ra, ok := rc.(io.ReaderAt)
if !ok {
// TODO(stevvooe): Need to set this up to get correct behavior across
// board. May change interface to store to just return ReaderAtCloser.
// Possibly, we could just return io.ReaderAt and handle file
// descriptors internally.
return errors.New("content service only supports content stores that return ReaderAt")
}
var (
offset = req.Offset
size = req.Size_
// TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably a
// little inefficient for work over a fast network. We can tune this later.
p = bufPool.Get().([]byte)
)
defer bufPool.Put(p)
if offset < 0 {
offset = 0
}
if size <= 0 {
size = oi.Size - offset
}
if offset+size > oi.Size {
return grpc.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
}
if _, err := io.CopyBuffer(
&readResponseWriter{session: session},
io.NewSectionReader(ra, offset, size), p); err != nil {
return err
}
return nil
}
type readResponseWriter struct {
offset int64
session contentapi.Content_ReadServer
}
func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
if err := rw.session.Send(&contentapi.ReadResponse{
Offset: rw.offset,
Data: p,
}); err != nil {
return 0, err
}
rw.offset += int64(len(p))
return len(p), nil
}
func (s *Service) Write(session contentapi.Content_WriteServer) (err error) {
var (
ref string
msg contentapi.WriteResponse
req *contentapi.WriteRequest
)
defer func(msg *contentapi.WriteResponse) {
// pump through the last message if no error was encountered
if err != nil {
return
}
err = session.Send(msg)
}(&msg)
// handle the very first request!
req, err = session.Recv()
if err != nil {
return err
}
ref = req.Ref
if ref == "" {
return grpc.Errorf(codes.InvalidArgument, "first message must have a reference")
}
// this action locks the writer for the session.
wr, err := s.store.Writer(session.Context(), ref)
if err != nil {
return err
}
defer wr.Close()
for {
// TODO(stevvooe): We need to study this behavior in containerd a
// little better to decide where to put this. We may be able to make
// this determination elsewhere and avoid even creating the writer.
//
// Ideally, we just use the expected digest on commit to abandon the
// cost of the move when they collide.
if req.ExpectedDigest != "" {
if _, err := s.store.Info(req.ExpectedDigest); err != nil {
if !IsNotFound(err) {
return err
}
return grpc.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.ExpectedDigest)
}
}
msg.Action = req.Action
ws, err := wr.Status()
if err != nil {
return err
}
msg.Offset = ws.Offset
msg.StartedAt = ws.StartedAt
msg.UpdatedAt = ws.UpdatedAt
switch req.Action {
case contentapi.WriteActionStat:
msg.Digest = wr.Digest()
case contentapi.WriteActionWrite, contentapi.WriteActionCommit:
if req.Offset > 0 {
// validate the offset if provided
if req.Offset != ws.Offset {
return grpc.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
}
}
// issue the write if we actually have data.
if len(req.Data) > 0 {
// While this looks like we could use io.WriterAt here, because we
// maintain the offset as append only, we just issue the write.
n, err := wr.Write(req.Data)
if err != nil {
return err
}
if n != len(req.Data) {
// TODO(stevvooe): Perhaps, we can recover this by including it
// in the offset on the write return.
return grpc.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
}
msg.Offset += int64(n)
}
if req.Action == contentapi.WriteActionCommit {
return wr.Commit(req.ExpectedSize, req.ExpectedDigest)
}
case contentapi.WriteActionAbort:
return s.store.Abort(ref)
}
if err := session.Send(&msg); err != nil {
return err
}
req, err = session.Recv()
if err != nil {
return err
}
}
return nil
}
func (s *Service) Status(*contentapi.StatusRequest, contentapi.Content_StatusServer) error {
return grpc.Errorf(codes.Unimplemented, "not implemented")
}
func maybeNotFoundGRPC(err error, id string) error {
if IsNotFound(err) {
return grpc.Errorf(codes.NotFound, "%v: not found", id)
}
return err
}

content/store.go (new file, 355 lines)

@@ -0,0 +1,355 @@
package content
import (
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"time"
"github.com/docker/containerd/log"
"github.com/nightlyone/lockfile"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// Store is a digest-keyed store for content. All data written into the store is
// stored under a verifiable digest.
//
// Store can generally support multi-reader, single-writer ingest of data,
// including resumable ingest.
type Store struct {
root string
}
func NewStore(root string) (*Store, error) {
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
return nil, err
}
return &Store{
root: root,
}, nil
}
func (s *Store) Info(dgst digest.Digest) (Info, error) {
p := s.blobPath(dgst)
fi, err := os.Stat(p)
if err != nil {
if os.IsNotExist(err) {
err = errNotFound
}
return Info{}, err
}
return Info{
Digest: dgst,
Size: fi.Size(),
CommittedAt: fi.ModTime(),
}, nil
}
// Reader returns an io.ReadCloser for the blob.
//
// TODO(stevvooe): This would work much better as an io.ReaderAt in practice.
// Right now, we are doing type assertion to tease that out, but it won't scale
// well.
func (s *Store) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
fp, err := os.Open(s.blobPath(dgst))
if err != nil {
if os.IsNotExist(err) {
err = errNotFound
}
return nil, err
}
return fp, nil
}
// Delete removes a blob by its digest.
//
// While this is safe to do concurrently, safe exist-removal logic must hold
// some global lock on the store.
func (cs *Store) Delete(dgst digest.Digest) error {
if err := os.RemoveAll(cs.blobPath(dgst)); err != nil {
if !os.IsNotExist(err) {
return err
}
return nil
}
return nil
}
// TODO(stevvooe): Allow querying the set of blobs in the blob store.
// WalkFunc defines the callback for a blob walk.
//
// TODO(stevvooe): Remove the file info. Just need size and modtime. Perhaps,
// not a huge deal, considering we have a path, but let's not just let this one
// go without scrutiny.
type WalkFunc func(path string, fi os.FileInfo, dgst digest.Digest) error
func (cs *Store) Walk(fn WalkFunc) error {
root := filepath.Join(cs.root, "blobs")
var alg digest.Algorithm
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if !fi.IsDir() && !alg.Available() {
return nil
}
// TODO(stevvooe): There are a few more cases with subdirs that should be
// handled in case the layout gets corrupted. This isn't strict enough and
// may spew bad data.
if path == root {
return nil
}
if filepath.Dir(path) == root {
alg = digest.Algorithm(filepath.Base(path))
if !alg.Available() {
alg = ""
return filepath.SkipDir
}
// descending into a hash directory
return nil
}
dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
if err := dgst.Validate(); err != nil {
// log error but don't report
log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
// if we see this, it could mean some sort of corruption of the
// store or extra paths not expected previously.
}
return fn(path, fi, dgst)
})
}
// Status returns the current status of a blob by the ingest ref.
func (s *Store) Status(ref string) (Status, error) {
dp := filepath.Join(s.ingestRoot(ref), "data")
return s.status(dp)
}
// status works like Status above, except it takes the ingest path directly.
func (s *Store) status(ingestPath string) (Status, error) {
dp := filepath.Join(ingestPath, "data")
fi, err := os.Stat(dp)
if err != nil {
return Status{}, err
}
ref, err := readFileString(filepath.Join(ingestPath, "ref"))
if err != nil {
return Status{}, err
}
var startedAt time.Time
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
startedAt = time.Unix(st.Ctim.Sec, st.Ctim.Nsec)
} else {
startedAt = fi.ModTime()
}
return Status{
Ref: ref,
Offset: fi.Size(),
UpdatedAt: fi.ModTime(),
StartedAt: startedAt,
}, nil
}

// Writer begins or resumes the active writer identified by ref. If the writer
// is already in use, an error is returned. Only one writer may be in use per
// ref at a time.
//
// The argument `ref` is used to uniquely identify a long-lived writer transaction.
func (s *Store) Writer(ctx context.Context, ref string) (Writer, error) {
path, refp, data, lock, err := s.ingestPaths(ref)
if err != nil {
return nil, err
}
if err := tryLock(lock); err != nil {
if !os.IsNotExist(errors.Cause(err)) {
return nil, errors.Wrapf(err, "locking %v failed", ref)
}
// if it doesn't exist, we'll make it so below!
}
var (
digester = digest.Canonical.Digester()
offset int64
startedAt time.Time
updatedAt time.Time
)
// ensure that the ingest path has been created.
if err := os.Mkdir(path, 0755); err != nil {
if !os.IsExist(err) {
return nil, err
}
// validate that we have no collision for the ref.
refraw, err := readFileString(refp)
if err != nil {
return nil, errors.Wrap(err, "could not read ref")
}
if ref != refraw {
// NOTE(stevvooe): This is fairly catastrophic. Either we have some
// layout corruption or a hash collision for the ref key.
return nil, errors.Errorf("ref key does not match: %v != %v", ref, refraw)
}
// slow slow slow!! Send this to a goroutine or use resumable hashes.
fp, err := os.Open(data)
if err != nil {
return nil, err
}
defer fp.Close()
p := bufPool.Get().([]byte)
defer bufPool.Put(p)
offset, err = io.CopyBuffer(digester.Hash(), fp, p)
if err != nil {
return nil, err
}
fi, err := os.Stat(data)
if err != nil {
return nil, err
}
updatedAt = fi.ModTime()
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
startedAt = time.Unix(st.Ctim.Sec, st.Ctim.Nsec)
} else {
startedAt = updatedAt
}
} else {
// The ingest is new, so we need to set up the target location.
// Write the ref to a file for later use.
if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
return nil, err
}
startedAt = time.Now()
updatedAt = startedAt
}
fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
return nil, errors.Wrap(err, "failed to open data file")
}
// Seek to the end of existing data so resumed writes append rather than
// clobbering the file from the start while offset claims otherwise.
if _, err := fp.Seek(offset, io.SeekStart); err != nil {
return nil, errors.Wrap(err, "could not seek to current write offset")
}
return &writer{
s: s,
fp: fp,
lock: lock,
ref: ref,
path: path,
offset: offset,
digester: digester,
startedAt: startedAt,
updatedAt: updatedAt,
}, nil
}
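
// Illustrative sketch of a full ingest (rd, size and expected are assumed to
// be supplied by the caller, e.g. from a registry response; they are not
// names from this commit):
//
//	w, err := s.Writer(ctx, "layer-upload") // ref names the transaction
//	if err != nil {
//		return err
//	}
//	defer w.Close()
//	if _, err := io.Copy(w, rd); err != nil {
//		return err
//	}
//	return w.Commit(size, expected)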

// Abort an active transaction keyed by ref. If the ingest is active, it will
// be cancelled. Any resources associated with the ingest will be cleaned up.
func (s *Store) Abort(ref string) error {
root := s.ingestRoot(ref)
if err := os.RemoveAll(root); err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return nil
}

func (s *Store) Active() ([]Status, error) {
fp, err := os.Open(filepath.Join(s.root, "ingest"))
if err != nil {
return nil, err
}
fis, err := fp.Readdir(-1)
if err != nil {
return nil, err
}
var active []Status
for _, fi := range fis {
p := filepath.Join(s.root, "ingest", fi.Name())
stat, err := s.status(p)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
// TODO(stevvooe): This is a common error if uploads are being
// completed while making this listing. Need to consider taking a
// lock on the whole store to coordinate this aspect.
//
// Another option is to cleanup downloads asynchronously and
// coordinate this method with the cleanup process.
//
// For now, we just skip them, as they really don't exist.
continue
}
active = append(active, stat)
}
return active, nil
}
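
// Illustrative sketch: listing in-flight ingests. Offset reports how many
// bytes have been written so far for each ref.
//
//	statuses, err := s.Active()
//	if err != nil {
//		return err
//	}
//	for _, st := range statuses {
//		fmt.Printf("%s: %d bytes, updated %s\n", st.Ref, st.Offset, st.UpdatedAt)
//	}
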
func (cs *Store) blobPath(dgst digest.Digest) string {
return filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
}

func (s *Store) ingestRoot(ref string) string {
dgst := digest.FromString(ref)
return filepath.Join(s.root, "ingest", dgst.Hex())
}

// ingestPaths returns the paths for an ingest, including the lockfile. The
// paths are the following:
//
// - root: entire ingest directory
// - ref: name of the starting ref, must be unique
// - data: file where data is written
// - lock: lock file location
func (s *Store) ingestPaths(ref string) (string, string, string, lockfile.Lockfile, error) {
var (
fp = s.ingestRoot(ref)
rp = filepath.Join(fp, "ref")
lp = filepath.Join(fp, "lock")
dp = filepath.Join(fp, "data")
)
lock, err := lockfile.New(lp)
if err != nil {
return "", "", "", "", errors.Wrapf(err, "error creating lockfile %v", lp)
}
return fp, rp, dp, lock, nil
}
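
// For reference, the on-disk layout maintained by this file looks like the
// following sketch, where <hex(ref)> is the hex digest of the ref string as
// computed by ingestRoot:
//
//	<root>/
//	├── blobs/<algorithm>/<hex digest>   committed content, addressed by digest
//	└── ingest/<hex(ref)>/
//	    ├── ref    original ref string, for collision checks
//	    ├── data   bytes written so far
//	    └── lock   lockfile guarding the single active writer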

View file

@@ -4,54 +4,55 @@ import (
 	"log"
 	"os"
 	"path/filepath"
+	"time"
 
 	"github.com/nightlyone/lockfile"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
 
-// Writer represents a write transaction against the blob store.
-type Writer struct {
-	cs       *Store
+// writer represents a write transaction against the blob store.
+type writer struct {
+	s        *Store
 	fp       *os.File // opened data file
 	lock     lockfile.Lockfile
 	path     string // path to writer dir
 	ref      string // ref key
 	offset   int64
 	digester digest.Digester
+	startedAt time.Time
+	updatedAt time.Time
 }
 
-func (cw *Writer) Ref() string {
-	return cw.ref
-}
-
-// Size returns the current size written.
-//
-// Cannot be called concurrently with `Write`. If you need need concurrent
-// status, query it with `Store.Stat`.
-func (cw *Writer) Size() int64 {
-	return cw.offset
+func (w *writer) Status() (Status, error) {
+	return Status{
+		Ref:       w.ref,
+		Offset:    w.offset,
+		StartedAt: w.startedAt,
+		UpdatedAt: w.updatedAt,
+	}, nil
 }
 
 // Digest returns the current digest of the content, up to the current write.
 //
 // Cannot be called concurrently with `Write`.
-func (cw *Writer) Digest() digest.Digest {
-	return cw.digester.Digest()
+func (w *writer) Digest() digest.Digest {
+	return w.digester.Digest()
 }
 
 // Write p to the transaction.
 //
 // Note that writes are unbuffered to the backing file. When writing, it is
 // recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
-func (cw *Writer) Write(p []byte) (n int, err error) {
-	n, err = cw.fp.Write(p)
-	cw.digester.Hash().Write(p[:n])
-	cw.offset += int64(len(p))
+func (w *writer) Write(p []byte) (n int, err error) {
+	n, err = w.fp.Write(p)
+	w.digester.Hash().Write(p[:n])
+	w.offset += int64(len(p))
+	w.updatedAt = time.Now()
 	return n, err
 }
 
-func (cw *Writer) Commit(size int64, expected digest.Digest) error {
+func (cw *writer) Commit(size int64, expected digest.Digest) error {
 	if err := cw.fp.Sync(); err != nil {
 		return errors.Wrap(err, "sync failed")
 	}

@@ -85,7 +86,7 @@ func (cw *Writer) Commit(size int64, expected digest.Digest) error {
 	var (
 		ingest = filepath.Join(cw.path, "data")
-		target = cw.cs.blobPath(dgst)
+		target = cw.s.blobPath(dgst)
 	)
 
 	// make sure parent directories of blob exist

@@ -118,7 +119,7 @@ func (cw *Writer) Commit(size int64, expected digest.Digest) error {
 //
 // To abandon a transaction completely, first call close then `Store.Remove` to
 // clean up the associated resources.
-func (cw *Writer) Close() (err error) {
+func (cw *writer) Close() (err error) {
 	if err := unlock(cw.lock); err != nil {
 		log.Printf("unlock failed: %v", err)
 	}

View file

@@ -18,8 +18,8 @@ github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/matttproud/golang_protobuf_extensions v1.0.0
 # go-units from Docker; latest release as of 12/16/2016
 github.com/docker/go-units v0.3.1
-# gogo/protobuf - master as of 12/16/2016 (latest tagged release doesn't have needed change)
-github.com/gogo/protobuf 06ec6c31ff1bac6ed4e205a547a3d72934813ef3
+# gogo/protobuf - master as of 2/15/2016 (latest tagged release doesn't have needed change)
+github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
 # golang support for protobufs - master as of 12/16/2016
 github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
 # runc, latest release as of 12/16/2016

View file

@@ -64,6 +64,15 @@ var E_EnumCustomname = &proto.ExtensionDesc{
	Filename:      "gogo.proto",
}

var E_Enumdecl = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         62024,
	Name:          "gogoproto.enumdecl",
	Tag:           "varint,62024,opt,name=enumdecl",
	Filename:      "gogo.proto",
}

var E_EnumvalueCustomname = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.EnumValueOptions)(nil),
	ExtensionType: (*string)(nil),

@@ -307,6 +316,24 @@ var E_CompareAll = &proto.ExtensionDesc{
	Filename:      "gogo.proto",
}

var E_TypedeclAll = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63030,
	Name:          "gogoproto.typedecl_all",
	Tag:           "varint,63030,opt,name=typedecl_all,json=typedeclAll",
	Filename:      "gogo.proto",
}

var E_EnumdeclAll = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FileOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         63031,
	Name:          "gogoproto.enumdecl_all",
	Tag:           "varint,63031,opt,name=enumdecl_all,json=enumdeclAll",
	Filename:      "gogo.proto",
}

var E_GoprotoGetters = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),

@@ -505,6 +532,15 @@ var E_Compare = &proto.ExtensionDesc{
	Filename:      "gogo.proto",
}

var E_Typedecl = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         64030,
	Name:          "gogoproto.typedecl",
	Tag:           "varint,64030,opt,name=typedecl",
	Filename:      "gogo.proto",
}

var E_Nullable = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),

@@ -609,6 +645,7 @@ func init() {
	proto.RegisterExtension(E_GoprotoEnumStringer)
	proto.RegisterExtension(E_EnumStringer)
	proto.RegisterExtension(E_EnumCustomname)
	proto.RegisterExtension(E_Enumdecl)
	proto.RegisterExtension(E_EnumvalueCustomname)
	proto.RegisterExtension(E_GoprotoGettersAll)
	proto.RegisterExtension(E_GoprotoEnumPrefixAll)

@@ -636,6 +673,8 @@ func init() {
	proto.RegisterExtension(E_GogoprotoImport)
	proto.RegisterExtension(E_ProtosizerAll)
	proto.RegisterExtension(E_CompareAll)
	proto.RegisterExtension(E_TypedeclAll)
	proto.RegisterExtension(E_EnumdeclAll)
	proto.RegisterExtension(E_GoprotoGetters)
	proto.RegisterExtension(E_GoprotoStringer)
	proto.RegisterExtension(E_VerboseEqual)

@@ -658,6 +697,7 @@ func init() {
	proto.RegisterExtension(E_GoprotoUnrecognized)
	proto.RegisterExtension(E_Protosizer)
	proto.RegisterExtension(E_Compare)
	proto.RegisterExtension(E_Typedecl)
	proto.RegisterExtension(E_Nullable)
	proto.RegisterExtension(E_Embed)
	proto.RegisterExtension(E_Customtype)
@ -674,76 +714,79 @@ func init() {
func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) }
var fileDescriptorGogo = []byte{ var fileDescriptorGogo = []byte{
// 1129 bytes of a gzipped FileDescriptorProto // 1178 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x98, 0x4b, 0x6f, 0x1c, 0x45,
0x14, 0x87, 0x85, 0x70, 0x64, 0xcf, 0xf3, 0x86, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3, 0xe4, 0x10, 0x80, 0x85, 0x48, 0x64, 0x6f, 0xf9, 0x85, 0xd7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3, 0xe4,
0x9c, 0x22, 0x94, 0xb2, 0x22, 0xcb, 0xb1, 0x9c, 0x51, 0x10, 0x86, 0x91, 0x89, 0x03, 0x88, 0xc3, 0x9c, 0x22, 0x94, 0xb6, 0x22, 0xcb, 0xb1, 0x1c, 0x2b, 0x08, 0x83, 0x65, 0xe2, 0x00, 0xe2, 0xb0,
0xa8, 0x67, 0xa6, 0xdc, 0x69, 0xe8, 0xee, 0x6a, 0xba, 0xaa, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21, 0x1a, 0xef, 0xb6, 0x27, 0x0b, 0x33, 0xd3, 0xc3, 0x74, 0x4f, 0x14, 0xe7, 0x86, 0xc2, 0x43, 0x08,
0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0, 0xf1, 0x46, 0x82, 0x84, 0x24, 0xc0, 0x81, 0xf7, 0x33, 0x3c, 0xaf, 0x5c, 0x78, 0x5c, 0xf9, 0x0f,
0x02, 0x84, 0xdd, 0x37, 0x5f, 0x50, 0x75, 0xbf, 0xd7, 0x53, 0xdd, 0x1e, 0xa9, 0x6a, 0x6e, 0xe3, 0x5c, 0x00, 0xf3, 0xf6, 0xcd, 0x17, 0x54, 0x33, 0x55, 0xb3, 0xbd, 0xeb, 0x95, 0xba, 0xf7, 0x36,
0x71, 0x7d, 0xdf, 0x54, 0xbf, 0x37, 0xf5, 0x7e, 0x53, 0x00, 0xbe, 0xf0, 0xc5, 0x52, 0x92, 0x0a, 0xbb, 0xee, 0xef, 0xdb, 0x9a, 0xaa, 0xe9, 0xaa, 0x69, 0x03, 0x84, 0x2a, 0x54, 0xb3, 0x69, 0xa6,
0x25, 0x9a, 0x0d, 0xfd, 0x3a, 0x7f, 0x79, 0xe8, 0xb0, 0x2f, 0x84, 0x1f, 0xf2, 0xa3, 0xf9, 0x5f, 0x8c, 0xaa, 0xd7, 0xf0, 0xba, 0xb8, 0x3c, 0x78, 0x28, 0x54, 0x2a, 0x8c, 0xe4, 0x91, 0xe2, 0xd3,
0xdd, 0x6c, 0xfb, 0x68, 0x9f, 0xcb, 0x5e, 0x1a, 0x24, 0x4a, 0xa4, 0xc5, 0x62, 0x76, 0x3f, 0xcc, 0x46, 0xbe, 0x79, 0xa4, 0x25, 0x75, 0x33, 0x6b, 0xa7, 0x46, 0x65, 0xe5, 0x62, 0x71, 0x2f, 0x4c,
0xe3, 0xe2, 0x0e, 0x8f, 0xb3, 0xa8, 0x93, 0xa4, 0x7c, 0x3b, 0xb8, 0xd0, 0xbc, 0x63, 0xa9, 0x20, 0xd1, 0xe2, 0x86, 0x4c, 0xf2, 0xb8, 0x91, 0x66, 0x72, 0xb3, 0x7d, 0xbe, 0x7e, 0xdb, 0x6c, 0x49,
0x97, 0x88, 0x5c, 0x5a, 0x8f, 0xb3, 0xe8, 0x81, 0x44, 0x05, 0x22, 0x96, 0x07, 0xaf, 0xff, 0x72, 0xce, 0x32, 0x39, 0xbb, 0x94, 0xe4, 0xf1, 0x7d, 0xa9, 0x69, 0xab, 0x44, 0x1f, 0xb8, 0xfe, 0xcb,
0xf3, 0xe1, 0x9b, 0xee, 0x9e, 0xd8, 0x9c, 0x43, 0x54, 0xff, 0xaf, 0x9d, 0x83, 0x6c, 0x13, 0x6e, 0x8d, 0x87, 0x6e, 0xb8, 0x73, 0x78, 0x6d, 0x92, 0x50, 0xfc, 0xdb, 0x6a, 0x01, 0x8a, 0x35, 0xb8,
0xad, 0xf8, 0xa4, 0x4a, 0x83, 0xd8, 0xe7, 0xa9, 0xc5, 0xf8, 0x03, 0x1a, 0xe7, 0x0d, 0xe3, 0x83, 0xb9, 0xcb, 0xa7, 0x4d, 0xd6, 0x4e, 0x42, 0x99, 0x39, 0x8c, 0xdf, 0x93, 0x71, 0xca, 0x32, 0xde,
0x88, 0xb2, 0x35, 0x98, 0x1e, 0xc5, 0xf5, 0x23, 0xba, 0xa6, 0xb8, 0x29, 0x69, 0xc1, 0x6c, 0x2e, 0x4f, 0xa8, 0x58, 0x84, 0xb1, 0x41, 0x5c, 0x3f, 0x90, 0x6b, 0x54, 0xda, 0x92, 0x65, 0x98, 0x28,
0xe9, 0x65, 0x52, 0x89, 0x28, 0xf6, 0x22, 0x6e, 0xd1, 0xfc, 0x94, 0x6b, 0x1a, 0x9b, 0x33, 0x1a, 0x24, 0xcd, 0x5c, 0x1b, 0x15, 0x27, 0x41, 0x2c, 0x1d, 0x9a, 0x1f, 0x0b, 0x4d, 0x6d, 0x6d, 0x1c,
0x5b, 0x2b, 0x29, 0x76, 0x16, 0x16, 0xf4, 0x3b, 0xe7, 0xbd, 0x30, 0xe3, 0xa6, 0xed, 0xc8, 0x50, 0xb1, 0xc5, 0x8a, 0x12, 0x02, 0x86, 0xf1, 0x9b, 0x96, 0x6c, 0x46, 0x0e, 0xc3, 0x4f, 0x14, 0x48,
0xdb, 0x59, 0xbd, 0x8c, 0x94, 0x3f, 0x5f, 0x1a, 0xcb, 0x95, 0xf3, 0xa5, 0xc0, 0xf0, 0x1a, 0x9d, 0xb5, 0x5e, 0x9c, 0x81, 0x69, 0xbc, 0x3e, 0x17, 0x44, 0xb9, 0xb4, 0x23, 0x39, 0xdc, 0xd7, 0x73,
0xf0, 0xb9, 0x52, 0x3c, 0x95, 0x1d, 0x2f, 0x0c, 0x87, 0x6c, 0xf2, 0x54, 0x10, 0x96, 0xc6, 0xcb, 0x06, 0x97, 0xb1, 0xec, 0xe7, 0x8b, 0xfb, 0x8a, 0x70, 0xa6, 0x2a, 0x81, 0x15, 0x93, 0x55, 0xc5,
0x37, 0xaa, 0x9d, 0x68, 0x15, 0xe4, 0x6a, 0x18, 0xb2, 0x2d, 0xb8, 0x6d, 0x48, 0x67, 0x1d, 0x9c, 0x50, 0x1a, 0x23, 0x33, 0xdd, 0x08, 0xa2, 0x7e, 0xe1, 0x9d, 0x6c, 0x47, 0x95, 0xf1, 0xd2, 0x76,
0x57, 0xd0, 0xb9, 0xb0, 0xaf, 0xbb, 0x5a, 0xdb, 0x06, 0x7a, 0xbf, 0xec, 0x87, 0x83, 0xf3, 0x2d, 0x77, 0x15, 0x97, 0x4b, 0x72, 0x21, 0x8a, 0xc4, 0x3a, 0xdc, 0xd2, 0xe7, 0xa9, 0xf0, 0x70, 0x5e,
0x74, 0x36, 0x91, 0xa5, 0xb6, 0x68, 0xe3, 0xbd, 0x30, 0x77, 0x9e, 0xa7, 0x5d, 0x21, 0x79, 0x87, 0x26, 0xe7, 0xf4, 0x9e, 0x27, 0x03, 0xb5, 0xab, 0xc0, 0xdf, 0x57, 0xb5, 0xf4, 0x70, 0xbe, 0x41,
0x3f, 0x91, 0x79, 0xa1, 0x83, 0xee, 0x2a, 0xea, 0x66, 0x11, 0x5c, 0xd7, 0x9c, 0x76, 0x1d, 0x87, 0xce, 0x3a, 0xb1, 0x5c, 0x52, 0x34, 0xde, 0x0d, 0x93, 0xe7, 0x64, 0xb6, 0xa1, 0xb4, 0x6c, 0xc8,
0x89, 0x6d, 0xaf, 0xc7, 0x1d, 0x14, 0xd7, 0x50, 0x31, 0xae, 0xd7, 0x6b, 0x74, 0x15, 0xa6, 0x7c, 0xc7, 0xf2, 0x20, 0xf2, 0xd0, 0x5d, 0x21, 0xdd, 0x04, 0x81, 0x4b, 0xc8, 0xa1, 0xeb, 0x18, 0x0c,
0x51, 0x3c, 0x92, 0x03, 0xfe, 0x36, 0xe2, 0x93, 0xc4, 0xa0, 0x22, 0x11, 0x49, 0x16, 0x7a, 0xca, 0x6f, 0x06, 0x4d, 0xe9, 0xa1, 0xb8, 0x4a, 0x8a, 0x21, 0x5c, 0x8f, 0xe8, 0x02, 0x8c, 0x86, 0xaa,
0x65, 0x07, 0xef, 0x90, 0x82, 0x18, 0x54, 0x8c, 0x50, 0xd6, 0x77, 0x49, 0x21, 0x8d, 0x7a, 0xae, 0xbc, 0x25, 0x0f, 0xfc, 0x1a, 0xe1, 0x23, 0xcc, 0x90, 0x22, 0x55, 0x69, 0x1e, 0x05, 0xc6, 0x27,
0xc0, 0xa4, 0x88, 0xc3, 0x1d, 0x11, 0xbb, 0x6c, 0xe2, 0x3d, 0x34, 0x00, 0x22, 0x5a, 0xb0, 0x0c, 0x82, 0x37, 0x59, 0xc1, 0x0c, 0x29, 0x06, 0x48, 0xeb, 0x5b, 0xac, 0xd0, 0x56, 0x3e, 0xe7, 0x61,
0x0d, 0xd7, 0x46, 0xbc, 0x8f, 0xf8, 0x04, 0xa7, 0x0e, 0xb4, 0x60, 0x96, 0x86, 0x4c, 0x20, 0x62, 0x44, 0x25, 0xd1, 0x96, 0x4a, 0x7c, 0x82, 0x78, 0x9b, 0x0c, 0x40, 0x08, 0x0a, 0xe6, 0xa0, 0xe6,
0x07, 0xc5, 0x07, 0xa8, 0x98, 0x31, 0x30, 0x7c, 0x0c, 0xc5, 0xa5, 0xf2, 0xb9, 0x8b, 0xe4, 0x43, 0x5b, 0x88, 0x77, 0xb6, 0x79, 0x7b, 0x70, 0x05, 0x96, 0x61, 0x82, 0x1b, 0x54, 0x5b, 0x25, 0x1e,
0x7a, 0x0c, 0x44, 0xb0, 0x94, 0x5d, 0x1e, 0xf7, 0xce, 0xb9, 0x19, 0x3e, 0xa2, 0x52, 0x12, 0xa3, 0x8a, 0x77, 0x49, 0x31, 0x6e, 0x61, 0x74, 0x1b, 0x46, 0x6a, 0x13, 0x4a, 0x1f, 0xc9, 0x7b, 0x7c,
0x15, 0x6b, 0x30, 0x1d, 0x79, 0xa9, 0x3c, 0xe7, 0x85, 0x4e, 0xed, 0xf8, 0x18, 0x1d, 0x53, 0x25, 0x1b, 0x84, 0x50, 0x2a, 0x37, 0x64, 0xd2, 0x3c, 0xeb, 0x67, 0x78, 0x9f, 0x53, 0xc9, 0x0c, 0x2a,
0x84, 0x15, 0xc9, 0xe2, 0x51, 0x34, 0x9f, 0x50, 0x45, 0x0c, 0x0c, 0x8f, 0x9e, 0x54, 0x5e, 0x37, 0x16, 0x61, 0x2c, 0x0e, 0x32, 0x7d, 0x36, 0x88, 0xbc, 0xca, 0xf1, 0x01, 0x39, 0x46, 0x2b, 0x88,
0xe4, 0x9d, 0x51, 0x6c, 0x9f, 0xd2, 0xd1, 0x2b, 0xd8, 0x0d, 0xd3, 0xb8, 0x0c, 0x0d, 0x19, 0x5c, 0x32, 0x92, 0x27, 0x83, 0x68, 0x3e, 0xe4, 0x8c, 0x58, 0x18, 0x6d, 0x3d, 0x6d, 0x82, 0x8d, 0x48,
0x74, 0xd2, 0x7c, 0x46, 0x9d, 0xce, 0x01, 0x0d, 0x3f, 0x02, 0xb7, 0x0f, 0x1d, 0xf5, 0x0e, 0xb2, 0x36, 0x06, 0xb1, 0x7d, 0xc4, 0x5b, 0xaf, 0x64, 0x57, 0x6c, 0xe3, 0x1c, 0xd4, 0x74, 0xfb, 0x82,
0xcf, 0x51, 0xb6, 0x38, 0x64, 0xdc, 0xe3, 0x48, 0x18, 0x55, 0xf9, 0x05, 0x8d, 0x04, 0x5e, 0x73, 0x97, 0xe6, 0x63, 0xae, 0x74, 0x01, 0x20, 0xfc, 0x10, 0xdc, 0xda, 0x77, 0x4c, 0x78, 0xc8, 0x3e,
0xb5, 0x61, 0x21, 0x8b, 0xa5, 0xb7, 0x3d, 0x5a, 0xd5, 0xbe, 0xa4, 0xaa, 0x15, 0x6c, 0xa5, 0x6a, 0x21, 0xd9, 0x4c, 0x9f, 0x51, 0x41, 0x2d, 0x61, 0x50, 0xe5, 0xa7, 0xdc, 0x12, 0x64, 0x8f, 0x6b,
0x67, 0x60, 0x11, 0x8d, 0xa3, 0xf5, 0xf5, 0x2b, 0x1a, 0xac, 0x05, 0xbd, 0x55, 0xed, 0xee, 0xa3, 0x15, 0xa6, 0xf3, 0x44, 0x07, 0x9b, 0x83, 0x65, 0xed, 0x33, 0xce, 0x5a, 0xc9, 0x76, 0x65, 0xed,
0x70, 0xa8, 0x2c, 0xe7, 0x05, 0xc5, 0x63, 0xa9, 0x99, 0x4e, 0xe4, 0x25, 0x0e, 0xe6, 0xeb, 0x68, 0x34, 0xcc, 0x90, 0x71, 0xb0, 0xba, 0x7e, 0xce, 0x8d, 0xb5, 0xa4, 0xd7, 0xbb, 0xab, 0xfb, 0x30,
0xa6, 0x89, 0xbf, 0x5e, 0x0a, 0x36, 0xbc, 0x44, 0xcb, 0x1f, 0x86, 0x83, 0x24, 0xcf, 0xe2, 0x94, 0x1c, 0xac, 0xd2, 0x79, 0xde, 0xc8, 0x44, 0x23, 0xd3, 0x88, 0x83, 0xd4, 0xc3, 0x7c, 0x9d, 0xcc,
0xf7, 0x84, 0x1f, 0x07, 0x17, 0x79, 0xdf, 0x41, 0xfd, 0x75, 0xad, 0x55, 0x5b, 0x06, 0xae, 0xcd, 0xdc, 0xf1, 0x97, 0x2a, 0xc1, 0x4a, 0x90, 0xa2, 0xfc, 0x41, 0x38, 0xc0, 0xf2, 0x3c, 0xc9, 0x64,
0xa7, 0xe1, 0x96, 0xf2, 0xf7, 0x46, 0x27, 0x88, 0x12, 0x91, 0x2a, 0x8b, 0xf1, 0x1b, 0xea, 0x54, 0x53, 0x85, 0x49, 0xfb, 0x82, 0x6c, 0x79, 0xa8, 0xbf, 0xe8, 0x29, 0xd5, 0xba, 0x85, 0xa3, 0xf9,
0xc9, 0x9d, 0xce, 0x31, 0xb6, 0x0e, 0x33, 0xf9, 0x9f, 0xae, 0x5f, 0xc9, 0x6f, 0x51, 0x34, 0x3d, 0x14, 0xdc, 0x54, 0xbd, 0xab, 0x34, 0xda, 0x71, 0xaa, 0x32, 0xe3, 0x30, 0x7e, 0xc9, 0x95, 0xaa,
0xa0, 0x70, 0x70, 0xf4, 0x44, 0x94, 0x78, 0xa9, 0xcb, 0xfc, 0xfb, 0x8e, 0x06, 0x07, 0x22, 0xc5, 0xb8, 0x53, 0x05, 0x26, 0x96, 0x60, 0xbc, 0xf8, 0xe8, 0xfb, 0x48, 0x7e, 0x45, 0xa2, 0xb1, 0x0e,
0xb7, 0x6f, 0xb6, 0x96, 0xc4, 0xcd, 0xbb, 0xf6, 0x49, 0x36, 0xb8, 0x94, 0x9e, 0x5f, 0x7a, 0x9e, 0x45, 0x8d, 0xa3, 0xa9, 0xe2, 0x34, 0xc8, 0x7c, 0xfa, 0xdf, 0xd7, 0xdc, 0x38, 0x08, 0xa1, 0xc6,
0xdc, 0xc5, 0x33, 0x5b, 0x0d, 0x62, 0x76, 0x9f, 0x2e, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x4b, 0xbb, 0x61, 0xb6, 0x52, 0x89, 0xd3, 0xde, 0xc3, 0xf0, 0x0d, 0x37, 0x0e, 0x66, 0x48, 0xc1, 0x2f, 0x0c,
0x65, 0x85, 0x2a, 0x69, 0xc9, 0x4e, 0xc1, 0x74, 0x25, 0x2a, 0xed, 0xaa, 0xa7, 0x50, 0x35, 0x65, 0x1e, 0x8a, 0x6f, 0x59, 0xc1, 0x4c, 0xb9, 0x07, 0x26, 0x7a, 0xde, 0x07, 0xea, 0x77, 0xec, 0xb1,
0x26, 0x25, 0x3b, 0x06, 0x63, 0x3a, 0xf6, 0xec, 0xf8, 0xd3, 0x88, 0xe7, 0xcb, 0xd9, 0x09, 0x98, 0xac, 0x48, 0xad, 0x83, 0xb0, 0x12, 0x3d, 0xbe, 0x43, 0x9d, 0xa3, 0xfb, 0x75, 0x40, 0xdc, 0x83,
0xa0, 0xb8, 0xb3, 0xa3, 0xcf, 0x20, 0x5a, 0x22, 0x1a, 0xa7, 0xa8, 0xb3, 0xe3, 0xcf, 0x12, 0x4e, 0x45, 0xea, 0x1e, 0xda, 0x6e, 0xd9, 0xc5, 0x9d, 0xaa, 0x4e, 0x5d, 0x33, 0x5b, 0x9c, 0x84, 0xb1,
0x88, 0xc6, 0xdd, 0x4b, 0xf8, 0xfd, 0xf3, 0x63, 0x38, 0xae, 0xa8, 0x76, 0xcb, 0x30, 0x8e, 0x19, 0xae, 0x81, 0xed, 0x56, 0x3d, 0x41, 0xaa, 0x51, 0x7b, 0x5e, 0x8b, 0xa3, 0xb0, 0x0f, 0x87, 0xaf,
0x67, 0xa7, 0x9f, 0xc3, 0x0f, 0x27, 0x82, 0xdd, 0x03, 0x07, 0x1c, 0x0b, 0xfe, 0x02, 0xa2, 0xc5, 0x1b, 0x7f, 0x92, 0xf0, 0x62, 0xb9, 0x38, 0x0e, 0xc3, 0x3c, 0x74, 0xdd, 0xe8, 0x53, 0x84, 0x56,
0x7a, 0xb6, 0x06, 0x93, 0x46, 0xae, 0xd9, 0xf1, 0x17, 0x11, 0x37, 0x29, 0xbd, 0x75, 0xcc, 0x35, 0x08, 0xe2, 0x3c, 0x70, 0xdd, 0xf8, 0xd3, 0x8c, 0x33, 0x82, 0xb8, 0x7f, 0x0a, 0xbf, 0x7b, 0x76,
0xbb, 0xe0, 0x25, 0xda, 0x3a, 0x12, 0xba, 0x6c, 0x14, 0x69, 0x76, 0xfa, 0x65, 0xaa, 0x3a, 0x21, 0x1f, 0x35, 0x4d, 0xce, 0xdd, 0x1c, 0x0c, 0xd1, 0xa4, 0x75, 0xd3, 0xcf, 0xd0, 0x8f, 0x33, 0x21,
0x6c, 0x05, 0x1a, 0xe5, 0x98, 0xb2, 0xf3, 0xaf, 0x20, 0x3f, 0x60, 0x74, 0x05, 0x8c, 0x31, 0x69, 0xee, 0x82, 0xfd, 0x9e, 0x09, 0x7f, 0x8e, 0xd0, 0x72, 0xbd, 0x58, 0x84, 0x11, 0x6b, 0xba, 0xba,
0x57, 0xbc, 0x4a, 0x15, 0x30, 0x28, 0x7d, 0x8c, 0xea, 0xd1, 0x67, 0x37, 0xbd, 0x46, 0xc7, 0xa8, 0xf1, 0xe7, 0x09, 0xb7, 0x29, 0x0c, 0x9d, 0xa6, 0xab, 0x5b, 0xf0, 0x02, 0x87, 0x4e, 0x04, 0xa6,
0x96, 0x7c, 0xba, 0x9b, 0xf9, 0xb4, 0xb0, 0x2b, 0x5e, 0xa7, 0x6e, 0xe6, 0xeb, 0xf5, 0x36, 0xea, 0x8d, 0x07, 0xab, 0x9b, 0x7e, 0x91, 0xb3, 0xce, 0x88, 0x98, 0x87, 0x5a, 0xd5, 0x2c, 0xdd, 0xfc,
0x59, 0x62, 0x77, 0xbc, 0x41, 0xdb, 0xa8, 0x45, 0x09, 0x6b, 0x43, 0x73, 0x7f, 0x8e, 0xd8, 0x7d, 0x4b, 0xc4, 0x77, 0x18, 0xcc, 0x80, 0xd5, 0xac, 0xdd, 0x8a, 0x97, 0x39, 0x03, 0x16, 0x85, 0xdb,
0x6f, 0xa2, 0x6f, 0x6e, 0x5f, 0x8c, 0xb0, 0x87, 0x60, 0x71, 0x78, 0x86, 0xd8, 0xad, 0x97, 0x77, 0xa8, 0x77, 0x00, 0xbb, 0x4d, 0xaf, 0xf0, 0x36, 0xea, 0x99, 0xbf, 0x58, 0xcd, 0xa2, 0x67, 0xb9,
0x6b, 0xbf, 0xfa, 0xcd, 0x08, 0x61, 0x67, 0x06, 0xbf, 0xfa, 0xcd, 0xfc, 0xb0, 0x6b, 0xaf, 0xec, 0x15, 0xaf, 0x72, 0x35, 0x8b, 0xf5, 0x18, 0x46, 0xef, 0x44, 0x73, 0x3b, 0x5e, 0xe3, 0x30, 0x7a,
0x56, 0x2f, 0x76, 0x66, 0x7c, 0xb0, 0x55, 0x80, 0xc1, 0xe8, 0xb6, 0xbb, 0xae, 0xa2, 0xcb, 0x80, 0x06, 0x9a, 0x58, 0x85, 0xfa, 0xde, 0x69, 0xe6, 0xf6, 0xbd, 0x4e, 0xbe, 0xc9, 0x3d, 0xc3, 0x4c,
0xf4, 0xd1, 0xc0, 0xc9, 0x6d, 0xe7, 0xaf, 0xd1, 0xd1, 0x40, 0x82, 0x2d, 0xc3, 0x44, 0x9c, 0x85, 0x3c, 0x00, 0x33, 0xfd, 0x27, 0x99, 0xdb, 0x7a, 0x69, 0xa7, 0xe7, 0xec, 0x61, 0x0f, 0x32, 0x71,
0xa1, 0xfe, 0x72, 0x34, 0xef, 0x1c, 0x12, 0x13, 0x3c, 0xec, 0x13, 0xfb, 0xeb, 0x1e, 0x1e, 0x0c, 0xba, 0x73, 0xf6, 0xb0, 0xa7, 0x98, 0x5b, 0x7b, 0x79, 0xa7, 0xfb, 0x68, 0x6a, 0x0f, 0x31, 0xb1,
0x02, 0xd8, 0x31, 0x38, 0xc0, 0xa3, 0x2e, 0xef, 0xdb, 0xc8, 0xdf, 0xf6, 0x68, 0x20, 0xe8, 0xd5, 0x00, 0xd0, 0x19, 0x20, 0x6e, 0xd7, 0x15, 0x72, 0x59, 0x10, 0x6e, 0x0d, 0x9a, 0x1f, 0x6e, 0xfe,
0x6c, 0x05, 0xa0, 0xb8, 0x34, 0xaa, 0x9d, 0xc4, 0xfa, 0xa9, 0xbf, 0xef, 0x15, 0x77, 0x50, 0x03, 0x2a, 0x6f, 0x0d, 0x22, 0x70, 0x6b, 0xf0, 0xe8, 0x70, 0xd3, 0xd7, 0x78, 0x6b, 0x30, 0x22, 0xe6,
0x19, 0x08, 0xf2, 0x5b, 0xa7, 0x45, 0x70, 0xa3, 0x2a, 0xc8, 0x2f, 0x9a, 0xc7, 0x61, 0xfc, 0x31, 0x60, 0x38, 0xc9, 0xa3, 0x08, 0x9f, 0xad, 0xfa, 0xed, 0x7d, 0xc6, 0x8c, 0x8c, 0x5a, 0x0c, 0xff,
0x29, 0x62, 0xe5, 0xf9, 0x36, 0xfa, 0x0f, 0xa4, 0x69, 0xbd, 0x2e, 0x58, 0x24, 0x52, 0xae, 0x3c, 0xba, 0x4b, 0x30, 0x03, 0xe2, 0x28, 0xec, 0x97, 0xf1, 0x86, 0x6c, 0xb9, 0xc8, 0xdf, 0x76, 0xb9,
0x5f, 0xda, 0xd8, 0x3f, 0x91, 0x2d, 0x01, 0x0d, 0xf7, 0x3c, 0xa9, 0x5c, 0x9e, 0xfb, 0x2f, 0x82, 0x9f, 0xe0, 0x6a, 0x31, 0x0f, 0x50, 0x9e, 0x7c, 0x31, 0x0a, 0x17, 0xfb, 0xfb, 0x6e, 0x79, 0x08,
0x09, 0xd0, 0x9b, 0xd6, 0xaf, 0x1f, 0xe7, 0x3b, 0x36, 0xf6, 0x6f, 0xda, 0x34, 0xae, 0x67, 0x27, 0xb7, 0x90, 0x8e, 0xa0, 0x38, 0x3a, 0x3b, 0x04, 0xdb, 0xdd, 0x82, 0xe2, 0xb4, 0x7c, 0x0c, 0x86,
0xa0, 0xa1, 0x5f, 0xe6, 0xf7, 0x6d, 0x1b, 0xfc, 0x0f, 0xc2, 0x03, 0x42, 0x7f, 0xb2, 0x54, 0x7d, 0x1e, 0xd1, 0x2a, 0x31, 0x41, 0xe8, 0xa2, 0xff, 0x20, 0x9a, 0xd7, 0x63, 0xc2, 0x62, 0x95, 0x49,
0x15, 0xd8, 0x8b, 0xfd, 0x2f, 0x76, 0x9a, 0xd6, 0xb3, 0x55, 0x98, 0x94, 0xaa, 0xdf, 0xcf, 0x52, 0x13, 0x84, 0xda, 0xc5, 0xfe, 0x49, 0x6c, 0x05, 0x20, 0xdc, 0x0c, 0xb4, 0xf1, 0xb9, 0xef, 0xbf,
0x2f, 0x1f, 0xfe, 0x16, 0xfc, 0xbf, 0xbd, 0xf2, 0x32, 0x57, 0x32, 0x27, 0x8f, 0xc0, 0x7c, 0x4f, 0x18, 0x66, 0x00, 0x83, 0xc6, 0xeb, 0x47, 0xe5, 0x96, 0x8b, 0xfd, 0x9b, 0x83, 0xa6, 0xf5, 0xe2,
0x44, 0x75, 0xf0, 0x24, 0xb4, 0x44, 0x4b, 0xb4, 0xf3, 0x63, 0xf0, 0x7f, 0x00, 0x00, 0x00, 0xff, 0x38, 0xd4, 0xf0, 0xb2, 0xf8, 0xa7, 0x81, 0x0b, 0xfe, 0x87, 0xe0, 0x0e, 0x81, 0xbf, 0xac, 0x4d,
0xff, 0x3f, 0x9b, 0x2b, 0x54, 0xfc, 0x11, 0x00, 0x00, 0xcb, 0xb4, 0xdd, 0xc9, 0xfe, 0x97, 0x2a, 0xcd, 0xeb, 0xc5, 0x02, 0x8c, 0x68, 0xd3, 0x6a, 0xe5,
0x59, 0x50, 0xcc, 0x0e, 0x07, 0xfe, 0xdf, 0x6e, 0x75, 0x22, 0xad, 0x98, 0x13, 0x87, 0x61, 0xaa,
0xa9, 0xe2, 0x5e, 0xf0, 0x04, 0x2c, 0xab, 0x65, 0xb5, 0x5a, 0xec, 0xa2, 0xff, 0x03, 0x00, 0x00,
0xff, 0xff, 0x06, 0x21, 0xec, 0x88, 0xfd, 0x12, 0x00, 0x00,
} }

View file

@@ -39,6 +39,7 @@ extend google.protobuf.EnumOptions {
	optional bool goproto_enum_stringer = 62021;
	optional bool enum_stringer = 62022;
	optional string enum_customname = 62023;
	optional bool enumdecl = 62024;
}

extend google.protobuf.EnumValueOptions {

@@ -77,6 +78,8 @@ extend google.protobuf.FileOptions {
	optional bool gogoproto_import = 63027;
	optional bool protosizer_all = 63028;
	optional bool compare_all = 63029;
	optional bool typedecl_all = 63030;
	optional bool enumdecl_all = 63031;
}

extend google.protobuf.MessageOptions {

@@ -107,6 +110,8 @@ extend google.protobuf.MessageOptions {
	optional bool protosizer = 64028;
	optional bool compare = 64029;
	optional bool typedecl = 64030;
}

extend google.protobuf.FieldOptions {

View file

@@ -90,6 +90,14 @@ func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
	return false
}

func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
	return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true))
}

func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
	return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true))
}

func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
	if field == nil {
		return ""

View file

@@ -97,8 +97,10 @@ not print their values, while the generated GoString method will always print al
 package gostring
 
 import (
+	"fmt"
 	"github.com/gogo/protobuf/gogoproto"
 	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+	"os"
 	"strconv"
 	"strings"
 )

@@ -145,6 +147,7 @@ func (p *gostring) Generate(file *generator.FileDescriptor) {
 	reflectPkg := p.NewImport("reflect")
 	sortKeysPkg := p.NewImport("github.com/gogo/protobuf/sortkeys")
 
+	extensionToGoStringUsed := false
 	for _, message := range file.Messages() {
 		if !p.overwrite && !gogoproto.HasGoString(file.FileDescriptorProto, message.DescriptorProto) {
 			continue

@@ -221,7 +224,7 @@ func (p *gostring) Generate(file *generator.FileDescriptor) {
 			p.P(`s = append(s, "`, fieldname, `: " + `, mapName, `+ ",\n")`)
 			p.Out()
 			p.P(`}`)
-		} else if field.IsMessage() || p.IsGroup(field) {
+		} else if (field.IsMessage() && !gogoproto.IsCustomType(field) && !gogoproto.IsStdTime(field) && !gogoproto.IsStdDuration(field)) || p.IsGroup(field) {
 			if nullable || repeated {
 				p.P(`if this.`, fieldname, ` != nil {`)
 				p.In()

@@ -264,6 +267,7 @@ func (p *gostring) Generate(file *generator.FileDescriptor) {
 		if message.DescriptorProto.HasExtension() {
 			if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) {
 				p.P(`s = append(s, "XXX_InternalExtensions: " + extensionToGoString`, p.localName, `(this) + ",\n")`)
+				extensionToGoStringUsed = true
 			} else {
 				p.P(`if this.XXX_extensions != nil {`)
 				p.In()

@@ -338,29 +342,34 @@ func (p *gostring) Generate(file *generator.FileDescriptor) {
 	p.Out()
 	p.P(`}`)
 
-	p.P(`func extensionToGoString`, p.localName, `(m `, protoPkg.Use(), `.Message) string {`)
-	p.In()
-	p.P(`e := `, protoPkg.Use(), `.GetUnsafeExtensionsMap(m)`)
-	p.P(`if e == nil { return "nil" }`)
-	p.P(`s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"`)
-	p.P(`keys := make([]int, 0, len(e))`)
-	p.P(`for k := range e {`)
-	p.In()
-	p.P(`keys = append(keys, int(k))`)
-	p.Out()
-	p.P(`}`)
-	p.P(sortPkg.Use(), `.Ints(keys)`)
-	p.P(`ss := []string{}`)
-	p.P(`for _, k := range keys {`)
-	p.In()
-	p.P(`ss = append(ss, `, strconvPkg.Use(), `.Itoa(k) + ": " + e[int32(k)].GoString())`)
-	p.Out()
-	p.P(`}`)
-	p.P(`s+=`, stringsPkg.Use(), `.Join(ss, ",") + "})"`)
-	p.P(`return s`)
-	p.Out()
-	p.P(`}`)
+	if extensionToGoStringUsed {
+		if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) {
+			fmt.Fprintf(os.Stderr, "The GoString plugin for messages with extensions requires importing gogoprotobuf. Please see file %s", file.GetName())
+			os.Exit(1)
+		}
+		p.P(`func extensionToGoString`, p.localName, `(m `, protoPkg.Use(), `.Message) string {`)
+		p.In()
+		p.P(`e := `, protoPkg.Use(), `.GetUnsafeExtensionsMap(m)`)
+		p.P(`if e == nil { return "nil" }`)
+		p.P(`s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"`)
+		p.P(`keys := make([]int, 0, len(e))`)
+		p.P(`for k := range e {`)
+		p.In()
+		p.P(`keys = append(keys, int(k))`)
+		p.Out()
+		p.P(`}`)
+		p.P(sortPkg.Use(), `.Ints(keys)`)
+		p.P(`ss := []string{}`)
+		p.P(`for _, k := range keys {`)
+		p.In()
+		p.P(`ss = append(ss, `, strconvPkg.Use(), `.Itoa(k) + ": " + e[int32(k)].GoString())`)
+		p.Out()
+		p.P(`}`)
+		p.P(`s+=`, stringsPkg.Use(), `.Join(ss, ",") + "})"`)
+		p.P(`return s`)
+		p.Out()
+		p.P(`}`)
+	}
 }
 
 func init() {

View file

@@ -420,7 +420,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi
 		p.P(`if m.`, fieldname, ` != nil {`)
 		p.In()
 	}
-	packed := field.IsPacked() || (proto3 && field.IsRepeated() && generator.IsScalar(field))
+	packed := field.IsPacked() || (proto3 && field.IsPacked3())
 	wireType := field.WireType()
 	fieldNumber := field.GetNumber()
 	if packed {

View file

@@ -194,7 +194,6 @@ func (p *plugin) getFuncName(goTypName string) string {
 	case "time.NewPopulatedTime":
 		funcName = p.typesPkg.Use() + ".NewPopulatedStdTime"
 	case "time.NewPopulatedDuration":
-		p.typesPkg.Use()
 		funcName = p.typesPkg.Use() + ".NewPopulatedStdDuration"
 	}
 	return funcName
@ -305,6 +304,23 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato
} }
p.Out() p.Out()
p.P(`}`) p.P(`}`)
} else if gogoproto.IsCustomType(field) {
funcCall := p.getCustomFuncCall(goTypName)
if field.IsRepeated() {
p.P(p.varGen.Next(), ` := r.Intn(10)`)
p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`)
p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`)
p.In()
p.P(p.varGen.Next(), `:= `, funcCall)
p.P(`this.`, fieldname, `[i] = *`, p.varGen.Current())
p.Out()
p.P(`}`)
} else if gogoproto.IsNullable(field) {
p.P(`this.`, fieldname, ` = `, funcCall)
} else {
p.P(p.varGen.Next(), `:= `, funcCall)
p.P(`this.`, fieldname, ` = *`, p.varGen.Current())
}
} else if field.IsMessage() || p.IsGroup(field) {
funcCall := p.getFuncCall(goTypName)
if field.IsRepeated() {
@ -345,23 +361,6 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato
p.P(p.varGen.Next(), ` := `, val)
p.P(`this.`, fieldname, ` = &`, p.varGen.Current())
}
} else if gogoproto.IsCustomType(field) {
funcCall := p.getCustomFuncCall(goTypName)
if field.IsRepeated() {
p.P(p.varGen.Next(), ` := r.Intn(10)`)
p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`)
p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`)
p.In()
p.P(p.varGen.Next(), `:= `, funcCall)
p.P(`this.`, fieldname, `[i] = *`, p.varGen.Current())
p.Out()
p.P(`}`)
} else if gogoproto.IsNullable(field) {
p.P(`this.`, fieldname, ` = `, funcCall)
} else {
p.P(p.varGen.Next(), `:= `, funcCall)
p.P(`this.`, fieldname, ` = *`, p.varGen.Current())
}
} else if field.IsBytes() {
if field.IsRepeated() {
p.P(p.varGen.Next(), ` := r.Intn(10)`)

View file

@@ -230,7 +230,7 @@ func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag
 		p.P(`if m.`, fieldname, ` != nil {`)
 		p.In()
 	}
-	packed := field.IsPacked() || (proto3 && field.IsRepeated() && generator.IsScalar(field))
+	packed := field.IsPacked() || (proto3 && field.IsPacked3())
 	_, wire := p.GoType(message, field)
 	wireType := wireToType(wire)
 	fieldNumber := field.GetNumber()

View file

@@ -203,7 +203,7 @@ func (p *stringer) Generate(file *generator.FileDescriptor) {
 		} else if p.IsMap(field) {
 			mapName := `mapStringFor` + fieldname
 			p.P("`", fieldname, ":`", ` + `, mapName, " + `,", "`,")
-		} else if field.IsMessage() || p.IsGroup(field) {
+		} else if (field.IsMessage() && !gogoproto.IsCustomType(field)) || p.IsGroup(field) {
 			desc := p.ObjectNamed(field.GetTypeName())
 			msgname := p.TypeName(desc)
 			msgnames := strings.Split(msgname, ".")

View file

@@ -806,10 +806,13 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip
 				} else {
 					p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, time.Duration(0))`)
 				}
-			} else if nullable {
+			} else if nullable && !gogoproto.IsCustomType(field) {
 				p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, &`, msgname, `{})`)
 			} else {
-				p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, msgname, `{})`)
+				goType, _ := p.GoType(nil, field)
+				// remove the slice from the type, i.e. []*T -> *T
+				goType = goType[2:]
+				p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, goType, `{})`)
 			}
 			varName := `m.` + fieldname + `[len(m.` + fieldname + `)-1]`
 			buf := `dAtA[iNdEx:postIndex]`

@@ -840,7 +843,9 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip
 			} else if gogoproto.IsStdDuration(field) {
 				p.P(`m.`, fieldname, ` = new(time.Duration)`)
 			} else {
-				p.P(`m.`, fieldname, ` = &`, msgname, `{}`)
+				goType, _ := p.GoType(nil, field)
+				// remove the star from the type
+				p.P(`m.`, fieldname, ` = &`, goType[1:], `{}`)
 			}
 			p.Out()
 			p.P(`}`)

@@ -869,6 +874,7 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip
 			p.P(`}`)
 		}
 		p.P(`iNdEx = postIndex`)
+
 	case descriptor.FieldDescriptorProto_TYPE_BYTES:
 		p.P(`var byteLen int`)
 		p.decodeVarint("byteLen", "int")

@@ -1164,12 +1170,16 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) {
 			if field.OneofIndex != nil {
 				errFieldname = p.GetOneOfFieldName(message, field)
 			}
-			packed := field.IsPacked() || (proto3 && field.IsRepeated() && generator.IsScalar(field))
+			possiblyPacked := field.IsScalar() && field.IsRepeated()
 			p.P(`case `, strconv.Itoa(int(field.GetNumber())), `:`)
 			p.In()
 			wireType := field.WireType()
-			if packed {
-				p.P(`if wireType == `, strconv.Itoa(proto.WireBytes), `{`)
+			if possiblyPacked {
+				p.P(`if wireType == `, strconv.Itoa(wireType), `{`)
+				p.In()
+				p.field(file, message, field, fieldname, false)
+				p.Out()
+				p.P(`} else if wireType == `, strconv.Itoa(proto.WireBytes), `{`)
 				p.In()
 				p.P(`var packedLen int`)
 				p.decodeVarint("packedLen", "int")

@@ -1190,10 +1200,6 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) {
 				p.Out()
 				p.P(`}`)
 				p.Out()
-				p.P(`} else if wireType == `, strconv.Itoa(wireType), `{`)
-				p.In()
-				p.field(file, message, field, fieldname, false)
-				p.Out()
 				p.P(`} else {`)
 				p.In()
 				p.P(`return ` + fmtPkg.Use() + `.Errorf("proto: wrong wireType = %d for field ` + errFieldname + `", wireType)`)

View file

@@ -90,3 +90,29 @@ func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
	}
	return fd, md
}

// Is this field a scalar numeric type?
func (field *FieldDescriptorProto) IsScalar() bool {
if field.Type == nil {
return false
}
switch *field.Type {
case FieldDescriptorProto_TYPE_DOUBLE,
FieldDescriptorProto_TYPE_FLOAT,
FieldDescriptorProto_TYPE_INT64,
FieldDescriptorProto_TYPE_UINT64,
FieldDescriptorProto_TYPE_INT32,
FieldDescriptorProto_TYPE_FIXED64,
FieldDescriptorProto_TYPE_FIXED32,
FieldDescriptorProto_TYPE_BOOL,
FieldDescriptorProto_TYPE_UINT32,
FieldDescriptorProto_TYPE_ENUM,
FieldDescriptorProto_TYPE_SFIXED32,
FieldDescriptorProto_TYPE_SFIXED64,
FieldDescriptorProto_TYPE_SINT32,
FieldDescriptorProto_TYPE_SINT64:
return true
default:
return false
}
}

View file

@@ -99,6 +99,17 @@ func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
	return x
}

func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
packed := field.IsPacked3()
wireType := field.WireType()
fieldNumber := field.GetNumber()
if packed {
wireType = 2
}
x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
return x
}
func (field *FieldDescriptorProto) GetKey() []byte {
	x := field.GetKeyUint64()
	i := 0

@@ -111,6 +122,18 @@ func (field *FieldDescriptorProto) GetKey() []byte {
	return keybuf
}

func (field *FieldDescriptorProto) GetKey3() []byte {
x := field.GetKey3Uint64()
i := 0
keybuf := make([]byte, 0)
for i = 0; x > 127; i++ {
keybuf = append(keybuf, 0x80|uint8(x&0x7F))
x >>= 7
}
keybuf = append(keybuf, uint8(x))
return keybuf
}
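
// Worked example (illustrative, not part of this change): for field number 2
// with wire type 0 (varint), GetKey3Uint64 yields 2<<3|0 = 0x10, so GetKey3
// returns the single key byte 0x10. A packed repeated scalar is forced to
// wire type 2, so packed field 2 encodes as 2<<3|2 = 0x12.
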
func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
	msg := desc.GetMessage(packageName, messageName)
	if msg == nil {

@@ -352,6 +375,16 @@ func (f *FieldDescriptorProto) IsPacked() bool {
	return f.Options != nil && f.GetOptions().GetPacked()
}

func (f *FieldDescriptorProto) IsPacked3() bool {
if f.IsRepeated() && f.IsScalar() {
if f.Options == nil || f.GetOptions().Packed == nil {
return true
}
return f.Options != nil && f.GetOptions().GetPacked()
}
return false
}
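
// Illustrative behavior sketch (assumes the gogo descriptor and proto helper
// APIs as vendored here): a proto3 repeated scalar with no explicit packed
// option is packed by default, while [packed = false] opts out.
//
//	f := &FieldDescriptorProto{
//		Type:  FieldDescriptorProto_TYPE_INT32.Enum(),
//		Label: FieldDescriptorProto_LABEL_REPEATED.Enum(),
//	}
//	f.IsPacked3() // true: packed by default in proto3
//	f.Options = &FieldOptions{Packed: proto.Bool(false)}
//	f.IsPacked3() // false: explicitly unpacked
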
func (m *DescriptorProto) HasExtension() bool {
	return len(m.ExtensionRange) > 0
}

View file

@@ -1465,6 +1465,12 @@ func (g *Generator) generateImports() {
	g.P("var _ = ", g.Pkg["proto"], ".Marshal")
	g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
	g.P("var _ = ", g.Pkg["math"], ".Inf")
	for _, cimport := range g.customImports {
		if cimport == "time" {
			g.P("var _ = time.Kitchen")
			break
		}
	}
	g.P()
}
@@ -1506,23 +1512,27 @@ func (g *Generator) generateEnum(enum *EnumDescriptor) {
 	if !gogoproto.EnabledGoEnumPrefix(enum.file, enum.EnumDescriptorProto) {
 		ccPrefix = ""
 	}
-	g.P("type ", ccTypeName, " int32")
-	g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
-	g.P("const (")
-	g.In()
-	for i, e := range enum.Value {
-		g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i))
-		name := *e.Name
-		if gogoproto.IsEnumValueCustomName(e) {
-			name = gogoproto.GetEnumValueCustomName(e)
-		}
-		name = ccPrefix + name
-		g.P(name, " ", ccTypeName, " = ", e.Number)
-		g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
+
+	if gogoproto.HasEnumDecl(enum.file, enum.EnumDescriptorProto) {
+		g.P("type ", ccTypeName, " int32")
+		g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
+		g.P("const (")
+		g.In()
+		for i, e := range enum.Value {
+			g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i))
+			name := *e.Name
+			if gogoproto.IsEnumValueCustomName(e) {
+				name = gogoproto.GetEnumValueCustomName(e)
+			}
+			name = ccPrefix + name
+			g.P(name, " ", ccTypeName, " = ", e.Number)
+			g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
+		}
+		g.Out()
+		g.P(")")
 	}
-	g.Out()
-	g.P(")")
 
 	g.P("var ", ccTypeName, "_name = map[int32]string{")
 	g.In()
 	generated := make(map[int32]bool) // avoid duplicate values
@@ -1769,7 +1779,7 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor
 func needsStar(field *descriptor.FieldDescriptorProto, proto3 bool, allowOneOf bool) bool {
 	if isRepeated(field) &&
-		(*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+		(*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE || gogoproto.IsCustomType(field)) &&
 		(*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP) {
 		return false
 	}
@@ -2046,10 +2056,6 @@ func (g *Generator) generateMessage(message *Descriptor) {
 	oneofTypeName := make(map[*descriptor.FieldDescriptorProto]string) // without star
 	oneofInsertPoints := make(map[int32]int)                           // oneof_index => offset of g.Buffer
 
-	g.PrintComments(message.path)
-	g.P("type ", ccTypeName, " struct {")
-	g.In()
-
 	// allocNames finds a conflict-free variation of the given strings,
 	// consistently mutating their suffixes.
 	// It returns the same number of strings.
@@ -2071,7 +2077,7 @@ func (g *Generator) generateMessage(message *Descriptor) {
 		}
 	}
 
-	for i, field := range message.Field {
+	for _, field := range message.Field {
 		// Allocate the getter and the field at the same time so name
 		// collisions create field/method consistent names.
 		// TODO: This allocation occurs based on the order of the fields
@ -2083,112 +2089,122 @@ func (g *Generator) generateMessage(message *Descriptor) {
} }
ns := allocNames(base, "Get"+base) ns := allocNames(base, "Get"+base)
fieldName, fieldGetterName := ns[0], ns[1] fieldName, fieldGetterName := ns[0], ns[1]
typename, wiretype := g.GoType(message, field)
jsonName := *field.Name
jsonTag := jsonName + ",omitempty"
repeatedNativeType := (!field.IsMessage() && !gogoproto.IsCustomType(field) && field.IsRepeated())
if !gogoproto.IsNullable(field) && !repeatedNativeType {
jsonTag = jsonName
}
gogoJsonTag := gogoproto.GetJsonTag(field)
if gogoJsonTag != nil {
jsonTag = *gogoJsonTag
}
gogoMoreTags := gogoproto.GetMoreTags(field)
moreTags := ""
if gogoMoreTags != nil {
moreTags = " " + *gogoMoreTags
}
tag := fmt.Sprintf("protobuf:%s json:%q%s", g.goTag(message, field, wiretype), jsonTag, moreTags)
fieldNames[field] = fieldName fieldNames[field] = fieldName
fieldGetterNames[field] = fieldGetterName fieldGetterNames[field] = fieldGetterName
if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE && gogoproto.IsEmbed(field) { }
fieldName = ""
}
oneof := field.OneofIndex != nil && message.allowOneof() if gogoproto.HasTypeDecl(message.file, message.DescriptorProto) {
if oneof && oneofFieldName[*field.OneofIndex] == "" { g.PrintComments(message.path)
odp := message.OneofDecl[int(*field.OneofIndex)] g.P("type ", ccTypeName, " struct {")
fname := allocNames(CamelCase(odp.GetName()))[0] g.In()
// This is the first field of a oneof we haven't seen before. for i, field := range message.Field {
// Generate the union field. fieldName := fieldNames[field]
com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)) typename, wiretype := g.GoType(message, field)
if com { jsonName := *field.Name
g.P("//") jsonTag := jsonName + ",omitempty"
repeatedNativeType := (!field.IsMessage() && !gogoproto.IsCustomType(field) && field.IsRepeated())
if !gogoproto.IsNullable(field) && !repeatedNativeType {
jsonTag = jsonName
} }
g.P("// Types that are valid to be assigned to ", fname, ":") gogoJsonTag := gogoproto.GetJsonTag(field)
// Generate the rest of this comment later, if gogoJsonTag != nil {
// when we've computed any disambiguation. jsonTag = *gogoJsonTag
oneofInsertPoints[*field.OneofIndex] = g.Buffer.Len() }
gogoMoreTags := gogoproto.GetMoreTags(field)
dname := "is" + ccTypeName + "_" + fname moreTags := ""
oneofFieldName[*field.OneofIndex] = fname if gogoMoreTags != nil {
oneofDisc[*field.OneofIndex] = dname moreTags = " " + *gogoMoreTags
otag := `protobuf_oneof:"` + odp.GetName() + `"` }
g.P(fname, " ", dname, " `", otag, "`") tag := fmt.Sprintf("protobuf:%s json:%q%s", g.goTag(message, field, wiretype), jsonTag, moreTags)
} if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE && gogoproto.IsEmbed(field) {
fieldName = ""
if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
desc := g.ObjectNamed(field.GetTypeName())
if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
m := g.GoMapType(d, field)
typename = m.GoType
mapFieldTypes[field] = typename // record for the getter generation
tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", m.KeyTag, m.ValueTag)
} }
}
fieldTypes[field] = typename oneof := field.OneofIndex != nil && message.allowOneof()
if oneof && oneofFieldName[*field.OneofIndex] == "" {
odp := message.OneofDecl[int(*field.OneofIndex)]
fname := allocNames(CamelCase(odp.GetName()))[0]
if oneof { // This is the first field of a oneof we haven't seen before.
tname := ccTypeName + "_" + fieldName // Generate the union field.
// It is possible for this to collide with a message or enum com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex))
// nested in this message. Check for collisions. if com {
for { g.P("//")
ok := true }
for _, desc := range message.nested { g.P("// Types that are valid to be assigned to ", fname, ":")
if CamelCaseSlice(desc.TypeName()) == tname { // Generate the rest of this comment later,
ok = false // when we've computed any disambiguation.
break oneofInsertPoints[*field.OneofIndex] = g.Buffer.Len()
dname := "is" + ccTypeName + "_" + fname
oneofFieldName[*field.OneofIndex] = fname
oneofDisc[*field.OneofIndex] = dname
otag := `protobuf_oneof:"` + odp.GetName() + `"`
g.P(fname, " ", dname, " `", otag, "`")
}
if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
desc := g.ObjectNamed(field.GetTypeName())
if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
m := g.GoMapType(d, field)
typename = m.GoType
mapFieldTypes[field] = typename // record for the getter generation
tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", m.KeyTag, m.ValueTag)
}
}
fieldTypes[field] = typename
if oneof {
tname := ccTypeName + "_" + fieldName
// It is possible for this to collide with a message or enum
// nested in this message. Check for collisions.
for {
ok := true
for _, desc := range message.nested {
if CamelCaseSlice(desc.TypeName()) == tname {
ok = false
break
}
} }
} for _, enum := range message.enums {
for _, enum := range message.enums { if CamelCaseSlice(enum.TypeName()) == tname {
if CamelCaseSlice(enum.TypeName()) == tname { ok = false
ok = false break
break }
} }
if !ok {
tname += "_"
continue
}
break
} }
if !ok {
tname += "_" oneofTypeName[field] = tname
continue continue
}
break
} }
oneofTypeName[field] = tname g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i))
continue g.P(fieldName, "\t", typename, "\t`", tag, "`")
if !gogoproto.IsStdTime(field) && !gogoproto.IsStdDuration(field) {
g.RecordTypeUse(field.GetTypeName())
}
} }
if len(message.ExtensionRange) > 0 {
g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)) if gogoproto.HasExtensionsMap(g.file.FileDescriptorProto, message.DescriptorProto) {
g.P(fieldName, "\t", typename, "\t`", tag, "`") g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`")
if !gogoproto.IsStdTime(field) && !gogoproto.IsStdDuration(field) { } else {
g.RecordTypeUse(field.GetTypeName()) g.P("XXX_extensions\t\t[]byte `protobuf:\"bytes,0,opt\" json:\"-\"`")
}
} }
} if gogoproto.HasUnrecognized(g.file.FileDescriptorProto, message.DescriptorProto) && !message.proto3() {
if len(message.ExtensionRange) > 0 { g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
if gogoproto.HasExtensionsMap(g.file.FileDescriptorProto, message.DescriptorProto) {
g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`")
} else {
g.P("XXX_extensions\t\t[]byte `protobuf:\"bytes,0,opt\" json:\"-\"`")
} }
g.Out()
g.P("}")
} }
if gogoproto.HasUnrecognized(g.file.FileDescriptorProto, message.DescriptorProto) && !message.proto3() {
g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
}
g.Out()
g.P("}")
// Update g.Buffer to list valid oneof types. // Update g.Buffer to list valid oneof types.
// We do this down here, after we've disambiguated the oneof type names. // We do this down here, after we've disambiguated the oneof type names.

View file

@@ -20,9 +20,6 @@ import math "math"
import bytes "bytes"
import strings "strings"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import sort "sort"
import strconv "strconv"
import reflect "reflect"
import io "io"

@@ -246,24 +243,6 @@ func valueToGoStringAny(v interface{}, typ string) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func extensionToGoStringAny(m github_com_gogo_protobuf_proto.Message) string {
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
if e == nil {
return "nil"
}
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
keys := make([]int, 0, len(e))
for k := range e {
keys = append(keys, int(k))
}
sort.Ints(keys)
ss := []string{}
for _, k := range keys {
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
}
s += strings.Join(ss, ",") + "})"
return s
}
func (m *Any) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)

View file

@@ -18,9 +18,6 @@ import fmt "fmt"
import math "math"
import strings "strings"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import sort "sort"
import strconv "strconv"
import reflect "reflect"
import io "io"

@@ -204,24 +201,6 @@ func valueToGoStringDuration(v interface{}, typ string) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func extensionToGoStringDuration(m github_com_gogo_protobuf_proto.Message) string {
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
if e == nil {
return "nil"
}
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
keys := make([]int, 0, len(e))
for k := range e {
keys = append(keys, int(k))
}
sort.Ints(keys)
ss := []string{}
for _, k := range keys {
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
}
s += strings.Join(ss, ",") + "})"
return s
}
func (m *Duration) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)

View file

@@ -18,9 +18,6 @@ import fmt "fmt"
import math "math"
import strings "strings"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import sort "sort"
import strconv "strconv"
import reflect "reflect"
import io "io"

@@ -127,24 +124,6 @@ func valueToGoStringEmpty(v interface{}, typ string) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func extensionToGoStringEmpty(m github_com_gogo_protobuf_proto.Message) string {
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
if e == nil {
return "nil"
}
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
keys := make([]int, 0, len(e))
for k := range e {
keys = append(keys, int(k))
}
sort.Ints(keys)
ss := []string{}
for _, k := range keys {
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
}
s += strings.Join(ss, ",") + "})"
return s
}
func (m *Empty) Marshal() (dAtA []byte, err error) { func (m *Empty) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)

View file

@@ -18,9 +18,6 @@ import fmt "fmt"
 import math "math"
 import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
 import reflect "reflect"
 import io "io"
@@ -350,24 +347,6 @@ func valueToGoStringFieldMask(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringFieldMask(m github_com_gogo_protobuf_proto.Message) string {
-	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
-	if e == nil {
-		return "nil"
-	}
-	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "})"
-	return s
-}
 func (m *FieldMask) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)

View file

@@ -22,8 +22,6 @@ import math "math"
 import strconv "strconv"
 import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
 import reflect "reflect"
 import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
@@ -748,24 +746,6 @@ func valueToGoStringStruct(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringStruct(m github_com_gogo_protobuf_proto.Message) string {
-	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
-	if e == nil {
-		return "nil"
-	}
-	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "})"
-	return s
-}
 func (m *Struct) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
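struct.pb.go keeps its sortkeys import because Struct carries a map field, and the generated marshaler sorts map keys so encoding is deterministic. A small sketch of that idiom using the vendored sortkeys package (the fields map here is invented):

package main

import (
	"fmt"

	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
)

func main() {
	fields := map[string]int{"b": 2, "a": 1, "c": 3}

	// Collect and sort the keys; Go map iteration order is randomized.
	keys := make([]string, 0, len(fields))
	for k := range fields {
		keys = append(keys, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keys)

	for _, k := range keys {
		fmt.Println(k, fields[k]) // a 1, b 2, c 3 — stable across runs
	}
}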

View file

@@ -18,9 +18,6 @@ import fmt "fmt"
 import math "math"
 import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
 import reflect "reflect"
 import io "io"
@@ -216,24 +213,6 @@ func valueToGoStringTimestamp(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringTimestamp(m github_com_gogo_protobuf_proto.Message) string {
-	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
-	if e == nil {
-		return "nil"
-	}
-	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "})"
-	return s
-}
 func (m *Timestamp) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
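The valueToGoStringTimestamp context above relies on a pointer-literal trick: Go has no &int64(3) syntax, so the emitted GoString wraps the value in an immediately-invoked function. A sketch of both the emitted string and the trick itself (the int64 example value is invented):

package main

import "fmt"

func main() {
	// The string the generated helper emits for an *int64 field holding 3:
	s := fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", "int64", "int64", int64(3))
	fmt.Println(s) // func(v int64) *int64 { return &v } ( 3 )

	// The same construct, evaluated directly:
	p := func(v int64) *int64 { return &v }(3)
	fmt.Println(*p) // 3
}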

View file

@@ -28,9 +28,6 @@ import math "math"
 import bytes "bytes"
 import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
 import reflect "reflect"
 import io "io"
@@ -899,24 +896,6 @@ func valueToGoStringWrappers(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringWrappers(m github_com_gogo_protobuf_proto.Message) string {
-	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
-	if e == nil {
-		return "nil"
-	}
-	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "})"
-	return s
-}
 func (m *DoubleValue) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)