From 303a3929b2112208594cab95dcbaa91d626d75e5 Mon Sep 17 00:00:00 2001 From: Mrunal Patel Date: Tue, 30 Aug 2016 15:27:31 -0700 Subject: [PATCH] Update the kubernetes api to latest Signed-off-by: Mrunal Patel --- Godeps/Godeps.json | 35 +- server/runtime.go | 4 +- testdata/container_redis.json | 4 +- testdata/sandbox_config.json | 4 +- .../kubelet/api/v1alpha1/runtime/api.pb.go | 1251 +++++++++++------ .../kubelet/api/v1alpha1/runtime/api.proto | 97 +- .../x/net/http2/client_conn_pool.go | 107 +- .../x/net/http2/configure_transport.go | 35 +- vendor/golang.org/x/net/http2/errors.go | 47 +- vendor/golang.org/x/net/http2/frame.go | 437 +++++- vendor/golang.org/x/net/http2/go16.go | 43 + vendor/golang.org/x/net/http2/go17.go | 94 ++ vendor/golang.org/x/net/http2/hpack/encode.go | 2 +- vendor/golang.org/x/net/http2/hpack/hpack.go | 23 +- .../golang.org/x/net/http2/hpack/huffman.go | 42 +- vendor/golang.org/x/net/http2/http2.go | 98 +- vendor/golang.org/x/net/http2/not_go16.go | 39 +- vendor/golang.org/x/net/http2/not_go17.go | 51 + vendor/golang.org/x/net/http2/pipe.go | 6 + vendor/golang.org/x/net/http2/server.go | 705 ++++++---- vendor/golang.org/x/net/http2/transport.go | 1009 +++++++++---- vendor/golang.org/x/net/http2/write.go | 48 +- vendor/google.golang.org/grpc/.travis.yml | 20 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 31 +- vendor/google.golang.org/grpc/Makefile | 34 +- vendor/google.golang.org/grpc/PATENTS | 12 +- vendor/google.golang.org/grpc/README.md | 6 +- vendor/google.golang.org/grpc/backoff.go | 80 ++ vendor/google.golang.org/grpc/balancer.go | 385 +++++ vendor/google.golang.org/grpc/call.go | 124 +- vendor/google.golang.org/grpc/clientconn.go | 907 ++++++++---- vendor/google.golang.org/grpc/coverage.sh | 2 + .../grpc/credentials/credentials.go | 105 +- .../grpc/credentials/credentials_util_go17.go | 76 + .../credentials/credentials_util_pre_go17.go | 74 + vendor/google.golang.org/grpc/doc.go | 2 +- .../google.golang.org/grpc/grpclog/logger.go | 5 +- vendor/google.golang.org/grpc/interceptor.go | 74 + .../grpc/internal/internal.go | 49 + .../grpc/metadata/metadata.go | 14 +- .../google.golang.org/grpc/naming/naming.go | 3 +- vendor/google.golang.org/grpc/peer/peer.go | 65 + vendor/google.golang.org/grpc/picker.go | 243 ---- vendor/google.golang.org/grpc/rpc_util.go | 262 +++- vendor/google.golang.org/grpc/server.go | 596 ++++++-- vendor/google.golang.org/grpc/stream.go | 225 ++- vendor/google.golang.org/grpc/trace.go | 3 +- .../grpc/transport/control.go | 91 +- .../google.golang.org/grpc/transport/go16.go | 46 + .../google.golang.org/grpc/transport/go17.go | 46 + .../grpc/transport/handler_server.go | 397 ++++++ .../grpc/transport/http2_client.go | 536 ++++--- .../grpc/transport/http2_server.go | 264 ++-- .../grpc/transport/http_util.go | 275 ++-- .../grpc/transport/pre_go16.go | 51 + .../grpc/transport/transport.go | 211 ++- 56 files changed, 6885 insertions(+), 2610 deletions(-) create mode 100644 vendor/golang.org/x/net/http2/go16.go create mode 100644 vendor/golang.org/x/net/http2/go17.go create mode 100644 vendor/golang.org/x/net/http2/not_go17.go create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go17.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 
vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go delete mode 100644 vendor/google.golang.org/grpc/picker.go create mode 100644 vendor/google.golang.org/grpc/transport/go16.go create mode 100644 vendor/google.golang.org/grpc/transport/go17.go create mode 100644 vendor/google.golang.org/grpc/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/transport/pre_go16.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 298f0946..e987e60a 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -272,8 +272,8 @@ }, { "ImportPath": "github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime", - "Comment": "v1.4.0-alpha.2-500-ge18eda9", - "Rev": "e18eda9b2b80128a4464cd8712801152e48afe37" + "Comment": "v1.4.0-alpha.3-265-g3fd14d9", + "Rev": "3fd14d97fb13ba2849e0c908aaff18efcece70c1" }, { "ImportPath": "github.com/opencontainers/ocitools/generate", @@ -351,31 +351,38 @@ }, { "ImportPath": "google.golang.org/grpc", - "Rev": "715fec664d75c6b5cb5b12718458621d4b75df37" + "Comment": "v1.0.1-GA-29-g79b7c34", + "Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b" }, { "ImportPath": "google.golang.org/grpc/codes", - "Rev": "715fec664d75c6b5cb5b12718458621d4b75df37" + "Comment": "v1.0.1-GA-29-g79b7c34", + "Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "715fec664d75c6b5cb5b12718458621d4b75df37" + "Comment": "v1.0.1-GA-29-g79b7c34", + "Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "715fec664d75c6b5cb5b12718458621d4b75df37" + "Comment": "v1.0.1-GA-29-g79b7c34", + "Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "715fec664d75c6b5cb5b12718458621d4b75df37" + "Comment": "v1.0.1-GA-29-g79b7c34", + "Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b" }, { "ImportPath": "google.golang.org/grpc/naming", - "Rev": "715fec664d75c6b5cb5b12718458621d4b75df37" + "Comment": "v1.0.1-GA-29-g79b7c34", + "Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b" }, { "ImportPath": "google.golang.org/grpc/transport", - "Rev": "715fec664d75c6b5cb5b12718458621d4b75df37" + "Comment": "v1.0.1-GA-29-g79b7c34", + "Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/errors", @@ -396,6 +403,16 @@ "ImportPath": "k8s.io/kubernetes/pkg/util/sets", "Comment": "v1.4.0-alpha.1-489-g976ca09", "Rev": "976ca09d714cf114fb7a9e681bc0b170760cbdab" + }, + { + "ImportPath": "google.golang.org/grpc/internal", + "Comment": "v1.0.1-GA-29-g79b7c34", + "Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b" + }, + { + "ImportPath": "google.golang.org/grpc/peer", + "Comment": "v1.0.1-GA-29-g79b7c34", + "Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b" } ] } diff --git a/server/runtime.go b/server/runtime.go index 3edc5bf4..5e3b9af8 100644 --- a/server/runtime.go +++ b/server/runtime.go @@ -47,7 +47,7 @@ func (s *Server) CreatePodSandbox(ctx context.Context, req *pb.CreatePodSandboxR } // process req.Name - name := req.GetConfig().GetName() + name := req.GetConfig().GetMetadata().GetName() if name == "" { return nil, fmt.Errorf("PodSandboxConfig.Name should not be empty") } @@ -243,7 +243,7 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig is nil") } - name := containerConfig.GetName() + name := containerConfig.GetMetadata().GetName() if name == 
"" { return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Name is empty") } diff --git a/testdata/container_redis.json b/testdata/container_redis.json index 7cd636d4..98329f9c 100644 --- a/testdata/container_redis.json +++ b/testdata/container_redis.json @@ -1,5 +1,7 @@ { - "name": "podsandbox1-redis", + "metadata": { + "name": "podsandbox1-redis" + }, "image": { "image": "docker://redis:latest" }, diff --git a/testdata/sandbox_config.json b/testdata/sandbox_config.json index b8a52df5..6e39966d 100644 --- a/testdata/sandbox_config.json +++ b/testdata/sandbox_config.json @@ -1,5 +1,7 @@ { - "name": "podsandbox1", + "metadata": { + "name": "podsandbox1" + }, "hostname": "ocic_host", "log_directory": ".", "dns_options": { diff --git a/vendor/github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.pb.go b/vendor/github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.pb.go index 4a844f5b..abce8913 100644 --- a/vendor/github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.pb.go +++ b/vendor/github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.pb.go @@ -30,10 +30,9 @@ It has these top-level messages: DNSOption PortMapping Mount - ResourceRequirements - PodSandboxResources NamespaceOption LinuxPodSandboxConfig + PodSandboxMetadata PodSandboxConfig CreatePodSandboxRequest CreatePodSandboxResponse @@ -58,6 +57,7 @@ It has these top-level messages: Capability LinuxContainerConfig LinuxUser + ContainerMetadata ContainerConfig CreateContainerRequest CreateContainerResponse @@ -104,6 +104,10 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + type Protocol int32 const ( @@ -136,6 +140,7 @@ func (x *Protocol) UnmarshalJSON(data []byte) error { *x = Protocol(value) return nil } +func (Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } type PodSandBoxState int32 @@ -169,6 +174,7 @@ func (x *PodSandBoxState) UnmarshalJSON(data []byte) error { *x = PodSandBoxState(value) return nil } +func (PodSandBoxState) EnumDescriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } type ContainerState int32 @@ -208,6 +214,7 @@ func (x *ContainerState) UnmarshalJSON(data []byte) error { *x = ContainerState(value) return nil } +func (ContainerState) EnumDescriptor() ([]byte, []int) { return fileDescriptorApi, []int{2} } type VersionRequest struct { // The version of kubelet runtime API. @@ -215,9 +222,10 @@ type VersionRequest struct { XXX_unrecognized []byte `json:"-"` } -func (m *VersionRequest) Reset() { *m = VersionRequest{} } -func (m *VersionRequest) String() string { return proto.CompactTextString(m) } -func (*VersionRequest) ProtoMessage() {} +func (m *VersionRequest) Reset() { *m = VersionRequest{} } +func (m *VersionRequest) String() string { return proto.CompactTextString(m) } +func (*VersionRequest) ProtoMessage() {} +func (*VersionRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } func (m *VersionRequest) GetVersion() string { if m != nil && m.Version != nil { @@ -230,17 +238,18 @@ type VersionResponse struct { // The version of the kubelet runtime API. Version *string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` // The name of the container runtime. 
- RuntimeName *string `protobuf:"bytes,2,opt,name=runtime_name" json:"runtime_name,omitempty"` + RuntimeName *string `protobuf:"bytes,2,opt,name=runtime_name,json=runtimeName" json:"runtime_name,omitempty"` // The version of the container runtime. - RuntimeVersion *string `protobuf:"bytes,3,opt,name=runtime_version" json:"runtime_version,omitempty"` + RuntimeVersion *string `protobuf:"bytes,3,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` // The API version of the container runtime. - RuntimeApiVersion *string `protobuf:"bytes,4,opt,name=runtime_api_version" json:"runtime_api_version,omitempty"` + RuntimeApiVersion *string `protobuf:"bytes,4,opt,name=runtime_api_version,json=runtimeApiVersion" json:"runtime_api_version,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *VersionResponse) Reset() { *m = VersionResponse{} } -func (m *VersionResponse) String() string { return proto.CompactTextString(m) } -func (*VersionResponse) ProtoMessage() {} +func (m *VersionResponse) Reset() { *m = VersionResponse{} } +func (m *VersionResponse) String() string { return proto.CompactTextString(m) } +func (*VersionResponse) ProtoMessage() {} +func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } func (m *VersionResponse) GetVersion() string { if m != nil && m.Version != nil { @@ -279,9 +288,10 @@ type DNSOption struct { XXX_unrecognized []byte `json:"-"` } -func (m *DNSOption) Reset() { *m = DNSOption{} } -func (m *DNSOption) String() string { return proto.CompactTextString(m) } -func (*DNSOption) ProtoMessage() {} +func (m *DNSOption) Reset() { *m = DNSOption{} } +func (m *DNSOption) String() string { return proto.CompactTextString(m) } +func (*DNSOption) ProtoMessage() {} +func (*DNSOption) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{2} } func (m *DNSOption) GetServers() []string { if m != nil { @@ -304,17 +314,18 @@ type PortMapping struct { // The protocol of the port mapping. Protocol *Protocol `protobuf:"varint,2,opt,name=protocol,enum=runtime.Protocol" json:"protocol,omitempty"` // The port number within the container. - ContainerPort *int32 `protobuf:"varint,3,opt,name=container_port" json:"container_port,omitempty"` + ContainerPort *int32 `protobuf:"varint,3,opt,name=container_port,json=containerPort" json:"container_port,omitempty"` // The port number on the host. - HostPort *int32 `protobuf:"varint,4,opt,name=host_port" json:"host_port,omitempty"` + HostPort *int32 `protobuf:"varint,4,opt,name=host_port,json=hostPort" json:"host_port,omitempty"` // The host IP. - HostIp *string `protobuf:"bytes,5,opt,name=host_ip" json:"host_ip,omitempty"` + HostIp *string `protobuf:"bytes,5,opt,name=host_ip,json=hostIp" json:"host_ip,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *PortMapping) Reset() { *m = PortMapping{} } -func (m *PortMapping) String() string { return proto.CompactTextString(m) } -func (*PortMapping) ProtoMessage() {} +func (m *PortMapping) Reset() { *m = PortMapping{} } +func (m *PortMapping) String() string { return proto.CompactTextString(m) } +func (*PortMapping) ProtoMessage() {} +func (*PortMapping) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{3} } func (m *PortMapping) GetName() string { if m != nil && m.Name != nil { @@ -356,19 +367,20 @@ type Mount struct { // The name of the volume mount. Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // The path of the mount within the container. 
- ContainerPath *string `protobuf:"bytes,2,opt,name=container_path" json:"container_path,omitempty"` + ContainerPath *string `protobuf:"bytes,2,opt,name=container_path,json=containerPath" json:"container_path,omitempty"` // The path of the mount on the host. - HostPath *string `protobuf:"bytes,3,opt,name=host_path" json:"host_path,omitempty"` + HostPath *string `protobuf:"bytes,3,opt,name=host_path,json=hostPath" json:"host_path,omitempty"` // If set, the mount is read-only. Readonly *bool `protobuf:"varint,4,opt,name=readonly" json:"readonly,omitempty"` // If set, the mount needs SELinux relabeling - SelinuxRelabel *bool `protobuf:"varint,5,opt,name=selinux_relabel" json:"selinux_relabel,omitempty"` + SelinuxRelabel *bool `protobuf:"varint,5,opt,name=selinux_relabel,json=selinuxRelabel" json:"selinux_relabel,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *Mount) Reset() { *m = Mount{} } -func (m *Mount) String() string { return proto.CompactTextString(m) } -func (*Mount) ProtoMessage() {} +func (m *Mount) Reset() { *m = Mount{} } +func (m *Mount) String() string { return proto.CompactTextString(m) } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{4} } func (m *Mount) GetName() string { if m != nil && m.Name != nil { @@ -405,78 +417,21 @@ func (m *Mount) GetSelinuxRelabel() bool { return false } -// ResourceRequirements contains a set of resources -// Valid resources are: -// - cpu, in cores. (500m = .5 cores) -// - memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) -type ResourceRequirements struct { - // The maximum amount of compute resources allowed. - Limits *float64 `protobuf:"fixed64,1,opt,name=limits" json:"limits,omitempty"` - // The minimum amount of compute resources required. - // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value - Requests *float64 `protobuf:"fixed64,2,opt,name=requests" json:"requests,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } -func (m *ResourceRequirements) String() string { return proto.CompactTextString(m) } -func (*ResourceRequirements) ProtoMessage() {} - -func (m *ResourceRequirements) GetLimits() float64 { - if m != nil && m.Limits != nil { - return *m.Limits - } - return 0 -} - -func (m *ResourceRequirements) GetRequests() float64 { - if m != nil && m.Requests != nil { - return *m.Requests - } - return 0 -} - -// PodSandboxResources contains the CPU/memory resource requirements. -type PodSandboxResources struct { - // CPU resource requirement. - Cpu *ResourceRequirements `protobuf:"bytes,1,opt,name=cpu" json:"cpu,omitempty"` - // Memory resource requirement. - Memory *ResourceRequirements `protobuf:"bytes,2,opt,name=memory" json:"memory,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PodSandboxResources) Reset() { *m = PodSandboxResources{} } -func (m *PodSandboxResources) String() string { return proto.CompactTextString(m) } -func (*PodSandboxResources) ProtoMessage() {} - -func (m *PodSandboxResources) GetCpu() *ResourceRequirements { - if m != nil { - return m.Cpu - } - return nil -} - -func (m *PodSandboxResources) GetMemory() *ResourceRequirements { - if m != nil { - return m.Memory - } - return nil -} - // NamespaceOption provides options for Linux namespaces. type NamespaceOption struct { // If set, use the host's network namespace. 
- HostNetwork *bool `protobuf:"varint,1,opt,name=host_network" json:"host_network,omitempty"` + HostNetwork *bool `protobuf:"varint,1,opt,name=host_network,json=hostNetwork" json:"host_network,omitempty"` // If set, use the host's pid namesapce. - HostPid *bool `protobuf:"varint,2,opt,name=host_pid" json:"host_pid,omitempty"` + HostPid *bool `protobuf:"varint,2,opt,name=host_pid,json=hostPid" json:"host_pid,omitempty"` // If set, use the host's ipc namespace. - HostIpc *bool `protobuf:"varint,3,opt,name=host_ipc" json:"host_ipc,omitempty"` + HostIpc *bool `protobuf:"varint,3,opt,name=host_ipc,json=hostIpc" json:"host_ipc,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *NamespaceOption) Reset() { *m = NamespaceOption{} } -func (m *NamespaceOption) String() string { return proto.CompactTextString(m) } -func (*NamespaceOption) ProtoMessage() {} +func (m *NamespaceOption) Reset() { *m = NamespaceOption{} } +func (m *NamespaceOption) String() string { return proto.CompactTextString(m) } +func (*NamespaceOption) ProtoMessage() {} +func (*NamespaceOption) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{5} } func (m *NamespaceOption) GetHostNetwork() bool { if m != nil && m.HostNetwork != nil { @@ -505,16 +460,17 @@ type LinuxPodSandboxConfig struct { // The parent cgroup of the pod sandbox. // The cgroupfs style syntax will be used, but the container runtime can // convert it to systemd semantices if needed. - CgroupParent *string `protobuf:"bytes,1,opt,name=cgroup_parent" json:"cgroup_parent,omitempty"` + CgroupParent *string `protobuf:"bytes,1,opt,name=cgroup_parent,json=cgroupParent" json:"cgroup_parent,omitempty"` // The configurations for the sandbox's namespaces. // This will be used only if the PodSandbox uses namespace for isolation. - NamespaceOptions *NamespaceOption `protobuf:"bytes,2,opt,name=namespace_options" json:"namespace_options,omitempty"` + NamespaceOptions *NamespaceOption `protobuf:"bytes,2,opt,name=namespace_options,json=namespaceOptions" json:"namespace_options,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *LinuxPodSandboxConfig) Reset() { *m = LinuxPodSandboxConfig{} } -func (m *LinuxPodSandboxConfig) String() string { return proto.CompactTextString(m) } -func (*LinuxPodSandboxConfig) ProtoMessage() {} +func (m *LinuxPodSandboxConfig) Reset() { *m = LinuxPodSandboxConfig{} } +func (m *LinuxPodSandboxConfig) String() string { return proto.CompactTextString(m) } +func (*LinuxPodSandboxConfig) ProtoMessage() {} +func (*LinuxPodSandboxConfig) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{6} } func (m *LinuxPodSandboxConfig) GetCgroupParent() string { if m != nil && m.CgroupParent != nil { @@ -530,11 +486,63 @@ func (m *LinuxPodSandboxConfig) GetNamespaceOptions() *NamespaceOption { return nil } +// PodSandboxMetadata holds all necessary information for building the sandbox name. +// The container runtime is encouraged to expose the metadata associated with the +// PodSandbox in its user interface for better user experience. E.g., runtime can +// construct a unique PodSandboxName based on the metadata. +type PodSandboxMetadata struct { + // The pod name of the sandbox. Same as the pod name in the PodSpec. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The pod uid of the sandbox. Same as the pod UID in the PodSpec. + Uid *string `protobuf:"bytes,2,opt,name=uid" json:"uid,omitempty"` + // The pod namespace of the sandbox. Same as the pod namespace in the PodSpec. 
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + // The attempt number of creating the sandbox. + Attempt *uint32 `protobuf:"varint,4,opt,name=attempt" json:"attempt,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PodSandboxMetadata) Reset() { *m = PodSandboxMetadata{} } +func (m *PodSandboxMetadata) String() string { return proto.CompactTextString(m) } +func (*PodSandboxMetadata) ProtoMessage() {} +func (*PodSandboxMetadata) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{7} } + +func (m *PodSandboxMetadata) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *PodSandboxMetadata) GetUid() string { + if m != nil && m.Uid != nil { + return *m.Uid + } + return "" +} + +func (m *PodSandboxMetadata) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +func (m *PodSandboxMetadata) GetAttempt() uint32 { + if m != nil && m.Attempt != nil { + return *m.Attempt + } + return 0 +} + // PodSandboxConfig holds all the required and optional fields for creating a // sandbox. type PodSandboxConfig struct { - // The name of the sandbox. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The metadata of the sandbox. This information will uniquely identify + // the sandbox, and the runtime should leverage this to ensure correct + // operation. The runtime may also use this information to improve UX, such + // as by constructing a readable name. + Metadata *PodSandboxMetadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // The hostname of the sandbox. Hostname *string `protobuf:"bytes,2,opt,name=hostname" json:"hostname,omitempty"` // Path to the directory on the host in which container log files are @@ -552,37 +560,31 @@ type PodSandboxConfig struct { // container logs are under active discussion in // https://issues.k8s.io/24677. There *may* be future change of direction // for logging as the discussion carries on. - LogDirectory *string `protobuf:"bytes,3,opt,name=log_directory" json:"log_directory,omitempty"` + LogDirectory *string `protobuf:"bytes,3,opt,name=log_directory,json=logDirectory" json:"log_directory,omitempty"` // The DNS options for the sandbox. - DnsOptions *DNSOption `protobuf:"bytes,4,opt,name=dns_options" json:"dns_options,omitempty"` + DnsOptions *DNSOption `protobuf:"bytes,4,opt,name=dns_options,json=dnsOptions" json:"dns_options,omitempty"` // The port mappings for the sandbox. - PortMappings []*PortMapping `protobuf:"bytes,5,rep,name=port_mappings" json:"port_mappings,omitempty"` - // Resources specifies the resource limits for the sandbox (i.e., the - // aggregate cpu/memory resources limits of all containers). - // Note: On a Linux host, kubelet will create a pod-level cgroup and pass - // it as the cgroup parent for the PodSandbox. For some runtimes, this is - // sufficient. For others, e.g., hypervisor-based runtimes, explicit - // resource limits for the sandbox are needed at creation time. - Resources *PodSandboxResources `protobuf:"bytes,6,opt,name=resources" json:"resources,omitempty"` + PortMappings []*PortMapping `protobuf:"bytes,5,rep,name=port_mappings,json=portMappings" json:"port_mappings,omitempty"` // Labels are key value pairs that may be used to scope and select individual resources. 
- Labels map[string]string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Labels map[string]string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Annotations is an unstructured key value map that may be set by external // tools to store and retrieve arbitrary metadata. - Annotations map[string]string `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Annotations map[string]string `protobuf:"bytes,7,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Optional configurations specific to Linux hosts. - Linux *LinuxPodSandboxConfig `protobuf:"bytes,9,opt,name=linux" json:"linux,omitempty"` + Linux *LinuxPodSandboxConfig `protobuf:"bytes,8,opt,name=linux" json:"linux,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *PodSandboxConfig) Reset() { *m = PodSandboxConfig{} } -func (m *PodSandboxConfig) String() string { return proto.CompactTextString(m) } -func (*PodSandboxConfig) ProtoMessage() {} +func (m *PodSandboxConfig) Reset() { *m = PodSandboxConfig{} } +func (m *PodSandboxConfig) String() string { return proto.CompactTextString(m) } +func (*PodSandboxConfig) ProtoMessage() {} +func (*PodSandboxConfig) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{8} } -func (m *PodSandboxConfig) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (m *PodSandboxConfig) GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata } - return "" + return nil } func (m *PodSandboxConfig) GetHostname() string { @@ -613,13 +615,6 @@ func (m *PodSandboxConfig) GetPortMappings() []*PortMapping { return nil } -func (m *PodSandboxConfig) GetResources() *PodSandboxResources { - if m != nil { - return m.Resources - } - return nil -} - func (m *PodSandboxConfig) GetLabels() map[string]string { if m != nil { return m.Labels @@ -647,9 +642,10 @@ type CreatePodSandboxRequest struct { XXX_unrecognized []byte `json:"-"` } -func (m *CreatePodSandboxRequest) Reset() { *m = CreatePodSandboxRequest{} } -func (m *CreatePodSandboxRequest) String() string { return proto.CompactTextString(m) } -func (*CreatePodSandboxRequest) ProtoMessage() {} +func (m *CreatePodSandboxRequest) Reset() { *m = CreatePodSandboxRequest{} } +func (m *CreatePodSandboxRequest) String() string { return proto.CompactTextString(m) } +func (*CreatePodSandboxRequest) ProtoMessage() {} +func (*CreatePodSandboxRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{9} } func (m *CreatePodSandboxRequest) GetConfig() *PodSandboxConfig { if m != nil { @@ -660,13 +656,14 @@ func (m *CreatePodSandboxRequest) GetConfig() *PodSandboxConfig { type CreatePodSandboxResponse struct { // The id of the PodSandBox - PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id" json:"pod_sandbox_id,omitempty"` + PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId" json:"pod_sandbox_id,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *CreatePodSandboxResponse) Reset() { *m = CreatePodSandboxResponse{} } -func (m *CreatePodSandboxResponse) String() string { return proto.CompactTextString(m) } -func (*CreatePodSandboxResponse) ProtoMessage() {} +func (m *CreatePodSandboxResponse) Reset() { *m = 
CreatePodSandboxResponse{} } +func (m *CreatePodSandboxResponse) String() string { return proto.CompactTextString(m) } +func (*CreatePodSandboxResponse) ProtoMessage() {} +func (*CreatePodSandboxResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{10} } func (m *CreatePodSandboxResponse) GetPodSandboxId() string { if m != nil && m.PodSandboxId != nil { @@ -677,13 +674,14 @@ func (m *CreatePodSandboxResponse) GetPodSandboxId() string { type StopPodSandboxRequest struct { // The id of the PodSandBox - PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id" json:"pod_sandbox_id,omitempty"` + PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId" json:"pod_sandbox_id,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *StopPodSandboxRequest) Reset() { *m = StopPodSandboxRequest{} } -func (m *StopPodSandboxRequest) String() string { return proto.CompactTextString(m) } -func (*StopPodSandboxRequest) ProtoMessage() {} +func (m *StopPodSandboxRequest) Reset() { *m = StopPodSandboxRequest{} } +func (m *StopPodSandboxRequest) String() string { return proto.CompactTextString(m) } +func (*StopPodSandboxRequest) ProtoMessage() {} +func (*StopPodSandboxRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{11} } func (m *StopPodSandboxRequest) GetPodSandboxId() string { if m != nil && m.PodSandboxId != nil { @@ -696,19 +694,21 @@ type StopPodSandboxResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *StopPodSandboxResponse) Reset() { *m = StopPodSandboxResponse{} } -func (m *StopPodSandboxResponse) String() string { return proto.CompactTextString(m) } -func (*StopPodSandboxResponse) ProtoMessage() {} +func (m *StopPodSandboxResponse) Reset() { *m = StopPodSandboxResponse{} } +func (m *StopPodSandboxResponse) String() string { return proto.CompactTextString(m) } +func (*StopPodSandboxResponse) ProtoMessage() {} +func (*StopPodSandboxResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{12} } type RemovePodSandboxRequest struct { // The id of the PodSandBox - PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id" json:"pod_sandbox_id,omitempty"` + PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId" json:"pod_sandbox_id,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *RemovePodSandboxRequest) Reset() { *m = RemovePodSandboxRequest{} } -func (m *RemovePodSandboxRequest) String() string { return proto.CompactTextString(m) } -func (*RemovePodSandboxRequest) ProtoMessage() {} +func (m *RemovePodSandboxRequest) Reset() { *m = RemovePodSandboxRequest{} } +func (m *RemovePodSandboxRequest) String() string { return proto.CompactTextString(m) } +func (*RemovePodSandboxRequest) ProtoMessage() {} +func (*RemovePodSandboxRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{13} } func (m *RemovePodSandboxRequest) GetPodSandboxId() string { if m != nil && m.PodSandboxId != nil { @@ -721,19 +721,21 @@ type RemovePodSandboxResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *RemovePodSandboxResponse) Reset() { *m = RemovePodSandboxResponse{} } -func (m *RemovePodSandboxResponse) String() string { return proto.CompactTextString(m) } -func (*RemovePodSandboxResponse) ProtoMessage() {} +func (m *RemovePodSandboxResponse) Reset() { *m = RemovePodSandboxResponse{} } +func (m *RemovePodSandboxResponse) String() string { return proto.CompactTextString(m) } +func (*RemovePodSandboxResponse) ProtoMessage() {} +func 
(*RemovePodSandboxResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{14} } type PodSandboxStatusRequest struct { // The id of the PodSandBox - PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id" json:"pod_sandbox_id,omitempty"` + PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId" json:"pod_sandbox_id,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *PodSandboxStatusRequest) Reset() { *m = PodSandboxStatusRequest{} } -func (m *PodSandboxStatusRequest) String() string { return proto.CompactTextString(m) } -func (*PodSandboxStatusRequest) ProtoMessage() {} +func (m *PodSandboxStatusRequest) Reset() { *m = PodSandboxStatusRequest{} } +func (m *PodSandboxStatusRequest) String() string { return proto.CompactTextString(m) } +func (*PodSandboxStatusRequest) ProtoMessage() {} +func (*PodSandboxStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{15} } func (m *PodSandboxStatusRequest) GetPodSandboxId() string { if m != nil && m.PodSandboxId != nil { @@ -749,9 +751,10 @@ type PodSandboxNetworkStatus struct { XXX_unrecognized []byte `json:"-"` } -func (m *PodSandboxNetworkStatus) Reset() { *m = PodSandboxNetworkStatus{} } -func (m *PodSandboxNetworkStatus) String() string { return proto.CompactTextString(m) } -func (*PodSandboxNetworkStatus) ProtoMessage() {} +func (m *PodSandboxNetworkStatus) Reset() { *m = PodSandboxNetworkStatus{} } +func (m *PodSandboxNetworkStatus) String() string { return proto.CompactTextString(m) } +func (*PodSandboxNetworkStatus) ProtoMessage() {} +func (*PodSandboxNetworkStatus) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{16} } func (m *PodSandboxNetworkStatus) GetIp() string { if m != nil && m.Ip != nil { @@ -769,9 +772,10 @@ type Namespace struct { XXX_unrecognized []byte `json:"-"` } -func (m *Namespace) Reset() { *m = Namespace{} } -func (m *Namespace) String() string { return proto.CompactTextString(m) } -func (*Namespace) ProtoMessage() {} +func (m *Namespace) Reset() { *m = Namespace{} } +func (m *Namespace) String() string { return proto.CompactTextString(m) } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{17} } func (m *Namespace) GetNetwork() string { if m != nil && m.Network != nil { @@ -794,9 +798,10 @@ type LinuxPodSandboxStatus struct { XXX_unrecognized []byte `json:"-"` } -func (m *LinuxPodSandboxStatus) Reset() { *m = LinuxPodSandboxStatus{} } -func (m *LinuxPodSandboxStatus) String() string { return proto.CompactTextString(m) } -func (*LinuxPodSandboxStatus) ProtoMessage() {} +func (m *LinuxPodSandboxStatus) Reset() { *m = LinuxPodSandboxStatus{} } +func (m *LinuxPodSandboxStatus) String() string { return proto.CompactTextString(m) } +func (*LinuxPodSandboxStatus) ProtoMessage() {} +func (*LinuxPodSandboxStatus) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{18} } func (m *LinuxPodSandboxStatus) GetNamespaces() *Namespace { if m != nil { @@ -809,12 +814,12 @@ func (m *LinuxPodSandboxStatus) GetNamespaces() *Namespace { type PodSandboxStatus struct { // ID of the sandbox. Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - // Name of the sandbox - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Metadata of the sandbox. + Metadata *PodSandboxMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"` // State of the sandbox. 
State *PodSandBoxState `protobuf:"varint,3,opt,name=state,enum=runtime.PodSandBoxState" json:"state,omitempty"` // Creation timestamp of the sandbox - CreatedAt *int64 `protobuf:"varint,4,opt,name=created_at" json:"created_at,omitempty"` + CreatedAt *int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` // Network contains network status if network is handled by the runtime. Network *PodSandboxNetworkStatus `protobuf:"bytes,5,opt,name=network" json:"network,omitempty"` // Linux specific status to a pod sandbox. @@ -827,9 +832,10 @@ type PodSandboxStatus struct { XXX_unrecognized []byte `json:"-"` } -func (m *PodSandboxStatus) Reset() { *m = PodSandboxStatus{} } -func (m *PodSandboxStatus) String() string { return proto.CompactTextString(m) } -func (*PodSandboxStatus) ProtoMessage() {} +func (m *PodSandboxStatus) Reset() { *m = PodSandboxStatus{} } +func (m *PodSandboxStatus) String() string { return proto.CompactTextString(m) } +func (*PodSandboxStatus) ProtoMessage() {} +func (*PodSandboxStatus) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{19} } func (m *PodSandboxStatus) GetId() string { if m != nil && m.Id != nil { @@ -838,11 +844,11 @@ func (m *PodSandboxStatus) GetId() string { return "" } -func (m *PodSandboxStatus) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (m *PodSandboxStatus) GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata } - return "" + return nil } func (m *PodSandboxStatus) GetState() PodSandBoxState { @@ -893,9 +899,10 @@ type PodSandboxStatusResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *PodSandboxStatusResponse) Reset() { *m = PodSandboxStatusResponse{} } -func (m *PodSandboxStatusResponse) String() string { return proto.CompactTextString(m) } -func (*PodSandboxStatusResponse) ProtoMessage() {} +func (m *PodSandboxStatusResponse) Reset() { *m = PodSandboxStatusResponse{} } +func (m *PodSandboxStatusResponse) String() string { return proto.CompactTextString(m) } +func (*PodSandboxStatusResponse) ProtoMessage() {} +func (*PodSandboxStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{20} } func (m *PodSandboxStatusResponse) GetStatus() *PodSandboxStatus { if m != nil { @@ -916,13 +923,14 @@ type PodSandboxFilter struct { // LabelSelector to select matches. // Only api.MatchLabels is supported for now and the requirements // are ANDed. MatchExpressions is not supported yet. 
- LabelSelector map[string]string `protobuf:"bytes,4,rep,name=label_selector" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + LabelSelector map[string]string `protobuf:"bytes,4,rep,name=label_selector,json=labelSelector" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` } -func (m *PodSandboxFilter) Reset() { *m = PodSandboxFilter{} } -func (m *PodSandboxFilter) String() string { return proto.CompactTextString(m) } -func (*PodSandboxFilter) ProtoMessage() {} +func (m *PodSandboxFilter) Reset() { *m = PodSandboxFilter{} } +func (m *PodSandboxFilter) String() string { return proto.CompactTextString(m) } +func (*PodSandboxFilter) ProtoMessage() {} +func (*PodSandboxFilter) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{21} } func (m *PodSandboxFilter) GetName() string { if m != nil && m.Name != nil { @@ -958,9 +966,10 @@ type ListPodSandboxRequest struct { XXX_unrecognized []byte `json:"-"` } -func (m *ListPodSandboxRequest) Reset() { *m = ListPodSandboxRequest{} } -func (m *ListPodSandboxRequest) String() string { return proto.CompactTextString(m) } -func (*ListPodSandboxRequest) ProtoMessage() {} +func (m *ListPodSandboxRequest) Reset() { *m = ListPodSandboxRequest{} } +func (m *ListPodSandboxRequest) String() string { return proto.CompactTextString(m) } +func (*ListPodSandboxRequest) ProtoMessage() {} +func (*ListPodSandboxRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{22} } func (m *ListPodSandboxRequest) GetFilter() *PodSandboxFilter { if m != nil { @@ -973,20 +982,21 @@ func (m *ListPodSandboxRequest) GetFilter() *PodSandboxFilter { type PodSandbox struct { // The id of the PodSandbox Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - // The name of the PodSandbox - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Metadata of the sandbox + Metadata *PodSandboxMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"` // The state of the PodSandbox State *PodSandBoxState `protobuf:"varint,3,opt,name=state,enum=runtime.PodSandBoxState" json:"state,omitempty"` // Creation timestamps of the sandbox - CreatedAt *int64 `protobuf:"varint,4,opt,name=created_at" json:"created_at,omitempty"` + CreatedAt *int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` // The labels of the PodSandbox Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` } -func (m *PodSandbox) Reset() { *m = PodSandbox{} } -func (m *PodSandbox) String() string { return proto.CompactTextString(m) } -func (*PodSandbox) ProtoMessage() {} +func (m *PodSandbox) Reset() { *m = PodSandbox{} } +func (m *PodSandbox) String() string { return proto.CompactTextString(m) } +func (*PodSandbox) ProtoMessage() {} +func (*PodSandbox) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{23} } func (m *PodSandbox) GetId() string { if m != nil && m.Id != nil { @@ -995,11 +1005,11 @@ func (m *PodSandbox) GetId() string { return "" } -func (m *PodSandbox) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (m *PodSandbox) GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata } - return "" + return nil } func (m *PodSandbox) GetState() PodSandBoxState { @@ -1029,9 +1039,10 @@ 
type ListPodSandboxResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *ListPodSandboxResponse) Reset() { *m = ListPodSandboxResponse{} } -func (m *ListPodSandboxResponse) String() string { return proto.CompactTextString(m) } -func (*ListPodSandboxResponse) ProtoMessage() {} +func (m *ListPodSandboxResponse) Reset() { *m = ListPodSandboxResponse{} } +func (m *ListPodSandboxResponse) String() string { return proto.CompactTextString(m) } +func (*ListPodSandboxResponse) ProtoMessage() {} +func (*ListPodSandboxResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{24} } func (m *ListPodSandboxResponse) GetItems() []*PodSandbox { if m != nil { @@ -1049,9 +1060,10 @@ type ImageSpec struct { XXX_unrecognized []byte `json:"-"` } -func (m *ImageSpec) Reset() { *m = ImageSpec{} } -func (m *ImageSpec) String() string { return proto.CompactTextString(m) } -func (*ImageSpec) ProtoMessage() {} +func (m *ImageSpec) Reset() { *m = ImageSpec{} } +func (m *ImageSpec) String() string { return proto.CompactTextString(m) } +func (*ImageSpec) ProtoMessage() {} +func (*ImageSpec) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{25} } func (m *ImageSpec) GetImage() string { if m != nil && m.Image != nil { @@ -1066,9 +1078,10 @@ type KeyValue struct { XXX_unrecognized []byte `json:"-"` } -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{26} } func (m *KeyValue) GetKey() string { if m != nil && m.Key != nil { @@ -1090,21 +1103,22 @@ func (m *KeyValue) GetValue() string { // directly. type LinuxContainerResources struct { // CPU CFS (Completely Fair Scheduler) period - CpuPeriod *int64 `protobuf:"varint,1,opt,name=cpu_period" json:"cpu_period,omitempty"` + CpuPeriod *int64 `protobuf:"varint,1,opt,name=cpu_period,json=cpuPeriod" json:"cpu_period,omitempty"` // CPU CFS (Completely Fair Scheduler) quota - CpuQuota *int64 `protobuf:"varint,2,opt,name=cpu_quota" json:"cpu_quota,omitempty"` + CpuQuota *int64 `protobuf:"varint,2,opt,name=cpu_quota,json=cpuQuota" json:"cpu_quota,omitempty"` // CPU shares (relative weight vs. other containers) - CpuShares *int64 `protobuf:"varint,3,opt,name=cpu_shares" json:"cpu_shares,omitempty"` + CpuShares *int64 `protobuf:"varint,3,opt,name=cpu_shares,json=cpuShares" json:"cpu_shares,omitempty"` // Memory limit in bytes - MemoryLimitInBytes *int64 `protobuf:"varint,4,opt,name=memory_limit_in_bytes" json:"memory_limit_in_bytes,omitempty"` + MemoryLimitInBytes *int64 `protobuf:"varint,4,opt,name=memory_limit_in_bytes,json=memoryLimitInBytes" json:"memory_limit_in_bytes,omitempty"` // OOMScoreAdj adjusts the oom-killer score. 
- OomScoreAdj *int64 `protobuf:"varint,5,opt,name=oom_score_adj" json:"oom_score_adj,omitempty"` + OomScoreAdj *int64 `protobuf:"varint,5,opt,name=oom_score_adj,json=oomScoreAdj" json:"oom_score_adj,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *LinuxContainerResources) Reset() { *m = LinuxContainerResources{} } -func (m *LinuxContainerResources) String() string { return proto.CompactTextString(m) } -func (*LinuxContainerResources) ProtoMessage() {} +func (m *LinuxContainerResources) Reset() { *m = LinuxContainerResources{} } +func (m *LinuxContainerResources) String() string { return proto.CompactTextString(m) } +func (*LinuxContainerResources) ProtoMessage() {} +func (*LinuxContainerResources) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{27} } func (m *LinuxContainerResources) GetCpuPeriod() int64 { if m != nil && m.CpuPeriod != nil { @@ -1150,9 +1164,10 @@ type SELinuxOption struct { XXX_unrecognized []byte `json:"-"` } -func (m *SELinuxOption) Reset() { *m = SELinuxOption{} } -func (m *SELinuxOption) String() string { return proto.CompactTextString(m) } -func (*SELinuxOption) ProtoMessage() {} +func (m *SELinuxOption) Reset() { *m = SELinuxOption{} } +func (m *SELinuxOption) String() string { return proto.CompactTextString(m) } +func (*SELinuxOption) ProtoMessage() {} +func (*SELinuxOption) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{28} } func (m *SELinuxOption) GetUser() string { if m != nil && m.User != nil { @@ -1185,15 +1200,16 @@ func (m *SELinuxOption) GetLevel() string { // Capability contains the container capabilities to add or drop type Capability struct { // List of capabilities to add. - AddCapabilities []string `protobuf:"bytes,1,rep,name=add_capabilities" json:"add_capabilities,omitempty"` + AddCapabilities []string `protobuf:"bytes,1,rep,name=add_capabilities,json=addCapabilities" json:"add_capabilities,omitempty"` // List of capabilities to drop. - DropCapabilities []string `protobuf:"bytes,2,rep,name=drop_capabilities" json:"drop_capabilities,omitempty"` + DropCapabilities []string `protobuf:"bytes,2,rep,name=drop_capabilities,json=dropCapabilities" json:"drop_capabilities,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *Capability) Reset() { *m = Capability{} } -func (m *Capability) String() string { return proto.CompactTextString(m) } -func (*Capability) ProtoMessage() {} +func (m *Capability) Reset() { *m = Capability{} } +func (m *Capability) String() string { return proto.CompactTextString(m) } +func (*Capability) ProtoMessage() {} +func (*Capability) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{29} } func (m *Capability) GetAddCapabilities() []string { if m != nil { @@ -1217,15 +1233,16 @@ type LinuxContainerConfig struct { // Capabilities to add or drop. Capabilities *Capability `protobuf:"bytes,2,opt,name=capabilities" json:"capabilities,omitempty"` // Optional SELinux context to be applied. - SelinuxOptions *SELinuxOption `protobuf:"bytes,3,opt,name=selinux_options" json:"selinux_options,omitempty"` + SelinuxOptions *SELinuxOption `protobuf:"bytes,3,opt,name=selinux_options,json=selinuxOptions" json:"selinux_options,omitempty"` // User contains the user for the container process. 
User *LinuxUser `protobuf:"bytes,4,opt,name=user" json:"user,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *LinuxContainerConfig) Reset() { *m = LinuxContainerConfig{} } -func (m *LinuxContainerConfig) String() string { return proto.CompactTextString(m) } -func (*LinuxContainerConfig) ProtoMessage() {} +func (m *LinuxContainerConfig) Reset() { *m = LinuxContainerConfig{} } +func (m *LinuxContainerConfig) String() string { return proto.CompactTextString(m) } +func (*LinuxContainerConfig) ProtoMessage() {} +func (*LinuxContainerConfig) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{30} } func (m *LinuxContainerConfig) GetResources() *LinuxContainerResources { if m != nil { @@ -1261,13 +1278,14 @@ type LinuxUser struct { // gid specifies the group ID the container process has. Gid *int64 `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"` // additional_gids specifies additional GIDs the container process has. - AdditionalGids []int64 `protobuf:"varint,3,rep,name=additional_gids" json:"additional_gids,omitempty"` + AdditionalGids []int64 `protobuf:"varint,3,rep,name=additional_gids,json=additionalGids" json:"additional_gids,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *LinuxUser) Reset() { *m = LinuxUser{} } -func (m *LinuxUser) String() string { return proto.CompactTextString(m) } -func (*LinuxUser) ProtoMessage() {} +func (m *LinuxUser) Reset() { *m = LinuxUser{} } +func (m *LinuxUser) String() string { return proto.CompactTextString(m) } +func (*LinuxUser) ProtoMessage() {} +func (*LinuxUser) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{31} } func (m *LinuxUser) GetUid() int64 { if m != nil && m.Uid != nil { @@ -1290,9 +1308,46 @@ func (m *LinuxUser) GetAdditionalGids() []int64 { return nil } -type ContainerConfig struct { - // Name of the container. +// ContainerMetadata holds all necessary information for building the container +// name. The container runtime is encouraged to expose the metadata in its user +// interface for better user experience. E.g., runtime can construct a unique +// container name based on the metadata. Note that (name, attempt) is unique +// within a sandbox for the entire lifetime of the sandbox. +type ContainerMetadata struct { + // The name of the container. Same as the container name in the PodSpec. Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The attempt number of creating the container. + Attempt *uint32 `protobuf:"varint,2,opt,name=attempt" json:"attempt,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ContainerMetadata) Reset() { *m = ContainerMetadata{} } +func (m *ContainerMetadata) String() string { return proto.CompactTextString(m) } +func (*ContainerMetadata) ProtoMessage() {} +func (*ContainerMetadata) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{32} } + +func (m *ContainerMetadata) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ContainerMetadata) GetAttempt() uint32 { + if m != nil && m.Attempt != nil { + return *m.Attempt + } + return 0 +} + +// ContainerConfig holds all the required and optional fields for creating a +// container. +type ContainerConfig struct { + // The metadata of the container. This information will uniquely identify + // the container, and the runtime should leverage this to ensure correct + // operation. The runtime may also use this information to improve UX, such + // as by constructing a readable name. 
+ Metadata *ContainerMetadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` // Image to use. Image *ImageSpec `protobuf:"bytes,2,opt,name=image" json:"image,omitempty"` // Command to execute (i.e., entrypoint for docker) @@ -1300,7 +1355,7 @@ type ContainerConfig struct { // Args for the Command (i.e., command for docker) Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` // Current working directory of the command. - WorkingDir *string `protobuf:"bytes,5,opt,name=working_dir" json:"working_dir,omitempty"` + WorkingDir *string `protobuf:"bytes,5,opt,name=working_dir,json=workingDir" json:"working_dir,omitempty"` // List of environment variable to set in the container Envs []*KeyValue `protobuf:"bytes,6,rep,name=envs" json:"envs,omitempty"` // Mounts specifies mounts for the container @@ -1319,7 +1374,7 @@ type ContainerConfig struct { // Processes in privileged containers are essentially equivalent to root on the host. Privileged *bool `protobuf:"varint,10,opt,name=privileged" json:"privileged,omitempty"` // If set, the root filesystem of the container is read-only. - ReadonlyRootfs *bool `protobuf:"varint,11,opt,name=readonly_rootfs" json:"readonly_rootfs,omitempty"` + ReadonlyRootfs *bool `protobuf:"varint,11,opt,name=readonly_rootfs,json=readonlyRootfs" json:"readonly_rootfs,omitempty"` // Path relative to PodSandboxConfig.LogDirectory for container to store // the log (STDOUT and STDERR) on the host. // E.g., @@ -1330,28 +1385,29 @@ type ContainerConfig struct { // container logs are under active discussion in // https://issues.k8s.io/24677. There *may* be future change of direction // for logging as the discussion carries on. - LogPath *string `protobuf:"bytes,12,opt,name=log_path" json:"log_path,omitempty"` + LogPath *string `protobuf:"bytes,12,opt,name=log_path,json=logPath" json:"log_path,omitempty"` // Variables for interactive containers, these have very specialized // use-cases (e.g. debugging). // TODO: Determine if we need to continue supporting these fields that are // part of Kubernetes's Container Spec. Stdin *bool `protobuf:"varint,13,opt,name=stdin" json:"stdin,omitempty"` - StdinOnce *bool `protobuf:"varint,14,opt,name=stdin_once" json:"stdin_once,omitempty"` + StdinOnce *bool `protobuf:"varint,14,opt,name=stdin_once,json=stdinOnce" json:"stdin_once,omitempty"` Tty *bool `protobuf:"varint,15,opt,name=tty" json:"tty,omitempty"` // Linux contains configuration specific to Linux containers. 
Linux *LinuxContainerConfig `protobuf:"bytes,16,opt,name=linux" json:"linux,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *ContainerConfig) Reset() { *m = ContainerConfig{} } -func (m *ContainerConfig) String() string { return proto.CompactTextString(m) } -func (*ContainerConfig) ProtoMessage() {} +func (m *ContainerConfig) Reset() { *m = ContainerConfig{} } +func (m *ContainerConfig) String() string { return proto.CompactTextString(m) } +func (*ContainerConfig) ProtoMessage() {} +func (*ContainerConfig) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{33} } -func (m *ContainerConfig) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (m *ContainerConfig) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata } - return "" + return nil } func (m *ContainerConfig) GetImage() *ImageSpec { @@ -1461,17 +1517,18 @@ func (m *ContainerConfig) GetLinux() *LinuxContainerConfig { type CreateContainerRequest struct { // The id of the PodSandbox - PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id" json:"pod_sandbox_id,omitempty"` + PodSandboxId *string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId" json:"pod_sandbox_id,omitempty"` // The config of the container Config *ContainerConfig `protobuf:"bytes,2,opt,name=config" json:"config,omitempty"` // The config of the PodSandbox - SandboxConfig *PodSandboxConfig `protobuf:"bytes,3,opt,name=sandbox_config" json:"sandbox_config,omitempty"` + SandboxConfig *PodSandboxConfig `protobuf:"bytes,3,opt,name=sandbox_config,json=sandboxConfig" json:"sandbox_config,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } -func (m *CreateContainerRequest) String() string { return proto.CompactTextString(m) } -func (*CreateContainerRequest) ProtoMessage() {} +func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } +func (m *CreateContainerRequest) String() string { return proto.CompactTextString(m) } +func (*CreateContainerRequest) ProtoMessage() {} +func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{34} } func (m *CreateContainerRequest) GetPodSandboxId() string { if m != nil && m.PodSandboxId != nil { @@ -1496,13 +1553,14 @@ func (m *CreateContainerRequest) GetSandboxConfig() *PodSandboxConfig { type CreateContainerResponse struct { // The id of the created container - ContainerId *string `protobuf:"bytes,1,opt,name=container_id" json:"container_id,omitempty"` + ContainerId *string `protobuf:"bytes,1,opt,name=container_id,json=containerId" json:"container_id,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } -func (m *CreateContainerResponse) String() string { return proto.CompactTextString(m) } -func (*CreateContainerResponse) ProtoMessage() {} +func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } +func (m *CreateContainerResponse) String() string { return proto.CompactTextString(m) } +func (*CreateContainerResponse) ProtoMessage() {} +func (*CreateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{35} } func (m *CreateContainerResponse) GetContainerId() string { if m != nil && m.ContainerId != nil { @@ -1513,13 +1571,14 @@ func (m *CreateContainerResponse) GetContainerId() string { type StartContainerRequest struct { // The id of the container - ContainerId *string `protobuf:"bytes,1,opt,name=container_id" 
json:"container_id,omitempty"` + ContainerId *string `protobuf:"bytes,1,opt,name=container_id,json=containerId" json:"container_id,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *StartContainerRequest) Reset() { *m = StartContainerRequest{} } -func (m *StartContainerRequest) String() string { return proto.CompactTextString(m) } -func (*StartContainerRequest) ProtoMessage() {} +func (m *StartContainerRequest) Reset() { *m = StartContainerRequest{} } +func (m *StartContainerRequest) String() string { return proto.CompactTextString(m) } +func (*StartContainerRequest) ProtoMessage() {} +func (*StartContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{36} } func (m *StartContainerRequest) GetContainerId() string { if m != nil && m.ContainerId != nil { @@ -1532,21 +1591,23 @@ type StartContainerResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *StartContainerResponse) Reset() { *m = StartContainerResponse{} } -func (m *StartContainerResponse) String() string { return proto.CompactTextString(m) } -func (*StartContainerResponse) ProtoMessage() {} +func (m *StartContainerResponse) Reset() { *m = StartContainerResponse{} } +func (m *StartContainerResponse) String() string { return proto.CompactTextString(m) } +func (*StartContainerResponse) ProtoMessage() {} +func (*StartContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{37} } type StopContainerRequest struct { // The id of the container - ContainerId *string `protobuf:"bytes,1,opt,name=container_id" json:"container_id,omitempty"` + ContainerId *string `protobuf:"bytes,1,opt,name=container_id,json=containerId" json:"container_id,omitempty"` // Timeout in seconds to stop the container Timeout *int64 `protobuf:"varint,2,opt,name=timeout" json:"timeout,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *StopContainerRequest) Reset() { *m = StopContainerRequest{} } -func (m *StopContainerRequest) String() string { return proto.CompactTextString(m) } -func (*StopContainerRequest) ProtoMessage() {} +func (m *StopContainerRequest) Reset() { *m = StopContainerRequest{} } +func (m *StopContainerRequest) String() string { return proto.CompactTextString(m) } +func (*StopContainerRequest) ProtoMessage() {} +func (*StopContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{38} } func (m *StopContainerRequest) GetContainerId() string { if m != nil && m.ContainerId != nil { @@ -1566,19 +1627,21 @@ type StopContainerResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *StopContainerResponse) Reset() { *m = StopContainerResponse{} } -func (m *StopContainerResponse) String() string { return proto.CompactTextString(m) } -func (*StopContainerResponse) ProtoMessage() {} +func (m *StopContainerResponse) Reset() { *m = StopContainerResponse{} } +func (m *StopContainerResponse) String() string { return proto.CompactTextString(m) } +func (*StopContainerResponse) ProtoMessage() {} +func (*StopContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{39} } type RemoveContainerRequest struct { // The id of the container - ContainerId *string `protobuf:"bytes,1,opt,name=container_id" json:"container_id,omitempty"` + ContainerId *string `protobuf:"bytes,1,opt,name=container_id,json=containerId" json:"container_id,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *RemoveContainerRequest) Reset() { *m = RemoveContainerRequest{} } -func (m *RemoveContainerRequest) String() string { return proto.CompactTextString(m) } -func 
(*RemoveContainerRequest) ProtoMessage() {} +func (m *RemoveContainerRequest) Reset() { *m = RemoveContainerRequest{} } +func (m *RemoveContainerRequest) String() string { return proto.CompactTextString(m) } +func (*RemoveContainerRequest) ProtoMessage() {} +func (*RemoveContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{40} } func (m *RemoveContainerRequest) GetContainerId() string { if m != nil && m.ContainerId != nil { @@ -1591,9 +1654,10 @@ type RemoveContainerResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *RemoveContainerResponse) Reset() { *m = RemoveContainerResponse{} } -func (m *RemoveContainerResponse) String() string { return proto.CompactTextString(m) } -func (*RemoveContainerResponse) ProtoMessage() {} +func (m *RemoveContainerResponse) Reset() { *m = RemoveContainerResponse{} } +func (m *RemoveContainerResponse) String() string { return proto.CompactTextString(m) } +func (*RemoveContainerResponse) ProtoMessage() {} +func (*RemoveContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{41} } // ContainerFilter is used to filter containers. // All those fields are combined with 'AND' @@ -1602,20 +1666,21 @@ type ContainerFilter struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // ID of the container. Id *string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` - // State of the contianer. + // State of the container. State *ContainerState `protobuf:"varint,3,opt,name=state,enum=runtime.ContainerState" json:"state,omitempty"` // The id of the pod sandbox - PodSandboxId *string `protobuf:"bytes,4,opt,name=pod_sandbox_id" json:"pod_sandbox_id,omitempty"` + PodSandboxId *string `protobuf:"bytes,4,opt,name=pod_sandbox_id,json=podSandboxId" json:"pod_sandbox_id,omitempty"` // LabelSelector to select matches. // Only api.MatchLabels is supported for now and the requirements // are ANDed. MatchExpressions is not supported yet. 
- LabelSelector map[string]string `protobuf:"bytes,5,rep,name=label_selector" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + LabelSelector map[string]string `protobuf:"bytes,5,rep,name=label_selector,json=labelSelector" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` } -func (m *ContainerFilter) Reset() { *m = ContainerFilter{} } -func (m *ContainerFilter) String() string { return proto.CompactTextString(m) } -func (*ContainerFilter) ProtoMessage() {} +func (m *ContainerFilter) Reset() { *m = ContainerFilter{} } +func (m *ContainerFilter) String() string { return proto.CompactTextString(m) } +func (*ContainerFilter) ProtoMessage() {} +func (*ContainerFilter) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{42} } func (m *ContainerFilter) GetName() string { if m != nil && m.Name != nil { @@ -1657,9 +1722,10 @@ type ListContainersRequest struct { XXX_unrecognized []byte `json:"-"` } -func (m *ListContainersRequest) Reset() { *m = ListContainersRequest{} } -func (m *ListContainersRequest) String() string { return proto.CompactTextString(m) } -func (*ListContainersRequest) ProtoMessage() {} +func (m *ListContainersRequest) Reset() { *m = ListContainersRequest{} } +func (m *ListContainersRequest) String() string { return proto.CompactTextString(m) } +func (*ListContainersRequest) ProtoMessage() {} +func (*ListContainersRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{43} } func (m *ListContainersRequest) GetFilter() *ContainerFilter { if m != nil { @@ -1674,23 +1740,27 @@ type Container struct { // The ID of the container, used by the container runtime to identify // a container. Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - // The name of the container - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // The metadata of the container. + Metadata *ContainerMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"` // The spec of the image Image *ImageSpec `protobuf:"bytes,3,opt,name=image" json:"image,omitempty"` // Reference to the image in use. For most runtimes, this should be an // image ID. - ImageRef *string `protobuf:"bytes,4,opt,name=image_ref" json:"image_ref,omitempty"` + ImageRef *string `protobuf:"bytes,4,opt,name=image_ref,json=imageRef" json:"image_ref,omitempty"` // State is the state of the container. State *ContainerState `protobuf:"varint,5,opt,name=state,enum=runtime.ContainerState" json:"state,omitempty"` // Labels are key value pairs that may be used to scope and select individual resources. - Labels map[string]string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Labels map[string]string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Annotations is an unstructured key value map that may be set by external + // tools to store and retrieve arbitrary metadata. 
+ Annotations map[string]string `protobuf:"bytes,7,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` } -func (m *Container) Reset() { *m = Container{} } -func (m *Container) String() string { return proto.CompactTextString(m) } -func (*Container) ProtoMessage() {} +func (m *Container) Reset() { *m = Container{} } +func (m *Container) String() string { return proto.CompactTextString(m) } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{44} } func (m *Container) GetId() string { if m != nil && m.Id != nil { @@ -1699,11 +1769,11 @@ func (m *Container) GetId() string { return "" } -func (m *Container) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (m *Container) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata } - return "" + return nil } func (m *Container) GetImage() *ImageSpec { @@ -1734,15 +1804,23 @@ func (m *Container) GetLabels() map[string]string { return nil } +func (m *Container) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + type ListContainersResponse struct { // List of containers Containers []*Container `protobuf:"bytes,1,rep,name=containers" json:"containers,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *ListContainersResponse) Reset() { *m = ListContainersResponse{} } -func (m *ListContainersResponse) String() string { return proto.CompactTextString(m) } -func (*ListContainersResponse) ProtoMessage() {} +func (m *ListContainersResponse) Reset() { *m = ListContainersResponse{} } +func (m *ListContainersResponse) String() string { return proto.CompactTextString(m) } +func (*ListContainersResponse) ProtoMessage() {} +func (*ListContainersResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{45} } func (m *ListContainersResponse) GetContainers() []*Container { if m != nil { @@ -1753,13 +1831,14 @@ func (m *ListContainersResponse) GetContainers() []*Container { type ContainerStatusRequest struct { // The id of the container - ContainerId *string `protobuf:"bytes,1,opt,name=container_id" json:"container_id,omitempty"` + ContainerId *string `protobuf:"bytes,1,opt,name=container_id,json=containerId" json:"container_id,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *ContainerStatusRequest) Reset() { *m = ContainerStatusRequest{} } -func (m *ContainerStatusRequest) String() string { return proto.CompactTextString(m) } -func (*ContainerStatusRequest) ProtoMessage() {} +func (m *ContainerStatusRequest) Reset() { *m = ContainerStatusRequest{} } +func (m *ContainerStatusRequest) String() string { return proto.CompactTextString(m) } +func (*ContainerStatusRequest) ProtoMessage() {} +func (*ContainerStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{46} } func (m *ContainerStatusRequest) GetContainerId() string { if m != nil && m.ContainerId != nil { @@ -1772,23 +1851,23 @@ func (m *ContainerStatusRequest) GetContainerId() string { type ContainerStatus struct { // ID of the container. Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - // Name of the container. - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Metadata of the container. + Metadata *ContainerMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"` // Status of the container. 
State *ContainerState `protobuf:"varint,3,opt,name=state,enum=runtime.ContainerState" json:"state,omitempty"` // Creation time of the container. - CreatedAt *int64 `protobuf:"varint,4,opt,name=created_at" json:"created_at,omitempty"` + CreatedAt *int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` // Start time of the container. - StartedAt *int64 `protobuf:"varint,5,opt,name=started_at" json:"started_at,omitempty"` + StartedAt *int64 `protobuf:"varint,5,opt,name=started_at,json=startedAt" json:"started_at,omitempty"` // Finish time of the container. - FinishedAt *int64 `protobuf:"varint,6,opt,name=finished_at" json:"finished_at,omitempty"` + FinishedAt *int64 `protobuf:"varint,6,opt,name=finished_at,json=finishedAt" json:"finished_at,omitempty"` // Exit code of the container. - ExitCode *int32 `protobuf:"varint,7,opt,name=exit_code" json:"exit_code,omitempty"` + ExitCode *int32 `protobuf:"varint,7,opt,name=exit_code,json=exitCode" json:"exit_code,omitempty"` // The spec of the image Image *ImageSpec `protobuf:"bytes,8,opt,name=image" json:"image,omitempty"` // Reference to the image in use. For most runtimes, this should be an // image ID - ImageRef *string `protobuf:"bytes,9,opt,name=image_ref" json:"image_ref,omitempty"` + ImageRef *string `protobuf:"bytes,9,opt,name=image_ref,json=imageRef" json:"image_ref,omitempty"` // A string explains why container is in such a status. Reason *string `protobuf:"bytes,10,opt,name=reason" json:"reason,omitempty"` // Labels are key value pairs that may be used to scope and select individual resources. @@ -1800,9 +1879,10 @@ type ContainerStatus struct { XXX_unrecognized []byte `json:"-"` } -func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } -func (m *ContainerStatus) String() string { return proto.CompactTextString(m) } -func (*ContainerStatus) ProtoMessage() {} +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (m *ContainerStatus) String() string { return proto.CompactTextString(m) } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{47} } func (m *ContainerStatus) GetId() string { if m != nil && m.Id != nil { @@ -1811,11 +1891,11 @@ func (m *ContainerStatus) GetId() string { return "" } -func (m *ContainerStatus) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (m *ContainerStatus) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata } - return "" + return nil } func (m *ContainerStatus) GetState() ContainerState { @@ -1901,9 +1981,10 @@ type ContainerStatusResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *ContainerStatusResponse) Reset() { *m = ContainerStatusResponse{} } -func (m *ContainerStatusResponse) String() string { return proto.CompactTextString(m) } -func (*ContainerStatusResponse) ProtoMessage() {} +func (m *ContainerStatusResponse) Reset() { *m = ContainerStatusResponse{} } +func (m *ContainerStatusResponse) String() string { return proto.CompactTextString(m) } +func (*ContainerStatusResponse) ProtoMessage() {} +func (*ContainerStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{48} } func (m *ContainerStatusResponse) GetStatus() *ContainerStatus { if m != nil { @@ -1914,7 +1995,7 @@ func (m *ContainerStatusResponse) GetStatus() *ContainerStatus { type ExecRequest struct { // The id of the container - ContainerId *string `protobuf:"bytes,1,opt,name=container_id" json:"container_id,omitempty"` + ContainerId 
*string `protobuf:"bytes,1,opt,name=container_id,json=containerId" json:"container_id,omitempty"` // The cmd to execute Cmd []string `protobuf:"bytes,2,rep,name=cmd" json:"cmd,omitempty"` // Whether use tty @@ -1924,9 +2005,10 @@ type ExecRequest struct { XXX_unrecognized []byte `json:"-"` } -func (m *ExecRequest) Reset() { *m = ExecRequest{} } -func (m *ExecRequest) String() string { return proto.CompactTextString(m) } -func (*ExecRequest) ProtoMessage() {} +func (m *ExecRequest) Reset() { *m = ExecRequest{} } +func (m *ExecRequest) String() string { return proto.CompactTextString(m) } +func (*ExecRequest) ProtoMessage() {} +func (*ExecRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{49} } func (m *ExecRequest) GetContainerId() string { if m != nil && m.ContainerId != nil { @@ -1964,9 +2046,10 @@ type ExecResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *ExecResponse) Reset() { *m = ExecResponse{} } -func (m *ExecResponse) String() string { return proto.CompactTextString(m) } -func (*ExecResponse) ProtoMessage() {} +func (m *ExecResponse) Reset() { *m = ExecResponse{} } +func (m *ExecResponse) String() string { return proto.CompactTextString(m) } +func (*ExecResponse) ProtoMessage() {} +func (*ExecResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{50} } func (m *ExecResponse) GetStdout() []byte { if m != nil { @@ -1988,9 +2071,10 @@ type ImageFilter struct { XXX_unrecognized []byte `json:"-"` } -func (m *ImageFilter) Reset() { *m = ImageFilter{} } -func (m *ImageFilter) String() string { return proto.CompactTextString(m) } -func (*ImageFilter) ProtoMessage() {} +func (m *ImageFilter) Reset() { *m = ImageFilter{} } +func (m *ImageFilter) String() string { return proto.CompactTextString(m) } +func (*ImageFilter) ProtoMessage() {} +func (*ImageFilter) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{51} } func (m *ImageFilter) GetImage() *ImageSpec { if m != nil { @@ -2005,9 +2089,10 @@ type ListImagesRequest struct { XXX_unrecognized []byte `json:"-"` } -func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } -func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) } -func (*ListImagesRequest) ProtoMessage() {} +func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } +func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) } +func (*ListImagesRequest) ProtoMessage() {} +func (*ListImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{52} } func (m *ListImagesRequest) GetFilter() *ImageFilter { if m != nil { @@ -2021,17 +2106,18 @@ type Image struct { // ID of the image. Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` // Other names by which this image is known. - RepoTags []string `protobuf:"bytes,2,rep,name=repo_tags" json:"repo_tags,omitempty"` + RepoTags []string `protobuf:"bytes,2,rep,name=repo_tags,json=repoTags" json:"repo_tags,omitempty"` // Digests by which this image is known. - RepoDigests []string `protobuf:"bytes,3,rep,name=repo_digests" json:"repo_digests,omitempty"` + RepoDigests []string `protobuf:"bytes,3,rep,name=repo_digests,json=repoDigests" json:"repo_digests,omitempty"` // The size of the image in bytes. 
Size_ *uint64 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *Image) Reset() { *m = Image{} } -func (m *Image) String() string { return proto.CompactTextString(m) } -func (*Image) ProtoMessage() {} +func (m *Image) Reset() { *m = Image{} } +func (m *Image) String() string { return proto.CompactTextString(m) } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{53} } func (m *Image) GetId() string { if m != nil && m.Id != nil { @@ -2067,9 +2153,10 @@ type ListImagesResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } -func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) } -func (*ListImagesResponse) ProtoMessage() {} +func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } +func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) } +func (*ListImagesResponse) ProtoMessage() {} +func (*ListImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{54} } func (m *ListImagesResponse) GetImages() []*Image { if m != nil { @@ -2084,9 +2171,10 @@ type ImageStatusRequest struct { XXX_unrecognized []byte `json:"-"` } -func (m *ImageStatusRequest) Reset() { *m = ImageStatusRequest{} } -func (m *ImageStatusRequest) String() string { return proto.CompactTextString(m) } -func (*ImageStatusRequest) ProtoMessage() {} +func (m *ImageStatusRequest) Reset() { *m = ImageStatusRequest{} } +func (m *ImageStatusRequest) String() string { return proto.CompactTextString(m) } +func (*ImageStatusRequest) ProtoMessage() {} +func (*ImageStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{55} } func (m *ImageStatusRequest) GetImage() *ImageSpec { if m != nil { @@ -2101,9 +2189,10 @@ type ImageStatusResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *ImageStatusResponse) Reset() { *m = ImageStatusResponse{} } -func (m *ImageStatusResponse) String() string { return proto.CompactTextString(m) } -func (*ImageStatusResponse) ProtoMessage() {} +func (m *ImageStatusResponse) Reset() { *m = ImageStatusResponse{} } +func (m *ImageStatusResponse) String() string { return proto.CompactTextString(m) } +func (*ImageStatusResponse) ProtoMessage() {} +func (*ImageStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{56} } func (m *ImageStatusResponse) GetImage() *Image { if m != nil { @@ -2117,18 +2206,19 @@ type AuthConfig struct { Username *string `protobuf:"bytes,1,opt,name=username" json:"username,omitempty"` Password *string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"` Auth *string `protobuf:"bytes,3,opt,name=auth" json:"auth,omitempty"` - ServerAddress *string `protobuf:"bytes,4,opt,name=server_address" json:"server_address,omitempty"` + ServerAddress *string `protobuf:"bytes,4,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` // IdentityToken is used to authenticate the user and get // an access token for the registry. 
- IdentityToken *string `protobuf:"bytes,5,opt,name=identity_token" json:"identity_token,omitempty"` + IdentityToken *string `protobuf:"bytes,5,opt,name=identity_token,json=identityToken" json:"identity_token,omitempty"` // RegistryToken is a bearer token to be sent to a registry - RegistryToken *string `protobuf:"bytes,6,opt,name=registry_token" json:"registry_token,omitempty"` + RegistryToken *string `protobuf:"bytes,6,opt,name=registry_token,json=registryToken" json:"registry_token,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *AuthConfig) Reset() { *m = AuthConfig{} } -func (m *AuthConfig) String() string { return proto.CompactTextString(m) } -func (*AuthConfig) ProtoMessage() {} +func (m *AuthConfig) Reset() { *m = AuthConfig{} } +func (m *AuthConfig) String() string { return proto.CompactTextString(m) } +func (*AuthConfig) ProtoMessage() {} +func (*AuthConfig) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{57} } func (m *AuthConfig) GetUsername() string { if m != nil && m.Username != nil { @@ -2178,13 +2268,14 @@ type PullImageRequest struct { // The auth config for pulling image Auth *AuthConfig `protobuf:"bytes,2,opt,name=auth" json:"auth,omitempty"` // The config of the PodSandbox, which is used to pull image in PodSandbox context - SandboxConfig *PodSandboxConfig `protobuf:"bytes,3,opt,name=sandbox_config" json:"sandbox_config,omitempty"` + SandboxConfig *PodSandboxConfig `protobuf:"bytes,3,opt,name=sandbox_config,json=sandboxConfig" json:"sandbox_config,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *PullImageRequest) Reset() { *m = PullImageRequest{} } -func (m *PullImageRequest) String() string { return proto.CompactTextString(m) } -func (*PullImageRequest) ProtoMessage() {} +func (m *PullImageRequest) Reset() { *m = PullImageRequest{} } +func (m *PullImageRequest) String() string { return proto.CompactTextString(m) } +func (*PullImageRequest) ProtoMessage() {} +func (*PullImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{58} } func (m *PullImageRequest) GetImage() *ImageSpec { if m != nil { @@ -2211,9 +2302,10 @@ type PullImageResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *PullImageResponse) Reset() { *m = PullImageResponse{} } -func (m *PullImageResponse) String() string { return proto.CompactTextString(m) } -func (*PullImageResponse) ProtoMessage() {} +func (m *PullImageResponse) Reset() { *m = PullImageResponse{} } +func (m *PullImageResponse) String() string { return proto.CompactTextString(m) } +func (*PullImageResponse) ProtoMessage() {} +func (*PullImageResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{59} } type RemoveImageRequest struct { // The spec of the image @@ -2221,9 +2313,10 @@ type RemoveImageRequest struct { XXX_unrecognized []byte `json:"-"` } -func (m *RemoveImageRequest) Reset() { *m = RemoveImageRequest{} } -func (m *RemoveImageRequest) String() string { return proto.CompactTextString(m) } -func (*RemoveImageRequest) ProtoMessage() {} +func (m *RemoveImageRequest) Reset() { *m = RemoveImageRequest{} } +func (m *RemoveImageRequest) String() string { return proto.CompactTextString(m) } +func (*RemoveImageRequest) ProtoMessage() {} +func (*RemoveImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{60} } func (m *RemoveImageRequest) GetImage() *ImageSpec { if m != nil { @@ -2236,9 +2329,10 @@ type RemoveImageResponse struct { XXX_unrecognized []byte `json:"-"` } -func (m *RemoveImageResponse) Reset() { *m = RemoveImageResponse{} 
} -func (m *RemoveImageResponse) String() string { return proto.CompactTextString(m) } -func (*RemoveImageResponse) ProtoMessage() {} +func (m *RemoveImageResponse) Reset() { *m = RemoveImageResponse{} } +func (m *RemoveImageResponse) String() string { return proto.CompactTextString(m) } +func (*RemoveImageResponse) ProtoMessage() {} +func (*RemoveImageResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{61} } func init() { proto.RegisterType((*VersionRequest)(nil), "runtime.VersionRequest") @@ -2246,10 +2340,9 @@ func init() { proto.RegisterType((*DNSOption)(nil), "runtime.DNSOption") proto.RegisterType((*PortMapping)(nil), "runtime.PortMapping") proto.RegisterType((*Mount)(nil), "runtime.Mount") - proto.RegisterType((*ResourceRequirements)(nil), "runtime.ResourceRequirements") - proto.RegisterType((*PodSandboxResources)(nil), "runtime.PodSandboxResources") proto.RegisterType((*NamespaceOption)(nil), "runtime.NamespaceOption") proto.RegisterType((*LinuxPodSandboxConfig)(nil), "runtime.LinuxPodSandboxConfig") + proto.RegisterType((*PodSandboxMetadata)(nil), "runtime.PodSandboxMetadata") proto.RegisterType((*PodSandboxConfig)(nil), "runtime.PodSandboxConfig") proto.RegisterType((*CreatePodSandboxRequest)(nil), "runtime.CreatePodSandboxRequest") proto.RegisterType((*CreatePodSandboxResponse)(nil), "runtime.CreatePodSandboxResponse") @@ -2274,6 +2367,7 @@ func init() { proto.RegisterType((*Capability)(nil), "runtime.Capability") proto.RegisterType((*LinuxContainerConfig)(nil), "runtime.LinuxContainerConfig") proto.RegisterType((*LinuxUser)(nil), "runtime.LinuxUser") + proto.RegisterType((*ContainerMetadata)(nil), "runtime.ContainerMetadata") proto.RegisterType((*ContainerConfig)(nil), "runtime.ContainerConfig") proto.RegisterType((*CreateContainerRequest)(nil), "runtime.CreateContainerRequest") proto.RegisterType((*CreateContainerResponse)(nil), "runtime.CreateContainerResponse") @@ -2312,6 +2406,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion3 + // Client API for RuntimeService service type RuntimeServiceClient interface { @@ -2537,148 +2635,220 @@ func RegisterRuntimeServiceServer(s *grpc.Server, srv RuntimeServiceServer) { s.RegisterService(&_RuntimeService_serviceDesc, srv) } -func _RuntimeService_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VersionRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).Version(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).Version(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).Version(ctx, req.(*VersionRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_CreatePodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_CreatePodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreatePodSandboxRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).CreatePodSandbox(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).CreatePodSandbox(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/CreatePodSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).CreatePodSandbox(ctx, req.(*CreatePodSandboxRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_StopPodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_StopPodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StopPodSandboxRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).StopPodSandbox(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).StopPodSandbox(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/StopPodSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).StopPodSandbox(ctx, req.(*StopPodSandboxRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_RemovePodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_RemovePodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RemovePodSandboxRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).RemovePodSandbox(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return 
srv.(RuntimeServiceServer).RemovePodSandbox(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/RemovePodSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).RemovePodSandbox(ctx, req.(*RemovePodSandboxRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_PodSandboxStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_PodSandboxStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PodSandboxStatusRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).PodSandboxStatus(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).PodSandboxStatus(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/PodSandboxStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).PodSandboxStatus(ctx, req.(*PodSandboxStatusRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_ListPodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_ListPodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListPodSandboxRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).ListPodSandbox(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).ListPodSandbox(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/ListPodSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ListPodSandbox(ctx, req.(*ListPodSandboxRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateContainerRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).CreateContainer(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).CreateContainer(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/CreateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).CreateContainer(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_StartContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_StartContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartContainerRequest) if err 
:= dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).StartContainer(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).StartContainer(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/StartContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).StartContainer(ctx, req.(*StartContainerRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_StopContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_StopContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StopContainerRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).StopContainer(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).StopContainer(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/StopContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).StopContainer(ctx, req.(*StopContainerRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_RemoveContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_RemoveContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RemoveContainerRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).RemoveContainer(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).RemoveContainer(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/RemoveContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).RemoveContainer(ctx, req.(*RemoveContainerRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_ListContainers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_ListContainers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListContainersRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).ListContainers(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).ListContainers(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/ListContainers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ListContainers(ctx, req.(*ListContainersRequest)) + } + return interceptor(ctx, in, info, handler) } -func _RuntimeService_ContainerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _RuntimeService_ContainerStatus_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ContainerStatusRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RuntimeServiceServer).ContainerStatus(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RuntimeServiceServer).ContainerStatus(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.RuntimeService/ContainerStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ContainerStatus(ctx, req.(*ContainerStatusRequest)) + } + return interceptor(ctx, in, info, handler) } func _RuntimeService_Exec_Handler(srv interface{}, stream grpc.ServerStream) error { @@ -2768,6 +2938,7 @@ var _RuntimeService_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, + Metadata: fileDescriptorApi, } // Client API for ImageService service @@ -2846,52 +3017,76 @@ func RegisterImageServiceServer(s *grpc.Server, srv ImageServiceServer) { s.RegisterService(&_ImageService_serviceDesc, srv) } -func _ImageService_ListImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ImageService_ListImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListImagesRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ImageServiceServer).ListImages(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ImageServiceServer).ListImages(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.ImageService/ListImages", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).ListImages(ctx, req.(*ListImagesRequest)) + } + return interceptor(ctx, in, info, handler) } -func _ImageService_ImageStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ImageService_ImageStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ImageStatusRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ImageServiceServer).ImageStatus(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ImageServiceServer).ImageStatus(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.ImageService/ImageStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).ImageStatus(ctx, req.(*ImageStatusRequest)) + } + return interceptor(ctx, in, info, handler) } -func _ImageService_PullImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ImageService_PullImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PullImageRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ImageServiceServer).PullImage(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ImageServiceServer).PullImage(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/runtime.ImageService/PullImage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).PullImage(ctx, req.(*PullImageRequest)) + } + return interceptor(ctx, in, info, handler) } -func _ImageService_RemoveImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _ImageService_RemoveImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RemoveImageRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(ImageServiceServer).RemoveImage(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(ImageServiceServer).RemoveImage(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.ImageService/RemoveImage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).RemoveImage(ctx, req.(*RemoveImageRequest)) + } + return interceptor(ctx, in, info, handler) } var _ImageService_serviceDesc = grpc.ServiceDesc{ @@ -2915,5 +3110,187 @@ var _ImageService_serviceDesc = grpc.ServiceDesc{ Handler: _ImageService_RemoveImage_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptorApi, +} + +var fileDescriptorApi = []byte{ + // 2829 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x5a, 0x4b, 0x73, 0x1b, 0xc7, + 0x11, 0x16, 0x08, 0x82, 0x04, 0x1a, 0x04, 0x08, 0x8e, 0x28, 0x12, 0x82, 0x5e, 0xd4, 0xda, 0x4e, + 0x24, 0xda, 0x66, 0x29, 0x4c, 0x4a, 0x8a, 0x64, 0x5b, 0x16, 0x4d, 0x32, 0x2a, 0x5a, 0x14, 0xc5, + 0x2c, 0x24, 0xc5, 0x3a, 0xa1, 0x56, 0xd8, 0x21, 0xb9, 0x12, 0xb0, 0xbb, 0xde, 0x5d, 0x30, 0x62, + 0xae, 0xf9, 0x05, 0xb9, 0xe5, 0x98, 0x83, 0xab, 0x72, 0xca, 0x25, 0x55, 0xb9, 0xe4, 0x9a, 0x5b, + 0x52, 0xf9, 0x3d, 0xc9, 0x25, 0x55, 0xe9, 0x79, 0xec, 0xec, 0xec, 0x03, 0x14, 0x21, 0x57, 0xd9, + 0xba, 0xed, 0x74, 0xf7, 0xcc, 0xf4, 0x74, 0xf7, 0xf4, 0xf4, 0x37, 0xb3, 0x50, 0xb3, 0x7c, 0x67, + 0xcd, 0x0f, 0xbc, 0xc8, 0x23, 0xb3, 0xc1, 0xc8, 0x8d, 0x9c, 0x21, 0x35, 0x56, 0xa1, 0xf9, 0x9c, + 0x06, 0xa1, 0xe3, 0xb9, 0x26, 0xfd, 0x76, 0x44, 0xc3, 0x88, 0xb4, 0x61, 0xf6, 0x58, 0x50, 0xda, + 0xa5, 0x95, 0xd2, 0x8d, 0x9a, 0x19, 0x37, 0x8d, 0x3f, 0x97, 0x60, 0x5e, 0x09, 0x87, 0xbe, 0xe7, + 0x86, 0x74, 0xbc, 0x34, 0xb9, 0x0e, 0x73, 0x72, 0x92, 0x9e, 0x6b, 0x0d, 0x69, 0x7b, 0x8a, 0xb3, + 0xeb, 0x92, 0xb6, 0x87, 0x24, 0xf2, 0x53, 0x98, 0x8f, 0x45, 0xe2, 0x41, 0xca, 0x5c, 0xaa, 0x29, + 0xc9, 0x72, 0x36, 0xb2, 0x06, 0xe7, 0x63, 0x41, 0x5c, 0x83, 0x12, 0x9e, 0xe6, 0xc2, 0x0b, 0x92, + 0xb5, 0xe1, 0x3b, 0x52, 0xde, 0xd8, 0x80, 0xda, 0xd6, 0x5e, 0xf7, 0x89, 0x1f, 0xb1, 0xce, 0xa8, + 0x62, 0x48, 0x03, 0xd6, 0x07, 0x55, 0x2c, 0x33, 0x15, 0x65, 0x93, 0x74, 0xa0, 0x1a, 0x52, 0x2b, + 0xe8, 0x1f, 0xd1, 0x10, 0xd5, 0x63, 0x2c, 0xd5, 0x36, 0xfe, 0x52, 0x82, 0xfa, 0xbe, 0x17, 0x44, + 0x8f, 0x2d, 0xdf, 0x77, 0xdc, 0x43, 0x42, 0x60, 0x9a, 0x2f, 0x43, 0xac, 0x92, 0x7f, 0x93, 0x4f, + 0xa1, 0xca, 0xcd, 0xd9, 0xf7, 0x06, 0x7c, 0x79, 0xcd, 0xf5, 0x85, 0x35, 0xa9, 0xcc, 0xda, 0xbe, + 0x64, 0x98, 0x4a, 0x84, 0x7c, 0x04, 0xcd, 0xbe, 0xe7, 0x46, 0x96, 0xe3, 0xd2, 0xa0, 0xe7, 0xe3, + 0xd8, 0x7c, 0xb5, 0x15, 0xb3, 0xa1, 0xa8, 0x6c, 0x42, 0x72, 0x09, 0x6a, 0x47, 0x5e, 0x18, 0x09, + 0x89, 0x69, 0x2e, 0x51, 0x65, 0x04, 0xce, 0x5c, 0x86, 0x59, 0xce, 0x74, 0xfc, 0x76, 0x85, 0x6b, + 0x32, 
0xc3, 0x9a, 0x3b, 0xbe, 0xf1, 0x5d, 0x09, 0x2a, 0x8f, 0x3d, 0x9c, 0xbc, 0x50, 0xd3, 0xf4, + 0xd4, 0x56, 0x74, 0x24, 0xdd, 0xa1, 0x4d, 0x8d, 0xc4, 0x64, 0x6a, 0x26, 0x21, 0x5c, 0x21, 0xa6, + 0x66, 0x4c, 0xb4, 0x56, 0x40, 0x2d, 0xdb, 0x73, 0x07, 0x27, 0x5c, 0xad, 0xaa, 0xa9, 0xda, 0xcc, + 0x93, 0x21, 0x1d, 0x38, 0xee, 0xe8, 0x4d, 0x2f, 0xa0, 0x03, 0xeb, 0x25, 0x1d, 0x70, 0xf5, 0xaa, + 0x66, 0x53, 0x92, 0x4d, 0x41, 0x35, 0x5e, 0xc1, 0x3c, 0x73, 0x7d, 0xe8, 0x5b, 0x7d, 0x2a, 0xfd, + 0x83, 0x81, 0xc2, 0x27, 0x75, 0x69, 0xf4, 0x5b, 0x2f, 0x78, 0xcd, 0xf5, 0xae, 0x9a, 0x75, 0x46, + 0xdb, 0x13, 0x24, 0x72, 0x11, 0xaa, 0x42, 0x2f, 0xc7, 0xe6, 0x8a, 0x57, 0x4d, 0x6e, 0x85, 0x7d, + 0xc7, 0x56, 0x2c, 0xc7, 0xef, 0x73, 0x8d, 0x25, 0x6b, 0xc7, 0xef, 0x1b, 0xbf, 0x2f, 0xc1, 0x85, + 0x5d, 0x36, 0xf9, 0xbe, 0x67, 0x77, 0x2d, 0xd7, 0x7e, 0xe9, 0xbd, 0xd9, 0xf4, 0xdc, 0x03, 0xe7, + 0x90, 0x7c, 0x00, 0x8d, 0xfe, 0x61, 0xe0, 0x8d, 0x7c, 0x5c, 0x69, 0x40, 0xdd, 0x48, 0xda, 0x6a, + 0x4e, 0x10, 0xf7, 0x39, 0x8d, 0x6c, 0xc3, 0x82, 0x1b, 0xab, 0xda, 0xf3, 0xb8, 0xae, 0x21, 0x9f, + 0xbd, 0xbe, 0xde, 0x56, 0x6e, 0xce, 0x2c, 0xc6, 0x6c, 0xb9, 0x69, 0x42, 0x68, 0x04, 0x40, 0x92, + 0xf9, 0x1f, 0xd3, 0xc8, 0xb2, 0xad, 0xc8, 0x2a, 0x74, 0x52, 0x0b, 0xca, 0x23, 0xb9, 0xc0, 0x9a, + 0xc9, 0x3e, 0xc9, 0x65, 0xa8, 0xa9, 0xf1, 0xa4, 0x3f, 0x12, 0x02, 0x0b, 0x6c, 0x2b, 0x8a, 0xe8, + 0xd0, 0x17, 0x61, 0xd2, 0x30, 0xe3, 0xa6, 0xf1, 0xf7, 0x69, 0x68, 0xe5, 0x16, 0x7d, 0x07, 0xaa, + 0x43, 0x39, 0x3d, 0x9f, 0xb6, 0xbe, 0x7e, 0x29, 0x89, 0xd6, 0x9c, 0x86, 0xa6, 0x12, 0x66, 0x8e, + 0x67, 0x26, 0xd5, 0x76, 0xb1, 0x6a, 0x33, 0x4b, 0x0e, 0xbc, 0xc3, 0x9e, 0xed, 0x04, 0xb4, 0x1f, + 0x79, 0xc1, 0x89, 0xd4, 0x72, 0x0e, 0x89, 0x5b, 0x31, 0x8d, 0xfc, 0x1c, 0xea, 0xb6, 0x1b, 0x2a, + 0x1b, 0x4e, 0xf3, 0xc9, 0x89, 0x9a, 0x5c, 0x6d, 0x55, 0x13, 0x50, 0x4c, 0xda, 0x8d, 0xdc, 0x85, + 0x06, 0xdb, 0x01, 0xbd, 0xa1, 0xd8, 0x80, 0x21, 0x06, 0x54, 0x19, 0xbb, 0x2d, 0x6a, 0x3a, 0xab, + 0xdd, 0x69, 0xce, 0xf9, 0x49, 0x23, 0x24, 0x5f, 0xc0, 0x0c, 0x8f, 0xb6, 0xb0, 0x3d, 0xc3, 0xfb, + 0x7c, 0x54, 0xb0, 0x4e, 0x61, 0x94, 0xb5, 0x5d, 0x2e, 0xb7, 0xed, 0x46, 0xc1, 0x89, 0x29, 0x3b, + 0x91, 0x5d, 0xa8, 0x5b, 0xae, 0xeb, 0x45, 0x96, 0x50, 0x77, 0x96, 0x8f, 0xb1, 0x3a, 0x7e, 0x8c, + 0x8d, 0x44, 0x58, 0x0c, 0xa4, 0x77, 0x27, 0xbf, 0x80, 0x0a, 0xdf, 0x01, 0xed, 0x2a, 0x5f, 0xf6, + 0x55, 0x35, 0x4e, 0x61, 0x68, 0x9a, 0x42, 0xb8, 0x73, 0x17, 0xea, 0x9a, 0x6a, 0x2c, 0x34, 0x5e, + 0xd3, 0x13, 0x19, 0x2d, 0xec, 0x93, 0x2c, 0x42, 0xe5, 0xd8, 0x1a, 0x8c, 0x62, 0x8f, 0x88, 0xc6, + 0xbd, 0xa9, 0x5f, 0x96, 0x3a, 0xf7, 0xa1, 0x95, 0xd5, 0x68, 0x92, 0xfe, 0xc6, 0x2e, 0x2c, 0x6f, + 0xe2, 0xc6, 0x8e, 0x68, 0xa2, 0x5b, 0x7c, 0x36, 0xfc, 0x0c, 0x66, 0xfa, 0x5c, 0x4d, 0x19, 0x40, + 0x17, 0xc7, 0x1a, 0xc5, 0x94, 0x82, 0xc6, 0x03, 0x68, 0xe7, 0x47, 0x93, 0x87, 0xc7, 0x87, 0xd0, + 0xf4, 0x3d, 0xbb, 0x17, 0x0a, 0x72, 0x0f, 0x63, 0x5f, 0xee, 0x43, 0x5f, 0xc9, 0xee, 0xd8, 0xc6, + 0x17, 0x70, 0xa1, 0x1b, 0x79, 0x7e, 0x5e, 0x9b, 0xb3, 0x75, 0x6f, 0xc3, 0x52, 0xb6, 0xbb, 0x98, + 0xde, 0xf8, 0x12, 0x96, 0x4d, 0x3a, 0xf4, 0x8e, 0xe9, 0xbb, 0x0e, 0xdd, 0x81, 0x76, 0x7e, 0x80, + 0x64, 0xf0, 0x84, 0xda, 0x45, 0x67, 0x8c, 0xc2, 0xc9, 0x06, 0xbf, 0xa9, 0x0f, 0x20, 0x13, 0xa1, + 0x18, 0x87, 0x34, 0x61, 0x0a, 0xf3, 0xbf, 0xe8, 0x84, 0x5f, 0xc6, 0x0b, 0xa8, 0xed, 0xe9, 0x59, + 0x41, 0xcf, 0xa4, 0x78, 0xdc, 0xc9, 0x26, 0x59, 0x87, 0xd9, 0xb3, 0xa6, 0xb1, 0x58, 0xd0, 0x78, + 0x94, 0x4b, 0xa1, 0x52, 0x87, 0x75, 0x00, 0x95, 0x89, 0x42, 0x19, 0x0e, 0x24, 0x3f, 0x9e, 0xa9, + 0x49, 0x19, 0xdf, 0xa5, 0xd2, 
0x92, 0xb6, 0x18, 0x5b, 0x2d, 0xc6, 0x4e, 0xa5, 0xa9, 0xa9, 0x49, + 0xd2, 0xd4, 0x1a, 0x54, 0x42, 0x1c, 0x52, 0x24, 0xca, 0xa6, 0xb6, 0x38, 0xd9, 0xeb, 0x2b, 0x31, + 0x25, 0x35, 0x85, 0x18, 0xb9, 0x02, 0xd0, 0xe7, 0x91, 0x69, 0xf7, 0x2c, 0x91, 0x41, 0xcb, 0x66, + 0x4d, 0x52, 0x36, 0x22, 0x72, 0x2f, 0xb1, 0x63, 0x85, 0xab, 0xb1, 0x52, 0xa0, 0x46, 0xca, 0x2f, + 0x89, 0xa5, 0xd5, 0x9e, 0x9f, 0x39, 0x7d, 0xcf, 0xcb, 0x7e, 0x42, 0x58, 0x4b, 0x5b, 0xb3, 0x63, + 0xd3, 0x96, 0xe8, 0x71, 0x96, 0xb4, 0x55, 0x1d, 0x9b, 0xb6, 0xe4, 0x18, 0xa7, 0xa6, 0xad, 0x1f, + 0x33, 0x01, 0x3d, 0x86, 0x76, 0x7e, 0xeb, 0xc8, 0x94, 0x81, 0x19, 0x28, 0xe4, 0x94, 0x53, 0x32, + 0x90, 0xec, 0x22, 0x05, 0x8d, 0xff, 0x95, 0xf4, 0xa8, 0xfb, 0x95, 0x33, 0x88, 0x68, 0x50, 0x78, + 0xfe, 0x8a, 0x48, 0x9c, 0x52, 0x91, 0x38, 0x69, 0x40, 0x75, 0xa1, 0xc9, 0x5d, 0xd1, 0xc3, 0x9a, + 0x87, 0x9f, 0x7c, 0x18, 0x54, 0xcc, 0x07, 0x9f, 0x14, 0xe8, 0x28, 0xd4, 0x10, 0x7e, 0xec, 0x4a, + 0x71, 0xe1, 0x85, 0xc6, 0x40, 0xa7, 0x75, 0x1e, 0x00, 0xc9, 0x0b, 0x4d, 0x64, 0xce, 0xaf, 0xd9, + 0x16, 0x66, 0xe5, 0x63, 0x41, 0x36, 0x3f, 0xe0, 0x6a, 0x9c, 0x62, 0x4b, 0xa1, 0xa7, 0x29, 0x05, + 0x8d, 0x3f, 0x4e, 0x01, 0x24, 0xcc, 0xf7, 0x76, 0xef, 0xde, 0x51, 0x3b, 0x49, 0x14, 0x0d, 0xd7, + 0x0a, 0xb4, 0x28, 0xda, 0x43, 0xdf, 0x23, 0xea, 0x8d, 0x4d, 0x58, 0xca, 0x9a, 0x59, 0xc6, 0xec, + 0x4d, 0xa8, 0x38, 0x58, 0x97, 0x09, 0xf8, 0x51, 0x5f, 0x3f, 0x5f, 0xa0, 0x8c, 0x29, 0x24, 0x8c, + 0xeb, 0x50, 0xdb, 0x19, 0x5a, 0x87, 0xb4, 0xeb, 0xd3, 0x3e, 0x9b, 0xcb, 0x61, 0x0d, 0x39, 0xbf, + 0x68, 0x18, 0xeb, 0x50, 0x7d, 0x44, 0x4f, 0x9e, 0xb3, 0x79, 0xcf, 0xaa, 0x9f, 0xf1, 0xaf, 0x12, + 0x2c, 0xf3, 0xd4, 0xb3, 0x19, 0x97, 0xfb, 0xa8, 0x9c, 0x37, 0x0a, 0x30, 0x29, 0x73, 0x53, 0xfa, + 0xa3, 0x9e, 0x4f, 0x03, 0xc7, 0x13, 0xbe, 0x64, 0xa6, 0xf4, 0x47, 0xfb, 0x9c, 0xc0, 0x20, 0x01, + 0x63, 0x7f, 0x3b, 0xf2, 0xa4, 0x4f, 0xcb, 0x66, 0x15, 0x09, 0xbf, 0x66, 0xed, 0xb8, 0x6f, 0x78, + 0x84, 0x15, 0x73, 0xc8, 0x7d, 0x27, 0xfa, 0x76, 0x39, 0x01, 0x03, 0xec, 0xc2, 0x10, 0xcf, 0xc7, + 0xe0, 0xa4, 0x37, 0x70, 0x86, 0x0e, 0xd6, 0xe8, 0x6e, 0xef, 0xe5, 0x49, 0x44, 0x43, 0xe9, 0x30, + 0x22, 0x98, 0xbb, 0x8c, 0xb7, 0xe3, 0x7e, 0xc5, 0x38, 0xc4, 0x80, 0x86, 0xe7, 0x0d, 0x7b, 0x61, + 0xdf, 0x0b, 0x10, 0xeb, 0xd9, 0xaf, 0x78, 0xee, 0x2d, 0x9b, 0x75, 0x24, 0x76, 0x19, 0x6d, 0xc3, + 0x7e, 0x65, 0x58, 0xd0, 0xe8, 0x6e, 0xf3, 0xe5, 0x48, 0x04, 0x81, 0x9b, 0x79, 0x14, 0xca, 0x30, + 0xc6, 0xcd, 0xcc, 0xbe, 0x19, 0x2d, 0xf0, 0x06, 0xb1, 0x1d, 0xf8, 0x37, 0xa3, 0x45, 0x27, 0x7e, + 0x5c, 0x49, 0xf3, 0x6f, 0x66, 0xb0, 0x01, 0x3d, 0x46, 0xbc, 0x22, 0xc0, 0xa4, 0x68, 0x18, 0x36, + 0xc0, 0xa6, 0xe5, 0x5b, 0x2f, 0x9d, 0x81, 0x13, 0x9d, 0xa0, 0x03, 0x5b, 0x96, 0x6d, 0xf7, 0xfa, + 0x31, 0xc5, 0xa1, 0x31, 0x94, 0x9c, 0x47, 0xfa, 0xa6, 0x46, 0x26, 0x1f, 0xc3, 0x82, 0x1d, 0x78, + 0x7e, 0x5a, 0x56, 0x60, 0xcb, 0x16, 0x63, 0xe8, 0xc2, 0xc6, 0x7f, 0x4b, 0xb0, 0x98, 0x76, 0x8b, + 0x2c, 0xd5, 0xef, 0x43, 0x2d, 0x88, 0x1d, 0x24, 0x37, 0xe7, 0x4a, 0xfa, 0x0c, 0xc9, 0x3b, 0xd2, + 0x4c, 0xba, 0x60, 0xfc, 0xcf, 0x65, 0x14, 0x28, 0xa5, 0x02, 0x2f, 0x59, 0x9b, 0x99, 0x12, 0x24, + 0x5f, 0x26, 0x38, 0x2e, 0x2e, 0x15, 0xca, 0xbc, 0xef, 0x92, 0xea, 0x9b, 0x32, 0xbd, 0xc2, 0x77, + 0x71, 0xd5, 0xfe, 0x13, 0xe9, 0x8a, 0x6c, 0x8d, 0xcf, 0xfb, 0x3c, 0x43, 0x8e, 0x70, 0x8f, 0xf1, + 0x0d, 0xd4, 0x14, 0x29, 0x06, 0x3e, 0x22, 0xf6, 0x38, 0xf0, 0x41, 0xca, 0xa1, 0xcc, 0xc5, 0x48, + 0xc1, 0x4f, 0x86, 0x30, 0xd1, 0xd6, 0x0e, 0x9b, 0xc5, 0x1a, 0xf4, 0x90, 0xc2, 0x34, 0x2b, 0x23, + 0xb7, 0x99, 0x90, 0x1f, 0x22, 0x15, 0xb1, 0xff, 0x82, 
0x32, 0xce, 0xa9, 0x70, 0x4b, 0x83, 0x4f, + 0x53, 0x69, 0xf8, 0xf4, 0x9f, 0x0a, 0xcc, 0x67, 0x5d, 0x72, 0x3b, 0x87, 0x9e, 0x3a, 0x89, 0x39, + 0xb3, 0xf3, 0x69, 0x99, 0xed, 0x46, 0xbc, 0x89, 0xa7, 0x32, 0x16, 0x51, 0xfb, 0x5c, 0x6e, 0x6c, + 0xa6, 0x4f, 0xdf, 0x1b, 0x0e, 0x31, 0x21, 0xf0, 0x95, 0x61, 0xe1, 0x26, 0x9b, 0x4c, 0x7b, 0x2b, + 0x38, 0x0c, 0xf9, 0x71, 0x82, 0xda, 0xb3, 0x6f, 0x72, 0x0d, 0xea, 0xac, 0xd4, 0x40, 0xbc, 0xc3, + 0xc0, 0x97, 0xbc, 0x0c, 0x00, 0x49, 0x42, 0xe8, 0x85, 0x90, 0x7f, 0x9a, 0xba, 0xc7, 0x31, 0x04, + 0x4a, 0x2e, 0x26, 0xe2, 0xe4, 0x61, 0x72, 0x36, 0x3a, 0x6c, 0x66, 0xc8, 0xae, 0x0d, 0xe2, 0xa2, + 0xa3, 0xa9, 0x04, 0xf9, 0x6d, 0x82, 0x29, 0xb9, 0xe4, 0x73, 0x95, 0x52, 0x45, 0x61, 0xf1, 0x61, + 0x7e, 0xf5, 0xa7, 0x40, 0xaa, 0x47, 0xe9, 0xda, 0xa4, 0xc6, 0x87, 0xb8, 0x39, 0x76, 0x88, 0xd3, + 0x11, 0xd5, 0x55, 0x00, 0x3f, 0x70, 0x8e, 0x9d, 0x01, 0x3d, 0xa4, 0x76, 0x1b, 0x38, 0xe8, 0xd7, + 0x28, 0xfc, 0x5a, 0x49, 0x5e, 0x4c, 0xf4, 0x02, 0xcf, 0x8b, 0x0e, 0xc2, 0x76, 0x5d, 0x5c, 0x46, + 0xc4, 0x64, 0x93, 0x53, 0xd9, 0xdd, 0x01, 0x03, 0xaf, 0xfc, 0xb6, 0x63, 0x4e, 0xd4, 0xca, 0xd8, + 0xe6, 0x97, 0x1d, 0x8b, 0xec, 0x40, 0xb2, 0x1d, 0xb7, 0xdd, 0xe0, 0x3d, 0x45, 0x83, 0xe5, 0x3b, + 0xfe, 0xd1, 0xf3, 0x5c, 0x04, 0xe4, 0x4d, 0xce, 0xaa, 0x71, 0xca, 0x13, 0x24, 0xb0, 0xa8, 0x8d, + 0xa2, 0x93, 0xf6, 0x3c, 0xa7, 0xb3, 0x4f, 0x44, 0xbe, 0xb2, 0x10, 0x6c, 0x71, 0xef, 0x5f, 0x19, + 0xb3, 0x89, 0xdf, 0x1b, 0xec, 0xf7, 0xd7, 0x12, 0x2c, 0x09, 0xb8, 0xa6, 0x25, 0x98, 0x09, 0x50, + 0x0b, 0xb9, 0xa5, 0x10, 0x62, 0x16, 0x62, 0x64, 0x17, 0x2b, 0xe5, 0xc8, 0x03, 0x68, 0xc6, 0x63, + 0xca, 0x9e, 0xe5, 0xb7, 0x61, 0xcb, 0x46, 0xa8, 0x37, 0x8d, 0xcf, 0x63, 0xc0, 0xaa, 0x27, 0x45, + 0x71, 0xf4, 0x5e, 0xc7, 0x44, 0xa8, 0xee, 0xbd, 0x94, 0xca, 0x75, 0x45, 0x43, 0x9c, 0x75, 0x8f, + 0xc1, 0x4b, 0x2b, 0x88, 0x72, 0x0b, 0x3e, 0x43, 0x5f, 0x8e, 0x2d, 0xd3, 0x7d, 0x25, 0xfc, 0xeb, + 0xc2, 0x22, 0x43, 0x9d, 0xef, 0x30, 0x28, 0xcb, 0x03, 0x6c, 0xd9, 0xde, 0x28, 0x92, 0xf9, 0x2f, + 0x6e, 0x1a, 0xcb, 0x02, 0x09, 0xe7, 0x67, 0xfb, 0x0c, 0x96, 0x04, 0x10, 0x7d, 0x97, 0x45, 0x5c, + 0x8c, 0x61, 0x70, 0x7e, 0xdc, 0x3f, 0x4d, 0x69, 0x89, 0x70, 0x82, 0xca, 0xf9, 0xd3, 0x74, 0x39, + 0xb7, 0x9c, 0x0f, 0x82, 0x54, 0x35, 0x97, 0x0f, 0xad, 0xe9, 0x82, 0xd0, 0x32, 0x73, 0xe5, 0xb5, + 0x28, 0xee, 0x3e, 0xce, 0x8f, 0xfe, 0x03, 0x56, 0xd7, 0x3b, 0xa2, 0xba, 0x56, 0x53, 0x2b, 0x94, + 0x7f, 0x2b, 0x53, 0x5d, 0xb7, 0xc7, 0xa9, 0xa9, 0x8a, 0xeb, 0x7f, 0x94, 0xa1, 0xa6, 0x78, 0xb9, + 0xda, 0xfa, 0x76, 0xae, 0xb6, 0x9e, 0xf0, 0x00, 0x2a, 0xbf, 0xed, 0x00, 0xc2, 0x52, 0x8f, 0x7f, + 0xf4, 0x02, 0x7a, 0x20, 0x3d, 0x50, 0xe5, 0x04, 0x93, 0x1e, 0x24, 0x2e, 0xad, 0x9c, 0xc9, 0xa5, + 0xb7, 0x33, 0x57, 0x70, 0x57, 0xf3, 0xf2, 0x85, 0x07, 0xc5, 0x76, 0xd1, 0xdd, 0xdb, 0x07, 0x05, + 0x9d, 0xdf, 0x5b, 0xf4, 0xba, 0x2b, 0x70, 0x80, 0x1e, 0x10, 0x32, 0x19, 0xad, 0x63, 0xb5, 0xac, + 0xa8, 0x12, 0x0c, 0x90, 0xfc, 0xd2, 0x4c, 0x4d, 0x8a, 0xed, 0xec, 0x94, 0x81, 0x93, 0x5b, 0xa4, + 0x33, 0xec, 0xec, 0x3f, 0xe8, 0x75, 0xcc, 0x98, 0xeb, 0x96, 0x77, 0x0d, 0xab, 0x09, 0xb7, 0xf8, + 0x5b, 0x00, 0x1b, 0x3f, 0x58, 0x31, 0x91, 0x0a, 0xb6, 0xa8, 0xf9, 0x6b, 0x92, 0x82, 0x6c, 0x2c, + 0x76, 0x0e, 0x1c, 0xd7, 0x09, 0x8f, 0x04, 0x7f, 0x86, 0xf3, 0x21, 0x26, 0x6d, 0xf0, 0x37, 0x13, + 0xfa, 0x06, 0x11, 0x46, 0xdf, 0xb3, 0x29, 0x06, 0x0d, 0x7f, 0x33, 0x61, 0x84, 0x4d, 0x6c, 0x27, + 0x3b, 0xa0, 0x3a, 0xd1, 0x0e, 0xa8, 0x65, 0x76, 0xc0, 0x12, 0xcc, 0xa0, 0xbe, 0xa1, 0xe7, 0xf2, + 0x92, 0xa3, 0x66, 0xca, 0x96, 0x56, 0x19, 0xd5, 0xc7, 0x55, 0x46, 0xa7, 0xdc, 
0xda, 0x64, 0x2a, + 0xa3, 0xb9, 0x71, 0x95, 0xd1, 0x59, 0x2e, 0x6d, 0xb4, 0x62, 0xae, 0x71, 0x5a, 0x31, 0xf7, 0x63, + 0x6e, 0x8f, 0x47, 0x78, 0x58, 0x67, 0x03, 0x5a, 0xee, 0x8f, 0x5b, 0x99, 0xbb, 0x9d, 0xf6, 0x38, + 0x2b, 0xa8, 0xab, 0x9d, 0x57, 0x50, 0xdf, 0x7e, 0x83, 0xee, 0x3b, 0xfb, 0xe1, 0x8a, 0xaa, 0xf6, + 0x87, 0xb6, 0x44, 0x64, 0xec, 0x33, 0x2e, 0xda, 0xca, 0x49, 0xd1, 0xa6, 0x6a, 0x3f, 0x16, 0xa6, + 0x73, 0xb2, 0xf6, 0x33, 0xee, 0xc3, 0x9c, 0x98, 0x4b, 0x6a, 0xbb, 0xc4, 0xb4, 0xb5, 0xd9, 0x29, + 0x5d, 0xe2, 0x62, 0xb2, 0x25, 0xe9, 0x34, 0x08, 0xf8, 0xda, 0x05, 0x1d, 0x5b, 0xc6, 0x1d, 0xa8, + 0xf3, 0x78, 0x93, 0xc7, 0xe8, 0x0d, 0x1d, 0xdc, 0x9f, 0x16, 0x94, 0x0c, 0xd0, 0xb0, 0x84, 0xc2, + 0xe9, 0x6a, 0xf7, 0x7f, 0x92, 0x39, 0x5d, 0x16, 0xd3, 0xfd, 0x33, 0x27, 0xcb, 0x6b, 0xa8, 0x70, + 0x72, 0x6e, 0xf7, 0x5f, 0x62, 0x40, 0xd3, 0xf7, 0x7a, 0x91, 0x75, 0xa8, 0x9e, 0x40, 0x19, 0xe1, + 0x29, 0xb6, 0xf9, 0x0b, 0x2e, 0x63, 0xda, 0x0e, 0x4e, 0x1c, 0x85, 0x12, 0x95, 0xd4, 0x19, 0x6d, + 0x4b, 0x90, 0x58, 0x31, 0x10, 0x3a, 0xbf, 0xa3, 0xdc, 0x52, 0xd3, 0x26, 0xff, 0xc6, 0x72, 0x8c, + 0xe8, 0xfa, 0x4a, 0x73, 0x61, 0x68, 0xf2, 0xe5, 0xc4, 0x89, 0xaf, 0x99, 0x56, 0xd8, 0x94, 0x5c, + 0x34, 0x33, 0x11, 0x16, 0x48, 0x25, 0xbb, 0xb3, 0x5b, 0xeb, 0x33, 0x38, 0x9f, 0xea, 0xaf, 0x9e, + 0x1a, 0x52, 0x03, 0x64, 0x67, 0x97, 0x9d, 0xff, 0x5d, 0x02, 0xd8, 0x18, 0x45, 0x47, 0x12, 0xf3, + 0x75, 0xa0, 0xca, 0xc0, 0xaa, 0x56, 0xee, 0xa8, 0x36, 0xe3, 0xf9, 0x56, 0x18, 0x22, 0xe0, 0x8a, + 0x0b, 0x1f, 0xd5, 0xe6, 0x78, 0x6d, 0xa4, 0x5e, 0x50, 0xf9, 0x37, 0x7b, 0x81, 0x15, 0xcf, 0xce, + 0x3d, 0xc4, 0xab, 0x88, 0xd4, 0x43, 0x79, 0xc2, 0x36, 0x04, 0x75, 0x43, 0x10, 0x99, 0x98, 0x63, + 0x53, 0x54, 0x2d, 0x3a, 0xe9, 0x45, 0xde, 0x6b, 0xea, 0x4a, 0x64, 0xd7, 0x88, 0xa9, 0x4f, 0x19, + 0x91, 0x89, 0x05, 0xf4, 0x10, 0xad, 0x1c, 0xc4, 0x62, 0x33, 0x42, 0x2c, 0xa6, 0x72, 0x31, 0xf6, + 0x62, 0xdf, 0xda, 0x1f, 0x0d, 0x06, 0x62, 0x91, 0x93, 0xda, 0x12, 0x81, 0x94, 0x58, 0x47, 0xf6, + 0xfa, 0x20, 0x31, 0x91, 0x5c, 0xdc, 0xf7, 0xaf, 0xe1, 0xcf, 0xc3, 0x82, 0xa6, 0xa8, 0x2c, 0x3f, + 0x31, 0x16, 0x44, 0x65, 0xfa, 0x6e, 0xfa, 0x1b, 0x17, 0xe0, 0x7c, 0xaa, 0xbf, 0x18, 0x76, 0xf5, + 0x32, 0x54, 0xe3, 0xd7, 0x79, 0x32, 0x0b, 0xe5, 0xa7, 0x9b, 0xfb, 0xad, 0x73, 0xec, 0xe3, 0xd9, + 0xd6, 0x7e, 0xab, 0xb4, 0xba, 0x0a, 0xf3, 0x99, 0x4b, 0x47, 0x52, 0x83, 0x8a, 0xb9, 0xbd, 0xb1, + 0xf5, 0x02, 0xc5, 0xe6, 0xa0, 0xba, 0xf7, 0xe4, 0xa9, 0x68, 0x95, 0x56, 0x37, 0xa1, 0x99, 0x3e, + 0xee, 0x48, 0x1d, 0x66, 0x37, 0x91, 0xfb, 0x74, 0x7b, 0x0b, 0x85, 0xb1, 0x61, 0x3e, 0xdb, 0xdb, + 0xdb, 0xd9, 0x7b, 0xd8, 0x2a, 0x11, 0x80, 0x99, 0xed, 0x6f, 0x76, 0x18, 0x63, 0x8a, 0x31, 0x9e, + 0xed, 0x3d, 0xda, 0x7b, 0xf2, 0x9b, 0xbd, 0x56, 0x79, 0xfd, 0x9f, 0x55, 0x68, 0x9a, 0x62, 0x09, + 0x5d, 0x8c, 0x05, 0x07, 0x81, 0xe4, 0x7d, 0x98, 0x8d, 0x7f, 0x7d, 0x48, 0x0e, 0xd6, 0xf4, 0x7f, + 0x1a, 0x9d, 0x76, 0x9e, 0x21, 0xcd, 0x76, 0x8e, 0xbc, 0x80, 0x56, 0xf6, 0xd1, 0x8d, 0x24, 0x17, + 0x48, 0x63, 0x5e, 0xf7, 0x3a, 0xd7, 0x4f, 0x91, 0x50, 0x43, 0x77, 0xa1, 0x99, 0x7e, 0x4e, 0x23, + 0x49, 0x69, 0x57, 0xf8, 0x4c, 0xd7, 0xb9, 0x36, 0x96, 0xaf, 0xeb, 0x9b, 0x7d, 0x48, 0xd3, 0xf4, + 0x1d, 0xf3, 0x48, 0xa7, 0xe9, 0x3b, 0xf6, 0x15, 0x8e, 0x0f, 0x9d, 0x7b, 0x72, 0x5a, 0x19, 0xff, + 0x68, 0x90, 0x1b, 0x7a, 0xdc, 0x4b, 0x84, 0x30, 0x45, 0xfa, 0xc6, 0x97, 0xe8, 0x0f, 0x3d, 0x05, + 0x37, 0xee, 0x9a, 0x29, 0x8a, 0xaf, 0x8a, 0x71, 0xd0, 0xe7, 0x58, 0xb2, 0xa5, 0xc1, 0x2c, 0xb9, + 0x96, 0xf1, 0x4b, 0x16, 0xe4, 0x75, 0x56, 0xc6, 0x0b, 0xa4, 0xfd, 0xa6, 0x43, 0xd5, 0x94, 0xdf, + 0x0a, 
0xf0, 0x6f, 0xca, 0x6f, 0x85, 0x18, 0xf7, 0x1c, 0xd9, 0x87, 0x46, 0x0a, 0x90, 0x92, 0x2b, + 0x29, 0x5f, 0xe7, 0x86, 0xbc, 0x3a, 0x8e, 0xad, 0x2f, 0x3f, 0x03, 0x46, 0xb5, 0xe5, 0x17, 0x63, + 0xdc, 0xce, 0xca, 0x78, 0x81, 0xac, 0xaf, 0x92, 0xaa, 0x3c, 0xe3, 0xab, 0x1c, 0x7e, 0xcb, 0xf8, + 0x2a, 0x5f, 0xce, 0x4b, 0x5f, 0x65, 0xca, 0xeb, 0x6b, 0x63, 0x6b, 0x96, 0xbc, 0xaf, 0x8a, 0xcb, + 0x20, 0x1c, 0xf7, 0x2e, 0x4c, 0xb3, 0x52, 0x83, 0x24, 0x87, 0xba, 0x56, 0xe5, 0x74, 0x2e, 0x64, + 0xa8, 0x71, 0xb7, 0x1b, 0xa5, 0x5b, 0xa5, 0xf5, 0xbf, 0x4d, 0xc1, 0x9c, 0xc8, 0x83, 0x32, 0x95, + 0x3c, 0x04, 0x48, 0x4e, 0x63, 0xd2, 0x49, 0x2d, 0x2a, 0x55, 0x52, 0x74, 0x2e, 0x15, 0xf2, 0x94, + 0x52, 0x5f, 0xcb, 0xfa, 0x45, 0x2e, 0xf4, 0x52, 0x26, 0xed, 0xa6, 0x16, 0x79, 0xb9, 0x98, 0xa9, + 0xc6, 0xda, 0x82, 0x9a, 0xca, 0xf6, 0x44, 0x3b, 0x24, 0x32, 0x47, 0x55, 0xa7, 0x53, 0xc4, 0xd2, + 0x35, 0xd2, 0xd2, 0xbb, 0xa6, 0x51, 0xfe, 0xd0, 0xd0, 0x34, 0x2a, 0x38, 0x11, 0x8c, 0x73, 0xff, + 0x0f, 0x00, 0x00, 0xff, 0xff, 0x42, 0xad, 0xb8, 0x8d, 0x1c, 0x27, 0x00, 0x00, } diff --git a/vendor/github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.proto b/vendor/github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.proto index 9fe3b7ca..8e3e4856 100644 --- a/vendor/github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.proto +++ b/vendor/github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime/api.proto @@ -112,26 +112,6 @@ message Mount { optional bool selinux_relabel = 5; } -// ResourceRequirements contains a set of resources -// Valid resources are: -// - cpu, in cores. (500m = .5 cores) -// - memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) -message ResourceRequirements { - // The maximum amount of compute resources allowed. - optional double limits = 1; - // The minimum amount of compute resources required. - // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value - optional double requests = 2; -} - -// PodSandboxResources contains the CPU/memory resource requirements. -message PodSandboxResources { - // CPU resource requirement. - optional ResourceRequirements cpu = 1; - // Memory resource requirement. - optional ResourceRequirements memory = 2; -} - // NamespaceOption provides options for Linux namespaces. message NamespaceOption { // If set, use the host's network namespace. @@ -154,11 +134,29 @@ message LinuxPodSandboxConfig { optional NamespaceOption namespace_options = 2; } +// PodSandboxMetadata holds all necessary information for building the sandbox name. +// The container runtime is encouraged to expose the metadata associated with the +// PodSandbox in its user interface for better user experience. E.g., runtime can +// construct a unique PodSandboxName based on the metadata. +message PodSandboxMetadata { + // The pod name of the sandbox. Same as the pod name in the PodSpec. + optional string name = 1; + // The pod uid of the sandbox. Same as the pod UID in the PodSpec. + optional string uid = 2; + // The pod namespace of the sandbox. Same as the pod namespace in the PodSpec. + optional string namespace = 3; + // The attempt number of creating the sandbox. + optional uint32 attempt = 4; +} + // PodSandboxConfig holds all the required and optional fields for creating a // sandbox. message PodSandboxConfig { - // The name of the sandbox. - optional string name = 1; + // The metadata of the sandbox. 
This information will uniquely identify + // the sandbox, and the runtime should leverage this to ensure correct + // operation. The runtime may also use this information to improve UX, such + // as by constructing a readable name. + optional PodSandboxMetadata metadata = 1; // The hostname of the sandbox. optional string hostname = 2; // Path to the directory on the host in which container log files are @@ -181,20 +179,13 @@ message PodSandboxConfig { optional DNSOption dns_options = 4; // The port mappings for the sandbox. repeated PortMapping port_mappings = 5; - // Resources specifies the resource limits for the sandbox (i.e., the - // aggregate cpu/memory resources limits of all containers). - // Note: On a Linux host, kubelet will create a pod-level cgroup and pass - // it as the cgroup parent for the PodSandbox. For some runtimes, this is - // sufficient. For others, e.g., hypervisor-based runtimes, explicit - // resource limits for the sandbox are needed at creation time. - optional PodSandboxResources resources = 6; // Labels are key value pairs that may be used to scope and select individual resources. - map labels = 7; + map labels = 6; // Annotations is an unstructured key value map that may be set by external // tools to store and retrieve arbitrary metadata. - map annotations = 8; + map annotations = 7; // Optional configurations specific to Linux hosts. - optional LinuxPodSandboxConfig linux = 9; + optional LinuxPodSandboxConfig linux = 8; } message CreatePodSandboxRequest { @@ -255,8 +246,8 @@ enum PodSandBoxState { message PodSandboxStatus { // ID of the sandbox. optional string id = 1; - // Name of the sandbox - optional string name = 2; + // Metadata of the sandbox. + optional PodSandboxMetadata metadata = 2; // State of the sandbox. optional PodSandBoxState state = 3; // Creation timestamp of the sandbox @@ -302,8 +293,8 @@ message ListPodSandboxRequest { message PodSandbox { // The id of the PodSandbox optional string id = 1; - // The name of the PodSandbox - optional string name = 2; + // Metadata of the sandbox + optional PodSandboxMetadata metadata = 2; // The state of the PodSandbox optional PodSandBoxState state = 3; // Creation timestamps of the sandbox @@ -385,9 +376,26 @@ message LinuxUser { repeated int64 additional_gids = 3; } -message ContainerConfig { - // Name of the container. +// ContainerMetadata holds all necessary information for building the container +// name. The container runtime is encouraged to expose the metadata in its user +// interface for better user experience. E.g., runtime can construct a unique +// container name based on the metadata. Note that (name, attempt) is unique +// within a sandbox for the entire lifetime of the sandbox. +message ContainerMetadata { + // The name of the container. Same as the container name in the PodSpec. optional string name = 1; + // The attempt number of creating the container. + optional uint32 attempt = 2; +} + +// ContainerConfig holds all the required and optional fields for creating a +// container. +message ContainerConfig { + // The metadata of the container. This information will uniquely identify + // the container, and the runtime should leverage this to ensure correct + // operation. The runtime may also use this information to improve UX, such + // as by constructing a readable name. + optional ContainerMetadata metadata = 1 ; // Image to use. 
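The new metadata messages above replace bare name strings in the CRI API. As a rough illustration only (not part of the vendored diff), the sketch below shows the kind of unique, readable sandbox name a runtime could derive from PodSandboxMetadata; the plain Go struct and the "k8s_" naming scheme are assumptions, not anything the API mandates.

package main

import "fmt"

// podSandboxMetadata mirrors the fields of the PodSandboxMetadata message above.
type podSandboxMetadata struct {
	Name      string
	UID       string
	Namespace string
	Attempt   uint32
}

// sandboxName builds a readable name; (name, namespace, uid, attempt)
// uniquely identifies a sandbox for its whole lifetime, so including all
// four keeps the result unique as well.
func sandboxName(m podSandboxMetadata) string {
	return fmt.Sprintf("k8s_%s_%s_%s_%d", m.Name, m.Namespace, m.UID, m.Attempt)
}

func main() {
	m := podSandboxMetadata{Name: "nginx", UID: "c6c4dc2a", Namespace: "default", Attempt: 0}
	fmt.Println(sandboxName(m)) // k8s_nginx_default_c6c4dc2a_0
}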
optional ImageSpec image = 2; // Command to execute (i.e., entrypoint for docker) @@ -491,7 +499,7 @@ message ContainerFilter { optional string name = 1; // ID of the container. optional string id = 2; - // State of the contianer. + // State of the container. optional ContainerState state = 3; // The id of the pod sandbox optional string pod_sandbox_id = 4; @@ -511,8 +519,8 @@ message Container { // The ID of the container, used by the container runtime to identify // a container. optional string id = 1; - // The name of the container - optional string name = 2; + // The metadata of the container. + optional ContainerMetadata metadata = 2; // The spec of the image optional ImageSpec image = 3; // Reference to the image in use. For most runtimes, this should be an @@ -522,6 +530,9 @@ message Container { optional ContainerState state = 5; // Labels are key value pairs that may be used to scope and select individual resources. map labels = 6; + // Annotations is an unstructured key value map that may be set by external + // tools to store and retrieve arbitrary metadata. + map annotations = 7; } message ListContainersResponse { @@ -538,8 +549,8 @@ message ContainerStatusRequest { message ContainerStatus { // ID of the container. optional string id = 1; - // Name of the container. - optional string name = 2; + // Metadata of the container. + optional ContainerMetadata metadata = 2; // Status of the container. optional ContainerState state = 3; // Creation time of the container. diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index e59c800f..547e238a 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -7,6 +7,7 @@ package http2 import ( + "crypto/tls" "net/http" "sync" ) @@ -17,21 +18,50 @@ type ClientConnPool interface { MarkDead(*ClientConn) } +// clientConnPoolIdleCloser is the interface implemented by ClientConnPool +// implementations which can close their idle connections. +type clientConnPoolIdleCloser interface { + ClientConnPool + closeIdleConnections() +} + +var ( + _ clientConnPoolIdleCloser = (*clientConnPool)(nil) + _ clientConnPoolIdleCloser = noDialClientConnPool{} +) + +// TODO: use singleflight for dialing and addConnCalls? type clientConnPool struct { - t *Transport + t *Transport + mu sync.Mutex // TODO: maybe switch to RWMutex // TODO: add support for sharing conns based on cert names // (e.g. share conn for googleapis.com and appspot.com) - conns map[string][]*ClientConn // key is host:port - dialing map[string]*dialCall // currently in-flight dials - keys map[*ClientConn][]string + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls } func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, true) + return p.getClientConn(req, addr, dialOnMiss) } +const ( + dialOnMiss = true + noDialOnMiss = false +) + func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + if req.Close && dialOnMiss { + // It gets its own connection. 
+ cc, err := p.t.dialClientConn(addr) + if err != nil { + return nil, err + } + cc.singleUse = true + return cc, nil + } p.mu.Lock() for _, cc := range p.conns[addr] { if cc.CanTakeNewRequest() { @@ -85,6 +115,64 @@ func (c *dialCall) dial(addr string) { c.p.mu.Unlock() } +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + func (p *clientConnPool) addConn(key string, cc *ClientConn) { p.mu.Lock() p.addConnLocked(key, cc) @@ -156,3 +244,12 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { } return out } + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. 
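For context, a minimal usage sketch of the exported http2.ConfigureTransport entry point, which is what installs the no-dial pool described above on an existing net/http Transport; the client setup below is illustrative.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{}
	// ConfigureTransport registers an "h2" TLSNextProto hook on t1; TLS
	// connections that negotiate HTTP/2 are handed to the http2 Transport,
	// which reuses them through the connection pool instead of dialing itself.
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: t1}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}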
+type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go index f8c879cb..4f720f53 100644 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ b/vendor/golang.org/x/net/http2/configure_transport.go @@ -12,11 +12,15 @@ import ( "net/http" ) -func configureTransport(t1 *http.Transport) error { +func configureTransport(t1 *http.Transport) (*Transport, error) { connPool := new(clientConnPool) - t2 := &Transport{ConnPool: noDialClientConnPool{connPool}} + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { - return err + return nil, err } if t1.TLSClientConfig == nil { t1.TLSClientConfig = new(tls.Config) @@ -28,12 +32,17 @@ func configureTransport(t1 *http.Transport) error { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - cc, err := t2.NewClientConn(c) - if err != nil { - c.Close() + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() } - connPool.addConn(authorityAddr(authority), cc) return t2 } if m := t1.TLSNextProto; len(m) == 0 { @@ -43,7 +52,7 @@ func configureTransport(t1 *http.Transport) error { } else { m["h2"] = upgradeFn } - return nil + return t2, nil } // registerHTTPSProtocol calls Transport.RegisterProtocol but @@ -58,16 +67,6 @@ func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) return nil } -// noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS -// connection instead. -type noDialClientConnPool struct{ *clientConnPool } - -func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - const doDial = false - return p.getClientConn(req, addr, doDial) -} - // noDialH2RoundTripper is a RoundTripper which only tries to complete the request // if there's already has a cached connection to the host. type noDialH2RoundTripper struct{ t *Transport } diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go index 1f8de65e..71a4e290 100644 --- a/vendor/golang.org/x/net/http2/errors.go +++ b/vendor/golang.org/x/net/http2/errors.go @@ -4,7 +4,10 @@ package http2 -import "fmt" +import ( + "errors" + "fmt" +) // An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. type ErrCode uint32 @@ -75,3 +78,45 @@ func (e StreamError) Error() string { type goAwayFlowError struct{} func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connErrorReason wraps a ConnectionError with an informative error about why it occurs. + +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(ErrCodeProtocol). 
+type connError struct { + Code ErrCode + Reason string +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 6b8a74f0..981d407a 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -10,7 +10,12 @@ import ( "errors" "fmt" "io" + "log" + "strings" "sync" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" ) const frameHeaderLen = 9 @@ -171,6 +176,12 @@ func (h FrameHeader) Header() FrameHeader { return h } func (h FrameHeader) String() string { var buf bytes.Buffer buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { buf.WriteString(h.Type.String()) if h.Flags != 0 { buf.WriteString(" flags=") @@ -187,15 +198,14 @@ func (h FrameHeader) String() string { if name != "" { buf.WriteString(name) } else { - fmt.Fprintf(&buf, "0x%x", 1<>16), byte(length>>8), byte(length)) + if logFrameWrites { + f.logWrite() + } + n, err := f.w.Write(f.wbuf) if err == nil && n != len(f.wbuf) { err = io.ErrShortWrite @@ -317,6 +366,24 @@ func (f *Framer) endWrite() error { return err } +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + log.Printf("http2: Framer %p: failed to decode just-written frame", f) + return + } + log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } @@ -332,8 +399,9 @@ const ( // NewFramer returns a Framer that writes frames to w and reads them from r. func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ - w: w, - r: r, + w: w, + r: r, + logReads: logFrameReads, } fr.getReadBuf = func(size uint32) []byte { if cap(fr.readBuf) >= int(size) { @@ -357,15 +425,39 @@ func (fr *Framer) SetMaxReadFrameSize(v uint32) { fr.maxReadSize = v } +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. 
ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + // ErrFrameTooLarge is returned from Framer.ReadFrame when the peer // sends a frame that is larger than declared with SetMaxReadFrameSize. var ErrFrameTooLarge = errors.New("http2: frame too large") +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + // ReadFrame reads a single frame. The returned Frame is only valid // until the next call to ReadFrame. -// If the frame is larger than previously set with SetMaxReadFrameSize, -// the returned error is ErrFrameTooLarge. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil if fr.lastFrame != nil { fr.lastFrame.invalidate() } @@ -382,12 +474,71 @@ func (fr *Framer) ReadFrame() (Frame, error) { } f, err := typeFrameParser(fh.Type)(fh, payload) if err != nil { + if ce, ok := err.(connError); ok { + return nil, fr.connError(ce.Code, ce.Reason) + } return nil, err } - fr.lastFrame = f + if err := fr.checkFrameOrder(f); err != nil { + return nil, err + } + if fr.logReads { + log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } return f, nil } +// connError returns ConnectionError(code) but first +// stashes away a public reason to the caller can optionally relay it +// to the peer before hanging up on them. This might help others debug +// their implementations. +func (fr *Framer) connError(code ErrCode, reason string) error { + fr.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. +func (fr *Framer) checkFrameOrder(f Frame) error { + last := fr.lastFrame + fr.lastFrame = f + if fr.AllowIllegalReads { + return nil + } + + fh := f.Header() + if fr.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, fr.lastHeaderStream)) + } + if fh.StreamID != fr.lastHeaderStream { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, fr.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + fr.lastHeaderStream = 0 + } else { + fr.lastHeaderStream = fh.StreamID + } + } + + return nil +} + // A DataFrame conveys arbitrary, variable-length sequences of octets // associated with a stream. 
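A hedged sketch of the read-loop contract documented above: call ReadFrame until a terminal error, and consult ErrorDetail for the more specific reason when one is available. The conn argument is assumed to be an established connection carrying HTTP/2 frames.

package http2sketch

import (
	"log"
	"net"

	"golang.org/x/net/http2"
)

// readFrames drains frames from conn until a terminal error occurs.
func readFrames(conn net.Conn) {
	fr := http2.NewFramer(conn, conn) // write to conn, read from conn
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			// ErrorDetail may carry a more specific reason for a
			// ConnectionError or StreamError; it is reset by the next ReadFrame.
			if d := fr.ErrorDetail(); d != nil {
				log.Printf("http2 read error: %v (%v)", err, d)
			} else {
				log.Printf("http2 read error: %v", err)
			}
			return
		}
		log.Printf("received %v", f.Header())
	}
}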
// See http://http2.github.io/http2-spec/#rfc.section.6.1 @@ -416,7 +567,7 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { // field is 0x0, the recipient MUST respond with a // connection error (Section 5.4.1) of type // PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) + return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } f := &DataFrame{ FrameHeader: fh, @@ -434,13 +585,20 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { // length of the frame payload, the recipient MUST // treat this as a connection error. // Filed: https://github.com/http2/http2-spec/issues/610 - return nil, ConnectionError(ErrCodeProtocol) + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} } f.data = payload[:len(payload)-int(padSize)] return f, nil } -var errStreamID = errors.New("invalid streamid") +var ( + errStreamID = errors.New("invalid stream ID") + errDepStreamID = errors.New("invalid dependent stream ID") +) + +func validStreamIDOrZero(streamID uint32) bool { + return streamID&(1<<31) == 0 +} func validStreamID(streamID uint32) bool { return streamID != 0 && streamID&(1<<31) == 0 @@ -741,7 +899,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { // is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) + return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} } var padLength uint8 if fh.Flags.Has(FlagHeadersPadded) { @@ -827,8 +985,8 @@ func (f *Framer) WriteHeaders(p HeadersFrameParam) error { } if !p.Priority.IsZero() { v := p.Priority.StreamDep - if !validStreamID(v) && !f.AllowIllegalWrites { - return errors.New("invalid dependent stream id") + if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { + return errDepStreamID } if p.Priority.Exclusive { v |= 1 << 31 @@ -871,10 +1029,10 @@ func (p PriorityParam) IsZero() bool { func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } if len(payload) != 5 { - return nil, ConnectionError(ErrCodeFrameSize) + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} } v := binary.BigEndian.Uint32(payload[:4]) streamID := v & 0x7fffffff // mask off high bit @@ -896,6 +1054,9 @@ func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } f.startWrite(FramePriority, 0, streamID) v := p.StreamDep if p.Exclusive { @@ -944,13 +1105,12 @@ type ContinuationFrame struct { } func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } return &ContinuationFrame{fh, p}, nil } -func (f *ContinuationFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagDataEndStream) -} - func (f *ContinuationFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf @@ -1112,3 +1272,236 @@ type streamEnder interface { type headersEnder interface { HeadersEnded() bool } + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the 
representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. 
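A sketch of enabling the meta-frame path described above: once ReadMetaHeaders is set, a HEADERS frame (plus any CONTINUATIONs) is returned as a single *MetaHeadersFrame with HPACK already decoded. The table size and header-list cap below are assumed values, not requirements.

package http2sketch

import (
	"net"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

// readRequestHeaders reads frames until a decoded header block arrives and
// returns the :method and :path pseudo-headers.
func readRequestHeaders(conn net.Conn) (string, string, error) {
	fr := http2.NewFramer(conn, conn)
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil) // 4096: initial HPACK table size
	fr.MaxHeaderListSize = 16 << 10                  // assumed cap on decoded header bytes
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			return "", "", err
		}
		if mh, ok := f.(*http2.MetaHeadersFrame); ok {
			// Pseudo-header names are passed without the leading colon.
			return mh.PseudoValue("method"), mh.PseudoValue("path"), nil
		}
	}
}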
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if !httplex.ValidHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validWireHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") + } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go new file mode 100644 index 00000000..2b72855f --- /dev/null +++ b/vendor/golang.org/x/net/http2/go16.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.6 + +package http2 + +import ( + "crypto/tls" + "net/http" + "time" +) + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return t1.ExpectContinueTimeout +} + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +func isBadCipher(cipher uint16) bool { + switch cipher { + case tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + // Reject cipher suites from Appendix A. + // "This list includes those cipher suites that do not + // offer an ephemeral key exchange and those that are + // based on the TLS null, stream or block cipher type" + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go new file mode 100644 index 00000000..730319dd --- /dev/null +++ b/vendor/golang.org/x/net/http2/go17.go @@ -0,0 +1,94 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package http2 + +import ( + "context" + "net" + "net/http" + "net/http/httptrace" + "time" +) + +type contextContext interface { + context.Context +} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + ctx, cancel = context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return context.WithCancel(ctx) +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req.WithContext(ctx) +} + +type clientTrace httptrace.ClientTrace + +func reqContext(r *http.Request) context.Context { return r.Context() } + +func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } + +func traceGotConn(req *http.Request, cc *ClientConn) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + cc.mu.Lock() + ci.Reused = cc.nextStreamID > 1 + ci.WasIdle = len(cc.streams) == 0 && ci.Reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *clientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *clientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *clientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *clientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *clientTrace) { + if trace != nil && 
trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} + +func requestTrace(req *http.Request) *clientTrace { + trace := httptrace.ContextClientTrace(req.Context()) + return (*clientTrace)(trace) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 80d621cf..f9bb0339 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -144,7 +144,7 @@ func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { // shouldIndex reports whether f should be indexed. func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.size() <= e.dynTab.maxSize + return !f.Sensitive && f.Size() <= e.dynTab.maxSize } // appendIndexed appends index i, as encoded in "Indexed Header Field" diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 329a8d03..8aa197ad 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -41,7 +41,24 @@ type HeaderField struct { Sensitive bool } -func (hf *HeaderField) size() uint32 { +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7540 section 5.2. +func (hf HeaderField) Size() uint32 { // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of // its entries. The size of an entry is the sum of its name's @@ -163,7 +180,7 @@ func (dt *dynamicTable) setMaxSize(v uint32) { func (dt *dynamicTable) add(f HeaderField) { dt.ents = append(dt.ents, f) - dt.size += f.size() + dt.size += f.Size() dt.evict() } @@ -171,7 +188,7 @@ func (dt *dynamicTable) add(f HeaderField) { func (dt *dynamicTable) evict() { base := dt.ents // keep base pointer of slice for dt.size > dt.maxSize { - dt.size -= dt.ents[0].size() + dt.size -= dt.ents[0].Size() dt.ents = dt.ents[1:] } diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index eb4b1f05..8850e394 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -48,12 +48,16 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") // maxLen bytes will return ErrStringLength. func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { n := rootHuffmanNode - cur, nbits := uint(0), uint8(0) + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. 
+ cur, cbits, sbits := uint(0), uint8(0), uint8(0) for _, b := range v { cur = cur<<8 | uint(b) - nbits += 8 - for nbits >= 8 { - idx := byte(cur >> (nbits - 8)) + cbits += 8 + sbits += 8 + for cbits >= 8 { + idx := byte(cur >> (cbits - 8)) n = n.children[idx] if n == nil { return ErrInvalidHuffman @@ -63,22 +67,40 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { return ErrStringLength } buf.WriteByte(n.sym) - nbits -= n.codeLen + cbits -= n.codeLen n = rootHuffmanNode + sbits = cbits } else { - nbits -= 8 + cbits -= 8 } } } - for nbits > 0 { - n = n.children[byte(cur<<(8-nbits))] - if n.children != nil || n.codeLen > nbits { + for cbits > 0 { + n = n.children[byte(cur<<(8-cbits))] + if n == nil { + return ErrInvalidHuffman + } + if n.children != nil || n.codeLen > cbits { break } + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } buf.WriteByte(n.sym) - nbits -= n.codeLen + cbits -= n.codeLen n = rootHuffmanNode + sbits = cbits } + if sbits > 7 { + // Either there was an incomplete symbol, or overlong padding. + // Both are decoding errors per RFC 7541 section 5.2. + return ErrInvalidHuffman + } + if mask := uint(1<= 127 || ('A' <= r && r <= 'Z') { + if !httplex.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { return false } } @@ -257,7 +289,7 @@ func mustUint31(v int32) uint32 { } // bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC2616, section 4.4. +// permits a body. See RFC 2616, section 4.4. func bodyAllowedForStatus(status int) bool { switch { case status >= 100 && status <= 199: @@ -269,3 +301,51 @@ func bodyAllowedForStatus(status int) bool { } return true } + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owners, so + // stash it away while we sort the user's buffer. 
+ save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go index dbf6033f..efd2e128 100644 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -6,8 +6,41 @@ package http2 -import "net/http" +import ( + "crypto/tls" + "net/http" + "time" +) -func configureTransport(t1 *http.Transport) error { - return errTransportVersion +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +func isBadCipher(cipher uint16) bool { + switch cipher { + case tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + // Reject cipher suites from Appendix A. + // "This list includes those cipher suites that do not + // offer an ephemeral key exchange and those that are + // based on the TLS null, stream or block cipher type" + return true + default: + return false + } } diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 00000000..28df0c16 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package http2 + +import ( + "net" + "net/http" +) + +type contextContext interface{} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. +} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 69446e7a..53b7a1da 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -29,6 +29,12 @@ type pipeBuffer interface { io.Reader } +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + return p.b.Len() +} + // Read waits until data is available and copies bytes // from the buffer into p. 
func (p *pipe) Read(d []byte) (n int, err error) { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index f82082b0..f368738f 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -6,8 +6,8 @@ // instead, and make sure that on close we close all open // streams. then remove doneServing? -// TODO: finish GOAWAY support. Consider each incoming frame type and -// whether it should be ignored during a shutdown race. +// TODO: re-audit GOAWAY support. Consider each incoming frame type and +// whether it should be ignored during graceful shutdown. // TODO: disconnect idle clients. GFE seems to do 4 minutes. make // configurable? or maximum number of idle clients and remove the @@ -48,6 +48,8 @@ import ( "net/http" "net/textproto" "net/url" + "os" + "reflect" "runtime" "strconv" "strings" @@ -192,28 +194,80 @@ func ConfigureServer(s *http.Server, conf *Server) error { if testHookOnConn != nil { testHookOnConn() } - conf.handleConn(hs, c, h) + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) } s.TLSNextProto[NextProtoTLS] = protoHandler s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. return nil } -func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. 
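A brief usage sketch for the ServeConn API documented above, assuming connections are accepted elsewhere and already speak HTTP/2 (for example a *tls.Conn that negotiated "h2"):

package http2sketch

import (
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

// serveH2 hands each accepted connection to an http2.Server.
func serveH2(ln net.Listener, handler http.Handler) error {
	s := &http2.Server{}
	opts := &http2.ServeConnOpts{Handler: handler}
	for {
		c, err := ln.Accept()
		if err != nil {
			return err
		}
		// ServeConn blocks until the connection is no longer readable,
		// so each connection gets its own goroutine.
		go s.ServeConn(c, opts)
	}
}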
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + sc := &serverConn{ - srv: srv, - hs: hs, + srv: s, + hs: opts.baseConfig(), conn: c, + baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), bw: newBufferedWriter(c), - handler: h, + handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), wantWriteFrameCh: make(chan frameWriteMsg, 8), wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), - advMaxStreams: srv.maxConcurrentStreams(), + advMaxStreams: s.maxConcurrentStreams(), writeSched: writeScheduler{ maxFrameSize: initialMaxFrameSize, }, @@ -222,17 +276,18 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { serveG: newGoroutineLock(), pushEnabled: true, } + sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, nil) - sc.hpackDecoder.SetMaxStringLength(sc.maxHeaderStringLen()) fr := NewFramer(sc.bw, c) - fr.SetMaxReadFrameSize(srv.maxReadFrameSize()) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr - if tc, ok := c.(*tls.Conn); ok { + if tc, ok := c.(connectionStater); ok { sc.tlsState = new(tls.ConnectionState) *sc.tlsState = tc.ConnectionState() // 9.2 Use of TLS Features @@ -262,7 +317,7 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { // So for now, do nothing here again. } - if !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." @@ -284,32 +339,8 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { sc.serve() } -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} - func (sc *serverConn) rejectConn(err ErrCode, debug string) { - sc.vlogf("REJECTING conn: %v, %s", err, debug) + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) // ignoring errors. hanging up anyway. 
sc.framer.WriteGoAway(0, err, []byte(debug)) sc.bw.Flush() @@ -323,8 +354,8 @@ type serverConn struct { conn net.Conn bw *bufferedWriter // writing to conn handler http.Handler + baseCtx contextContext framer *Framer - hpackDecoder *hpack.Decoder doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan readFrameResult // written by serverConn.readFrames wantWriteFrameCh chan frameWriteMsg // from handlers -> serve @@ -351,7 +382,6 @@ type serverConn struct { headerTableSize uint32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - req requestParam // non-zero while reading request headers writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush writeSched writeScheduler @@ -360,22 +390,13 @@ type serverConn struct { goAwayCode ErrCode shutdownTimerCh <-chan time.Time // nil until used shutdownTimer *time.Timer // nil until used + freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder } -func (sc *serverConn) maxHeaderStringLen() int { - v := sc.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 -} - func (sc *serverConn) maxHeaderListSize() uint32 { n := sc.hs.MaxHeaderBytes if n <= 0 { @@ -388,21 +409,6 @@ func (sc *serverConn) maxHeaderListSize() uint32 { return uint32(n + typicalHeaders*perFieldOverhead) } -// requestParam is the state of the next request, initialized over -// potentially several frames HEADERS + zero or more CONTINUATION -// frames. -type requestParam struct { - // stream is non-nil if we're reading (HEADER or CONTINUATION) - // frames for a request (but not DATA). - stream *stream - header http.Header - method, path string - scheme, authority string - sawRegularHeader bool // saw a non-pseudo header already - invalidHeader bool // an invalid header was seen - headerListSize int64 // actually uint32, but easier math this way -} - // stream represents a stream. This is the minimal metadata needed by // the serve goroutine. Most of the actual stream state is owned by // the http.Handler's goroutine in the responseWriter. Because the @@ -412,10 +418,12 @@ type requestParam struct { // responseWriter's state field. type stream struct { // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() // owned by serverConn's serve loop: bodyBytes int64 // body bytes seen so far @@ -429,6 +437,8 @@ type stream struct { sentReset bool // only true once detached from streams map gotReset bool // only true once detacted from streams map gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + reqBuf []byte trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -482,12 +492,55 @@ func (sc *serverConn) logf(format string, args ...interface{}) { } } +// errno returns v's underlying uintptr, else 0. 
+// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } - str := err.Error() - if err == io.EOF || strings.Contains(str, "use of closed network connection") { + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { // Boring, expected errors. sc.vlogf(format, args...) } else { @@ -495,86 +548,6 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { } } -func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) { - sc.serveG.check() - sc.vlogf("got header field %+v", f) - switch { - case !validHeader(f.Name): - sc.req.invalidHeader = true - case strings.HasPrefix(f.Name, ":"): - if sc.req.sawRegularHeader { - sc.logf("pseudo-header after regular header") - sc.req.invalidHeader = true - return - } - var dst *string - switch f.Name { - case ":method": - dst = &sc.req.method - case ":path": - dst = &sc.req.path - case ":scheme": - dst = &sc.req.scheme - case ":authority": - dst = &sc.req.authority - default: - // 8.1.2.1 Pseudo-Header Fields - // "Endpoints MUST treat a request or response - // that contains undefined or invalid - // pseudo-header fields as malformed (Section - // 8.1.2.6)." - sc.logf("invalid pseudo-header %q", f.Name) - sc.req.invalidHeader = true - return - } - if *dst != "" { - sc.logf("duplicate pseudo-header %q sent", f.Name) - sc.req.invalidHeader = true - return - } - *dst = f.Value - default: - sc.req.sawRegularHeader = true - sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value) - const headerFieldOverhead = 32 // per spec - sc.req.headerListSize += int64(len(f.Name)) + int64(len(f.Value)) + headerFieldOverhead - if sc.req.headerListSize > int64(sc.maxHeaderListSize()) { - sc.hpackDecoder.SetEmitEnabled(false) - } - } -} - -func (st *stream) onNewTrailerField(f hpack.HeaderField) { - sc := st.sc - sc.serveG.check() - sc.vlogf("got trailer field %+v", f) - switch { - case !validHeader(f.Name): - // TODO: change hpack signature so this can return - // errors? Or stash an error somewhere on st or sc - // for processHeaderBlockFragment etc to pick up and - // return after the hpack Write/Close. For now just - // ignore. 
- return - case strings.HasPrefix(f.Name, ":"): - // TODO: same TODO as above. - return - default: - key := sc.canonicalHeader(f.Name) - if st.trailer != nil { - vv := append(st.trailer[key], f.Value) - st.trailer[key] = vv - - // arbitrary; TODO: read spec about header list size limits wrt trailers - const tooBig = 1000 - if len(vv) >= tooBig { - sc.hpackDecoder.SetEmitEnabled(false) - } - - } - } -} - func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() cv, ok := commonCanonHeader[v] @@ -609,10 +582,11 @@ type readFrameResult struct { // It's run on its own goroutine. func (sc *serverConn) readFrames() { gate := make(gate) + gateDone := gate.Done for { f, err := sc.framer.ReadFrame() select { - case sc.readFrameCh <- readFrameResult{f, err, gate.Done}: + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: case <-sc.doneServing: return } @@ -621,6 +595,9 @@ func (sc *serverConn) readFrames() { case <-sc.doneServing: return } + if terminalReadFrameError(err) { + return + } } } @@ -676,7 +653,9 @@ func (sc *serverConn) serve() { defer sc.stopShutdownTimer() defer close(sc.doneServing) // unblocks handlers trying to send - sc.vlogf("HTTP/2 connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } sc.writeFrame(frameWriteMsg{ write: writeSettings{ @@ -693,7 +672,7 @@ func (sc *serverConn) serve() { sc.unackedSettings++ if err := sc.readPreface(); err != nil { - sc.condlogf(err, "error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return } // Now that we've got the preface, get us out of the @@ -759,7 +738,9 @@ func (sc *serverConn) readPreface() error { return errors.New("timeout waiting for client preface") case err := <-errc: if err == nil { - sc.vlogf("client %v said hello", sc.conn.RemoteAddr()) + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } } return err } @@ -844,7 +825,23 @@ func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { // If you're not on the serve goroutine, use writeFrameFromHandler instead. func (sc *serverConn) writeFrame(wm frameWriteMsg) { sc.serveG.check() - sc.writeSched.add(wm) + + var ignoreWrite bool + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wm.write.(type) { + case *writeResHeaders: + wm.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wm.stream.wroteHeaders { + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.add(wm) + } sc.scheduleFrameWrite() } @@ -1012,18 +1009,6 @@ func (sc *serverConn) resetStream(se StreamError) { } } -// curHeaderStreamID returns the stream ID of the header block we're -// currently in the middle of reading. If this returns non-zero, the -// next frame must be a CONTINUATION with this stream id. -func (sc *serverConn) curHeaderStreamID() uint32 { - sc.serveG.check() - st := sc.req.stream - if st == nil { - return 0 - } - return st.id -} - // processFrameFromReader processes the serve loop's read from readFrameCh from the // frame-reading goroutine. // processFrameFromReader returns whether the connection should be kept open. 
@@ -1035,7 +1020,7 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFrameSize) return true // goAway will close the loop } - clientGone := err == io.EOF || strings.Contains(err.Error(), "use of closed network connection") + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) if clientGone { // TODO: could we also get into this state if // the peer does a half close @@ -1049,7 +1034,9 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { } } else { f := res.f - sc.vlogf("got %v: %#v", f.Header(), f) + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } err = sc.processFrame(f) if err == nil { return true @@ -1064,14 +1051,14 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: - sc.logf("%v: %v", sc.conn.RemoteAddr(), ev) + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown default: if res.err != nil { - sc.logf("disconnecting; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) } else { - sc.logf("disconnection due to other error: %v", err) + sc.logf("http2: server closing client connection: %v", err) } return false } @@ -1088,21 +1075,11 @@ func (sc *serverConn) processFrame(f Frame) error { sc.sawFirstSettings = true } - if s := sc.curHeaderStreamID(); s != 0 { - if cf, ok := f.(*ContinuationFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } else if cf.Header().StreamID != s { - return ConnectionError(ErrCodeProtocol) - } - } - switch f := f.(type) { case *SettingsFrame: return sc.processSettings(f) - case *HeadersFrame: + case *MetaHeadersFrame: return sc.processHeaders(f) - case *ContinuationFrame: - return sc.processContinuation(f) case *WindowUpdateFrame: return sc.processWindowUpdate(f) case *PingFrame: @@ -1118,7 +1095,7 @@ func (sc *serverConn) processFrame(f Frame) error { // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. return ConnectionError(ErrCodeProtocol) default: - sc.vlogf("Ignoring frame: %v", f.Header()) + sc.vlogf("http2: server ignoring frame: %v", f.Header()) return nil } } @@ -1181,6 +1158,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { } if st != nil { st.gotReset = true + st.cancelCtx() sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) } return nil @@ -1202,6 +1180,18 @@ func (sc *serverConn) closeStream(st *stream, err error) { } st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.forgetStream(st.id) + if st.reqBuf != nil { + // Stash this request body buffer (64k) away for reuse + // by a future POST/PUT/etc. + // + // TODO(bradfitz): share on the server? sync.Pool? + // Server requires locks and might hurt contention. + // sync.Pool might work, or might be worse, depending + // on goroutine CPU migrations. (get and put on + // separate CPUs). Maybe a mix of strategies. But + // this is an easy win for now. 
+ sc.freeRequestBodyBuf = st.reqBuf + } } func (sc *serverConn) processSettings(f *SettingsFrame) error { @@ -1229,7 +1219,9 @@ func (sc *serverConn) processSetting(s Setting) error { if err := s.Valid(); err != nil { return err } - sc.vlogf("processing setting %v", s) + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } switch s.ID { case SettingHeaderTableSize: sc.headerTableSize = s.Val @@ -1248,6 +1240,9 @@ func (sc *serverConn) processSetting(s Setting) error { // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } } return nil } @@ -1353,7 +1348,7 @@ func (st *stream) copyTrailersToHandlerRequest() { } } -func (sc *serverConn) processHeaders(f *HeadersFrame) error { +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.Header().StreamID if sc.inGoAway { @@ -1382,17 +1377,18 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error { // endpoint has opened or reserved. [...] An endpoint that // receives an unexpected stream identifier MUST respond with // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - if id <= sc.maxStreamID || sc.req.stream != nil { + if id <= sc.maxStreamID { return ConnectionError(ErrCodeProtocol) } + sc.maxStreamID = id - if id > sc.maxStreamID { - sc.maxStreamID = id - } + ctx, cancelCtx := contextWithCancel(sc.baseCtx) st = &stream{ - sc: sc, - id: id, - state: stateOpen, + sc: sc, + id: id, + state: stateOpen, + ctx: ctx, + cancelCtx: cancelCtx, } if f.StreamEnded() { st.state = stateHalfClosedRemote @@ -1412,49 +1408,6 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error { if sc.curOpenStreams == 1 { sc.setConnState(http.StateActive) } - sc.req = requestParam{ - stream: st, - header: make(http.Header), - } - sc.hpackDecoder.SetEmitFunc(sc.onNewHeaderField) - sc.hpackDecoder.SetEmitEnabled(true) - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (st *stream) processTrailerHeaders(f *HeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - return st.processTrailerHeaderBlockFragment(f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processContinuation(f *ContinuationFrame) error { - sc.serveG.check() - st := sc.streams[f.Header().StreamID] - if st == nil || sc.curHeaderStreamID() != st.id { - return ConnectionError(ErrCodeProtocol) - } - if st.gotTrailerHeader { - return st.processTrailerHeaderBlockFragment(f.HeaderBlockFragment(), f.HeadersEnded()) - } - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error { - sc.serveG.check() - if _, err := sc.hpackDecoder.Write(frag); err != nil { - return ConnectionError(ErrCodeCompression) - } - if !end { - return nil - } - if err := sc.hpackDecoder.Close(); err != nil { - return ConnectionError(ErrCodeCompression) - } - defer sc.resetPendingRequest() if sc.curOpenStreams > sc.advMaxStreams { // "Endpoints MUST NOT exceed the limit set by their // peer. 
An endpoint that receives a HEADERS frame @@ -1474,7 +1427,7 @@ func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bo return StreamError{st.id, ErrCodeRefusedStream} } - rw, req, err := sc.newWriterAndRequest() + rw, req, err := sc.newWriterAndRequest(st, f) if err != nil { return err } @@ -1486,30 +1439,44 @@ func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bo st.declBodyBytes = req.ContentLength handler := sc.handler.ServeHTTP - if !sc.hpackDecoder.EmitEnabled() { + if f.Truncated { // Their header list was too long. Send a 431 error. handler = handleHeaderListTooLong + } else if err := checkValidHTTP2Request(req); err != nil { + handler = new400Handler(err) } go sc.runHandler(rw, req, handler) return nil } -func (st *stream) processTrailerHeaderBlockFragment(frag []byte, end bool) error { +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() - sc.hpackDecoder.SetEmitFunc(st.onNewTrailerField) - if _, err := sc.hpackDecoder.Write(frag); err != nil { - return ConnectionError(ErrCodeCompression) + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) } - if !end { - return nil + st.gotTrailerHeader = true + if !f.StreamEnded() { + return StreamError{st.id, ErrCodeProtocol} + } + + if len(f.PseudoFields()) > 0 { + return StreamError{st.id, ErrCodeProtocol} + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return StreamError{st.id, ErrCodeProtocol} + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } } - err := sc.hpackDecoder.Close() st.endStream() - if err != nil { - return ConnectionError(ErrCodeCompression) - } return nil } @@ -1554,19 +1521,21 @@ func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority } } -// resetPendingRequest zeros out all state related to a HEADERS frame -// and its zero or more CONTINUATION frames sent to start a new -// request. 
-func (sc *serverConn) resetPendingRequest() { +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - sc.req = requestParam{} -} -func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) { - sc.serveG.check() - rp := &sc.req - if rp.invalidHeader || rp.method == "" || rp.path == "" || - (rp.scheme != "https" && rp.scheme != "http") { + method := f.PseudoValue("method") + path := f.PseudoValue("path") + scheme := f.PseudoValue("scheme") + authority := f.PseudoValue("authority") + + isConnect := method == "CONNECT" + if isConnect { + if path != "" || scheme != "" || authority == "" { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + } else if method == "" || path == "" || + (scheme != "https" && scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -1577,33 +1546,40 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} } - bodyOpen := rp.stream.state == stateOpen - if rp.method == "HEAD" && bodyOpen { + + bodyOpen := !f.StreamEnded() + if method == "HEAD" && bodyOpen { // HEAD requests can't have bodies - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} } var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + + if scheme == "https" { tlsState = sc.tlsState } - authority := rp.authority - if authority == "" { - authority = rp.header.Get("Host") + + header := make(http.Header) + for _, hf := range f.RegularFields() { + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - needsContinue := rp.header.Get("Expect") == "100-continue" + + if authority == "" { + authority = header.Get("Host") + } + needsContinue := header.Get("Expect") == "100-continue" if needsContinue { - rp.header.Del("Expect") + header.Del("Expect") } // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) + if cookies := header["Cookie"]; len(cookies) > 1 { + header.Set("Cookie", strings.Join(cookies, "; ")) } // Setup Trailers var trailer http.Header - for _, v := range rp.header["Trailer"] { + for _, v := range header["Trailer"] { for _, key := range strings.Split(v, ",") { key = http.CanonicalHeaderKey(strings.TrimSpace(key)) switch key { @@ -1618,25 +1594,32 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err } } } - delete(rp.header, "Trailer") + delete(header, "Trailer") body := &requestBody{ conn: sc, - stream: rp.stream, + stream: st, needsContinue: needsContinue, } - // TODO: handle asterisk '*' requests + test - url, err := url.ParseRequestURI(rp.path) - if err != nil { - // TODO: find the right error code? 
- return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + var url_ *url.URL + var requestURI string + if isConnect { + url_ = &url.URL{Host: authority} + requestURI = authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(path) + if err != nil { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + requestURI = path } req := &http.Request{ - Method: rp.method, - URL: url, + Method: method, + URL: url_, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: rp.path, + Header: header, + RequestURI: requestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, @@ -1645,12 +1628,18 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err Body: body, Trailer: trailer, } + req = requestWithContext(req, st.ctx) if bodyOpen { + // Disabled, per golang.org/issue/14960: + // st.reqBuf = sc.getRequestBodyBuf() + // TODO: remove this 64k of garbage per request (again, but without a data race): + buf := make([]byte, initialWindowSize) + body.pipe = &pipe{ - b: &fixedBuffer{buf: make([]byte, initialWindowSize)}, // TODO: garbage + b: &fixedBuffer{buf: buf}, } - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 @@ -1663,7 +1652,7 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err rws.conn = sc rws.bw = bwSave rws.bw.Reset(chunkWriter{rws}) - rws.stream = rp.stream + rws.stream = st rws.req = req rws.body = body @@ -1671,10 +1660,20 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err return rw, req, nil } +func (sc *serverConn) getRequestBodyBuf() []byte { + sc.serveG.check() + if buf := sc.freeRequestBodyBuf; buf != nil { + sc.freeRequestBodyBuf = nil + return buf + } + return make([]byte, initialWindowSize) +} + // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { didPanic := true defer func() { + rw.rws.stream.cancelCtx() if didPanic { e := recover() // Same as net/http: @@ -1827,7 +1826,7 @@ type requestBody struct { func (b *requestBody) Close() error { if b.pipe != nil { - b.pipe.CloseWithError(errClosedBody) + b.pipe.BreakWithError(errClosedBody) } b.closed = true return nil @@ -1902,12 +1901,14 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != // written in the trailers at the end of the response. func (rws *responseWriterState) declareTrailer(k string) { k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Content-Length", "Trailer": + if !ValidTrailerHeader(k) { // Forbidden by RFC 2616 14.40. + rws.conn.logf("ignoring invalid trailer %q", k) return } - rws.trailers = append(rws.trailers, k) + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } } // writeChunk writes chunks from the bufio.Writer. But because @@ -1975,6 +1976,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { return 0, nil } + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + endStream := rws.handlerDone && !rws.hasTrailers() if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. 
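
newWriterAndRequest now takes the request line straight from the MetaHeadersFrame pseudo-header values and applies the RFC 7540 section 8.3 rules for CONNECT: only :method and :authority may be present, while every other request must carry :method, :path and an http or https :scheme. A minimal sketch of that check, assuming an illustrative pseudo-header map instead of the frame API:

    package main

    import "fmt"

    // validRequestPseudoHeaders mirrors the malformed-request check above; the
    // map-based signature is illustrative only.
    func validRequestPseudoHeaders(pseudo map[string]string) bool {
        method, path := pseudo[":method"], pseudo[":path"]
        scheme, authority := pseudo[":scheme"], pseudo[":authority"]
        if method == "CONNECT" {
            return path == "" && scheme == "" && authority != ""
        }
        return method != "" && path != "" && (scheme == "http" || scheme == "https")
    }

    func main() {
        fmt.Println(validRequestPseudoHeaders(map[string]string{
            ":method": "CONNECT", ":authority": "example.com:443",
        })) // true
        fmt.Println(validRequestPseudoHeaders(map[string]string{
            ":method": "GET", ":path": "/", ":scheme": "https",
        })) // true
        fmt.Println(validRequestPseudoHeaders(map[string]string{
            ":method": "GET", ":scheme": "https", // missing :path, so malformed
        })) // false
    }
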
@@ -1995,6 +2000,58 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { return len(p), nil } +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + func (w *responseWriter) Flush() { rws := w.rws if rws == nil { @@ -2138,3 +2195,69 @@ func foreachHeaderElement(v string, fn func(string)) { } } } + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2Request checks whether req is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. 
+func checkValidHTTP2Request(req *http.Request) error { + for _, h := range connHeaders { + if _, ok := req.Header[h]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", h) + } + } + te := req.Header["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. +// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 734fd56c..fb8dd997 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -22,8 +22,10 @@ import ( "strconv" "strings" "sync" + "time" "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" ) const ( @@ -39,6 +41,8 @@ const ( // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send // a stream-level WINDOW_UPDATE for at a time. transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" ) // Transport is an HTTP/2 Transport. @@ -73,17 +77,40 @@ type Transport struct { // uncompressed. DisableCompression bool + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. + AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allow. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool } -func (t *Transport) disableCompression() bool { - if t.DisableCompression { - return true +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 } - // TODO: also disable if this transport is somehow linked to an http1 Transport - // and it's configured there? 
- return false + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") @@ -92,7 +119,8 @@ var errTransportVersion = errors.New("http2: ConfigureTransport is only supporte // It requires Go 1.6 or later and returns an error if the net/http package is too old // or if t1 has already been HTTP/2-enabled. func ConfigureTransport(t1 *http.Transport) error { - return configureTransport(t1) // in configure_transport.go (go1.6) or go15.go + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err } func (t *Transport) connPool() ClientConnPool { @@ -111,9 +139,10 @@ func (t *Transport) initConnPool() { // ClientConn is the state of a single HTTP/2 client connection to an // HTTP/2 server. type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request // readLoop goroutine fields: readerDone chan struct{} // closed on error @@ -125,11 +154,14 @@ type ClientConn struct { inflow flow // peer's conn-level flow control closed bool goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string streams map[uint32]*clientStream // client-initiated nextStreamID uint32 bw *bufio.Writer br *bufio.Reader fr *Framer + lastActive time.Time + // Settings from peer: maxFrameSize uint32 maxConcurrentStreams uint32 @@ -138,7 +170,7 @@ type ClientConn struct { henc *hpack.Encoder freeBuf [][]byte - wmu sync.Mutex // held while writing; acquire AFTER wmu if holding both + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both werr error // first write error that has occurred } @@ -147,55 +179,74 @@ type ClientConn struct { type clientStream struct { cc *ClientConn req *http.Request + trace *clientTrace // or nil ID uint32 resc chan resAndError bufPipe pipe // buffered pipe with the flow-controlled response payload requestedGzip bool + on100 func() // optional code to run if get a 100 continue response flow flow // guarded by cc.mu inflow flow // guarded by cc.mu bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody bool // stop writing req body; guarded by cc.mu + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu peerReset chan struct{} // closed on peer reset resetErr error // populated before peerReset is closed - // owned by clientConnReadLoop: - headersDone bool // got HEADERS w/ END_HEADERS - trailersDone bool // got second HEADERS frame w/ END_HEADERS + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu - trailer http.Header // accumulated trailers - resTrailer http.Header // client's Response.Trailer + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // 
accumulated trailers + resTrailer *http.Header // client's Response.Trailer } -// awaitRequestCancel runs in its own goroutine and waits for the user's -func (cs *clientStream) awaitRequestCancel(cancel <-chan struct{}) { - if cancel == nil { +// awaitRequestCancel runs in its own goroutine and waits for the user +// to cancel a RoundTrip request, its context to expire, or for the +// request to be done (any way it might be removed from the cc.streams +// map: peer reset, successful completion, TCP connection breakage, +// etc) +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + ctx := reqContext(req) + if req.Cancel == nil && ctx.Done() == nil { return } select { - case <-cancel: + case <-req.Cancel: cs.bufPipe.CloseWithError(errRequestCanceled) - case <-cs.bufPipe.Done(): + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + case <-ctx.Done(): + cs.bufPipe.CloseWithError(ctx.Err()) + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + case <-cs.done: } } -// checkReset reports any error sent in a RST_STREAM frame by the -// server. -func (cs *clientStream) checkReset() error { +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. +func (cs *clientStream) checkResetOrDone() error { select { case <-cs.peerReset: return cs.resetErr + case <-cs.done: + return errStreamClosed default: return nil } } -func (cs *clientStream) abortRequestBodyWrite() { +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } cc := cs.cc cc.mu.Lock() - cs.stopReqBody = true + cs.stopReqBody = err cc.cond.Broadcast() cc.mu.Unlock() } @@ -231,26 +282,31 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { // authorityAddr returns a given authority (a host/IP, or host:port / ip:port) // and returns a host:port. The port 443 is added if needed. -func authorityAddr(authority string) (addr string) { +func authorityAddr(scheme string, authority string) (addr string) { if _, _, err := net.SplitHostPort(authority); err == nil { return authority } - return net.JoinHostPort(authority, "443") + port := "443" + if scheme == "http" { + port = "80" + } + return net.JoinHostPort(authority, port) } // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if req.URL.Scheme != "https" { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { return nil, errors.New("http2: unsupported scheme") } - addr := authorityAddr(req.URL.Host) + addr := authorityAddr(req.URL.Scheme, req.URL.Host) for { cc, err := t.connPool().GetClientConn(req, addr) if err != nil { - t.vlogf("failed to get client conn: %v", err) + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } + traceGotConn(req, cc) res, err := cc.RoundTrip(req) if shouldRetryRequest(req, err) { continue @@ -267,7 +323,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res // connected from previous requests but are now sitting idle. // It does not interrupt any connections currently in use. 
func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(*clientConnPool); ok { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { cp.closeIdleConnections() } } @@ -300,8 +356,12 @@ func (t *Transport) newTLSConfig(host string) *tls.Config { if t.TLSClientConfig != nil { *cfg = *t.TLSClientConfig } - cfg.NextProtos = []string{NextProtoTLS} // TODO: don't override if already in list - cfg.ServerName = host + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } return cfg } @@ -335,9 +395,22 @@ func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.C return cn, nil } +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return transportExpectContinueTimeout(t.t1) +} + func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { if VerboseLogs { - t.vlogf("creating client conn to %v", c.RemoteAddr()) + t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) } if _, err := c.Write(clientPreface); err != nil { t.vlogf("client preface write error: %v", err) @@ -362,20 +435,26 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) - type connectionStater interface { - ConnectionState() tls.ConnectionState - } if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state } - cc.fr.WriteSettings( - Setting{ID: SettingEnablePush, Val: 0}, - Setting{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - ) + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + cc.fr.WriteSettings(initialSettings...) cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) cc.inflow.add(transportDefaultConnFlow + initialWindowSize) cc.bw.Flush() @@ -404,7 +483,7 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { case SettingInitialWindowSize: cc.initialWindowSize = s.Val default: - // TODO(bradfitz): handle more + // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE? t.vlogf("Unhandled Setting: %v", s) } return nil @@ -417,7 +496,17 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() + + old := cc.goAway cc.goAway = f + + // Merge the previous and current GoAway error frames. 
+ if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } } func (cc *ClientConn) CanTakeNewRequest() bool { @@ -427,7 +516,10 @@ func (cc *ClientConn) CanTakeNewRequest() bool { } func (cc *ClientConn) canTakeNewRequestLocked() bool { - return cc.goAway == nil && + if cc.singleUse && cc.nextStreamID > 1 { + return false + } + return cc.goAway == nil && !cc.closed && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && cc.nextStreamID < 2147483647 } @@ -508,24 +600,83 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { return "", nil } +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. +func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return errors.New("http2: invalid Upgrade request header") + } + if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { + return errors.New("http2: invalid Transfer-Encoding request header") + } + if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { + return errors.New("http2: invalid Connection request header") + } + return nil +} + +func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) { + body = req.Body + if body == nil { + return nil, 0 + } + if req.ContentLength != 0 { + return req.Body, req.ContentLength + } + + // We have a body but a zero content length. Test to see if + // it's actually zero or just unset. + var buf [1]byte + n, rerr := io.ReadFull(body, buf[:]) + if rerr != nil && rerr != io.EOF { + return errorReader{rerr}, -1 + } + if n == 1 { + // Oh, guess there is data in this Body Reader after all. + // The ContentLength field just wasn't set. + // Stich the Body back together again, re-attaching our + // consumed byte. + return io.MultiReader(bytes.NewReader(buf[:]), body), -1 + } + // Body is actually zero bytes. + return nil, 0 +} + func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := checkConnHeaders(req); err != nil { + return nil, err + } + trailers, err := commaSeparatedTrailers(req) if err != nil { return nil, err } hasTrailers := trailers != "" + body, contentLen := bodyAndLength(req) + hasBody := body != nil + cc.mu.Lock() + cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { cc.mu.Unlock() return nil, errClientConnUnusable } - cs := cc.newStream() - cs.req = req - hasBody := req.Body != nil - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? 
+ var requestedGzip bool if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" && @@ -542,32 +693,63 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // We don't request gzip if the request is for a range, since // auto-decoding a portion of a gzipped document will just fail // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + requestedGzip = true } - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} - hdrs := cc.encodeHeaders(req, cs.requestedGzip, trailers) + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + cc.wmu.Lock() endStream := !hasBody && !hasTrailers werr := cc.writeHeaders(cs.ID, endStream, hdrs) cc.wmu.Unlock() + traceWroteHeaders(cs.trace) cc.mu.Unlock() if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) return nil, werr } - var bodyCopyErrc chan error // result of body copy + var respHeaderTimer <-chan time.Time if hasBody { - bodyCopyErrc = make(chan error, 1) - go func() { - bodyCopyErrc <- cs.writeRequestBody(req.Body) - }() + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } } + readLoopResCh := cs.resc + bodyWritten := false + ctx := reqContext(req) + for { select { - case re := <-cs.resc: + case re := <-readLoopResCh: res := re.res if re.err != nil || res.StatusCode > 299 { // On error or status code 3xx, 4xx, 5xx, etc abort any @@ -579,23 +761,58 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // doesn't, they'll RST_STREAM us soon enough. This is a // heuristic to avoid adding knobs to Transport. Hopefully // we can keep it. 
- cs.abortRequestBodyWrite() + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) } if re.err != nil { + cc.forgetStreamID(cs.ID) return nil, re.err } res.Request = req res.TLS = cc.tlsState return res, nil - case <-requestCancel(req): - cs.abortRequestBodyWrite() + case <-respHeaderTimer: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errTimeout + case <-ctx.Done(): + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, ctx.Err() + case <-req.Cancel: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } return nil, errRequestCanceled case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. return nil, cs.resetErr - case err := <-bodyCopyErrc: + case err := <-bodyWriter.resc: if err != nil { return nil, err } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } } } } @@ -631,22 +848,28 @@ func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) return cc.werr } -// errAbortReqBodyWrite is an internal error value. -// It doesn't escape to callers. -var errAbortReqBodyWrite = errors.New("http2: aborting request body write") +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") -func (cs *clientStream) writeRequestBody(body io.ReadCloser) (err error) { + // abort request body write, but send stream reset of cancel. 
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { cc := cs.cc sentEnd := false // whether we sent the final DATA frame w/ END_STREAM buf := cc.frameScratchBuffer() defer cc.putFrameScratchBuffer(buf) defer func() { + traceWroteRequest(cs.trace, err) // TODO: write h12Compare test showing whether // Request.Body is closed by the Transport, // and in multiple cases: server replies <=299 and >299 // while still writing request body - cerr := body.Close() + cerr := bodyCloser.Close() if err == nil { err = cerr } @@ -669,7 +892,13 @@ func (cs *clientStream) writeRequestBody(body io.ReadCloser) (err error) { for len(remain) > 0 && err == nil { var allowed int32 allowed, err = cs.awaitFlowControl(len(remain)) - if err != nil { + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: return err } cc.wmu.Lock() @@ -729,10 +958,10 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) if cc.closed { return 0, errClientConnClosed } - if cs.stopReqBody { - return 0, errAbortReqBodyWrite + if cs.stopReqBody != nil { + return 0, cs.stopReqBody } - if err := cs.checkReset(); err != nil { + if err := cs.checkResetOrDone(); err != nil { return 0, err } if a := cs.flow.available(); a > 0 { @@ -759,41 +988,107 @@ type badStringError struct { func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } // requires cc.mu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string) []byte { +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() - // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go host := req.Host if host == "" { host = req.URL.Host } + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + // 8.1.2.3 Request Pseudo-Header Fields // The :path pseudo-header field includes the path and query parts of the // target URI (the path-absolute production and optionally a '?' character // followed by the query production (see Sections 3.3 and 3.4 of // [RFC3986]). - cc.writeHeader(":authority", host) // probably not right for all sites + cc.writeHeader(":authority", host) cc.writeHeader(":method", req.Method) - cc.writeHeader(":path", req.URL.RequestURI()) - cc.writeHeader(":scheme", "https") + if req.Method != "CONNECT" { + cc.writeHeader(":path", req.URL.RequestURI()) + cc.writeHeader(":scheme", "https") + } if trailers != "" { cc.writeHeader("trailer", trailers) } + var didUA bool for k, vv := range req.Header { lowKey := strings.ToLower(k) - if lowKey == "host" { + switch lowKey { + case "host", "content-length": + // Host is :authority, already sent. + // Content-Length is automatic, set below. 
continue + case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + case "user-agent": + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } } for _, v := range vv { cc.writeHeader(lowKey, v) } } + if shouldSendReqContentLength(req.Method, contentLength) { + cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) + } if addGzipHeader { cc.writeHeader("accept-encoding", "gzip") } - return cc.hbuf.Bytes() + if !didUA { + cc.writeHeader("user-agent", defaultUserAgent) + } + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } } // requires cc.mu be held. @@ -811,6 +1106,9 @@ func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { } func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) } @@ -826,6 +1124,7 @@ func (cc *ClientConn) newStream() *clientStream { ID: cc.nextStreamID, resc: make(chan resAndError, 1), peerReset: make(chan struct{}), + done: make(chan struct{}), } cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) @@ -836,31 +1135,28 @@ func (cc *ClientConn) newStream() *clientStream { return cs } +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { cc.mu.Lock() defer cc.mu.Unlock() cs := cc.streams[id] - if andRemove { + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() delete(cc.streams, id) + close(cs.done) + cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } return cs } // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { - cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID - - // continueStreamID is the stream ID we're waiting for - // continuation frames for. - continueStreamID uint32 - - hdec *hpack.Decoder - - // Fields reset on each HEADERS: - nextRes *http.Response - sawRegHeader bool // saw non-pseudo header - reqMalformed error // non-nil once known to be malformed + cc *ClientConn + activeRes map[uint32]*clientStream // keyed by streamID + closeWhenIdle bool } // readLoop runs in its own goroutine and reads and dispatches frames. 
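
On the client side, encodeHeaders now emits a content-length header only when shouldSendReqContentLength allows it: positive lengths are always sent, an unknown length (-1) never is, and a literal zero is sent only for methods that conventionally carry a body. The sketch below restates that decision table for illustration; it is not the vendored function itself:

    package main

    import "fmt"

    // sendContentLength mirrors shouldSendReqContentLength from the hunk above.
    func sendContentLength(method string, contentLength int64) bool {
        if contentLength > 0 {
            return true
        }
        if contentLength < 0 {
            return false // unknown length: END_STREAM marks the end of the body
        }
        switch method {
        case "POST", "PUT", "PATCH":
            return true // an explicit zero is meaningful for body-carrying methods
        default:
            return false
        }
    }

    func main() {
        fmt.Println(sendContentLength("GET", 0))    // false
        fmt.Println(sendContentLength("POST", 0))   // true
        fmt.Println(sendContentLength("GET", -1))   // false
        fmt.Println(sendContentLength("PUT", 1024)) // true
    }
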
@@ -869,8 +1165,6 @@ func (cc *ClientConn) readLoop() { cc: cc, activeRes: make(map[uint32]*clientStream), } - // TODO: figure out henc size - rl.hdec = hpack.NewDecoder(initialHeaderTableSize, rl.onNewHeaderField) defer rl.cleanup() cc.readerErr = rl.run() @@ -881,6 +1175,19 @@ func (cc *ClientConn) readLoop() { } } +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + func (rl *clientConnReadLoop) cleanup() { cc := rl.cc defer cc.tconn.Close() @@ -891,10 +1198,18 @@ func (rl *clientConnReadLoop) cleanup() { // TODO: also do this if we've written the headers but not // gotten a response yet. err := cc.readerErr - if err == io.EOF { - err = io.ErrUnexpectedEOF - } cc.mu.Lock() + if err == io.EOF { + if cc.goAway != nil { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else { + err = io.ErrUnexpectedEOF + } + } for _, cs := range rl.activeRes { cs.bufPipe.CloseWithError(err) } @@ -903,6 +1218,7 @@ func (rl *clientConnReadLoop) cleanup() { case cs.resc <- resAndError{err: err}: default: } + close(cs.done) } cc.closed = true cc.cond.Broadcast() @@ -911,45 +1227,40 @@ func (rl *clientConnReadLoop) cleanup() { func (rl *clientConnReadLoop) run() error { cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a reply for { f, err := cc.fr.ReadFrame() if err != nil { cc.vlogf("Transport readFrame error: (%T) %v", err, err) } if se, ok := err.(StreamError); ok { - // TODO: deal with stream errors from the framer. - return se + if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + rl.endStreamError(cs, cc.fr.errDetail) + } + continue } else if err != nil { return err } - cc.vlogf("Transport received %v: %#v", f.Header(), f) - - streamID := f.Header().StreamID - - _, isContinue := f.(*ContinuationFrame) - if isContinue { - if streamID != rl.continueStreamID { - cc.logf("Protocol violation: got CONTINUATION with id %d; want %d", streamID, rl.continueStreamID) - return ConnectionError(ErrCodeProtocol) - } - } else if rl.continueStreamID != 0 { - // Continue frames need to be adjacent in the stream - // and we were in the middle of headers. 
- cc.logf("Protocol violation: got %T for stream %d, want CONTINUATION for %d", f, streamID, rl.continueStreamID) - return ConnectionError(ErrCodeProtocol) + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) } + maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { - case *HeadersFrame: + case *MetaHeadersFrame: err = rl.processHeaders(f) - case *ContinuationFrame: - err = rl.processContinuation(f) + maybeIdle = true + gotReply = true case *DataFrame: err = rl.processData(f) + maybeIdle = true case *GoAwayFrame: err = rl.processGoAway(f) + maybeIdle = true case *RSTStreamFrame: err = rl.processResetStream(f) + maybeIdle = true case *SettingsFrame: err = rl.processSettings(f) case *PushPromiseFrame: @@ -964,82 +1275,116 @@ func (rl *clientConnReadLoop) run() error { if err != nil { return err } + if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + cc.closeIfIdle() + } } } -func (rl *clientConnReadLoop) processHeaders(f *HeadersFrame) error { - rl.sawRegHeader = false - rl.reqMalformed = nil - rl.nextRes = &http.Response{ +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + if res.Body != noBody { + rl.activeRes[cs.ID] = cs + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. 
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ Proto: "HTTP/2.0", ProtoMajor: 2, - Header: make(http.Header), + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), } - return rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded(), f.StreamEnded()) -} - -func (rl *clientConnReadLoop) processContinuation(f *ContinuationFrame) error { - return rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded(), f.StreamEnded()) -} - -func (rl *clientConnReadLoop) processHeaderBlockFragment(frag []byte, streamID uint32, headersEnded, streamEnded bool) error { - cc := rl.cc - cs := cc.streamByID(streamID, streamEnded) - if cs == nil { - // We could return a ConnectionError(ErrCodeProtocol) - // here, except that in the case of us canceling - // client requests, we may also delete from the - // streams map, in which case we forgot that we sent - // this request. So, just ignore any responses for - // now. They might've been in-flight before the - // server got our RST_STREAM. - return nil - } - if cs.headersDone { - rl.hdec.SetEmitFunc(cs.onNewTrailerField) - } else { - rl.hdec.SetEmitFunc(rl.onNewHeaderField) - } - _, err := rl.hdec.Write(frag) - if err != nil { - return ConnectionError(ErrCodeCompression) - } - if !headersEnded { - rl.continueStreamID = cs.ID - return nil - } - // HEADERS (or CONTINUATION) are now over. - rl.continueStreamID = 0 - - if !cs.headersDone { - cs.headersDone = true - } else { - // We're dealing with trailers. (and specifically the - // final frame of headers) - if cs.trailersDone { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) } - cs.trailersDone = true - if !streamEnded { - // We expect that any header block fragment - // frame for trailers with END_HEADERS also - // has END_STREAM. 
- return ConnectionError(ErrCodeProtocol) - } - rl.endStream(cs) - return nil } - if rl.reqMalformed != nil { - cs.resc <- resAndError{err: rl.reqMalformed} - rl.cc.writeStreamReset(cs.ID, ErrCodeProtocol, rl.reqMalformed) - return nil - } - - res := rl.nextRes - - if !streamEnded || cs.req.Method == "HEAD" { + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { res.ContentLength = -1 if clens := res.Header["Content-Length"]; len(clens) == 1 { if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { @@ -1054,27 +1399,52 @@ func (rl *clientConnReadLoop) processHeaderBlockFragment(frag []byte, streamID u } } - if streamEnded { + if streamEnded || isHead { res.Body = noBody - } else { - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(requestCancel(cs.req)) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - } + return res, nil } - cs.resTrailer = res.Trailer - rl.activeRes[cs.ID] = cs - cs.resc <- resAndError{res: res} - rl.nextRes = nil // unused now; will be reset next HEADERS frame + buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage + cs.bufPipe = pipe{b: buf} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + setResponseUncompressed(res) + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) return nil } @@ -1125,8 +1495,12 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) { cc.inflow.add(connAdd) } if err == nil { // No need to refresh if the stream is over or failed. - if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = transportDefaultStreamFlow - v + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. 
+ v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) cs.inflow.add(streamAdd) } } @@ -1160,25 +1534,41 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, f.StreamEnded()) if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. return nil } - data := f.Data() - if VerboseLogs { - rl.cc.logf("DATA: %q", data) - } + if data := f.Data(); len(data) > 0 { + if cs.bufPipe.b == nil { + // Data frame after it's already closed? + cc.logf("http2: Transport received DATA frame for closed stream; closing connection") + return ConnectionError(ErrCodeProtocol) + } - // Check connection-level flow control. - cc.mu.Lock() - if cs.inflow.available() >= int32(len(data)) { - cs.inflow.take(int32(len(data))) - } else { + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(len(data)) { + cs.inflow.take(int32(len(data))) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } cc.mu.Unlock() - return ConnectionError(ErrCodeFlowControl) - } - cc.mu.Unlock() - if _, err := cs.bufPipe.Write(data); err != nil { - return err + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } } if f.StreamEnded() { @@ -1187,16 +1577,34 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { return nil } +var errInvalidTrailers = errors.New("http2: invalid trailers") + func (rl *clientConnReadLoop) endStream(cs *clientStream) { // TODO: check that any declared content-length matches, like // server.go's (*stream).endStream method. - cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers) + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + cs.bufPipe.closeWithErrorAndCode(err, code) delete(rl.activeRes, cs.ID) + if cs.req.Close || cs.req.Header.Get("Connection") == "close" { + rl.closeWhenIdle = true + } } func (cs *clientStream) copyTrailers() { for k, vv := range cs.trailer { - cs.resTrailer[k] = vv + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv } } @@ -1229,7 +1637,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { // window size and this one. cc.initialWindowSize = s.Val default: - // TODO(bradfitz): handle more settings? + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
cc.vlogf("Unhandled Setting: %v", s) } return nil @@ -1274,7 +1682,7 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { cs.resetErr = err close(cs.peerReset) cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkReset via clientStream.awaitFlowControl + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } delete(rl.activeRes, cs.ID) return nil @@ -1311,74 +1719,14 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) // But that's only in GOAWAY. Invent a new frame type? Is there one already? cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() cc.wmu.Unlock() } -// onNewHeaderField runs on the readLoop goroutine whenever a new -// hpack header field is decoded. -func (rl *clientConnReadLoop) onNewHeaderField(f hpack.HeaderField) { - cc := rl.cc - if VerboseLogs { - cc.logf("Header field: %+v", f) - } - // TODO: enforce max header list size like server. - isPseudo := strings.HasPrefix(f.Name, ":") - if isPseudo { - if rl.sawRegHeader { - rl.reqMalformed = errors.New("http2: invalid pseudo header after regular header") - return - } - switch f.Name { - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - rl.reqMalformed = errors.New("http2: invalid :status") - return - } - rl.nextRes.Status = f.Value + " " + http.StatusText(code) - rl.nextRes.StatusCode = code - default: - // "Endpoints MUST NOT generate pseudo-header - // fields other than those defined in this - // document." - rl.reqMalformed = fmt.Errorf("http2: unknown response pseudo header %q", f.Name) - } - } else { - rl.sawRegHeader = true - key := http.CanonicalHeaderKey(f.Name) - if key == "Trailer" { - t := rl.nextRes.Trailer - if t == nil { - t = make(http.Header) - rl.nextRes.Trailer = t - } - foreachHeaderElement(f.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - rl.nextRes.Header.Add(key, f.Value) - } - } -} - -func (cs *clientStream) onNewTrailerField(f hpack.HeaderField) { - isPseudo := strings.HasPrefix(f.Name, ":") - if isPseudo { - // TODO: Bogus. report an error later when we close their body. - // drop for now. - return - } - key := http.CanonicalHeaderKey(f.Name) - if _, ok := cs.resTrailer[key]; ok { - if cs.trailer == nil { - cs.trailer = make(http.Header) - } - const tooBig = 1000 // TODO: arbitrary; use max header list size limits - if cur := cs.trailer[key]; len(cur) < tooBig { - cs.trailer[key] = append(cur, f.Value) - } - } -} +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) func (cc *ClientConn) logf(format string, args ...interface{}) { cc.t.logf(format, args...) 
@@ -1417,13 +1765,18 @@ func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { // call gzip.NewReader on the first call to Read type gzipReader struct { body io.ReadCloser // underlying Response.Body - zr io.Reader // lazily-initialized gzip reader + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error } func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } if gz.zr == nil { gz.zr, err = gzip.NewReader(gz.body) if err != nil { + gz.zerr = err return 0, err } } @@ -1433,3 +1786,83 @@ func (gz *gzipReader) Read(p []byte) (n int, err error) { func (gz *gzipReader) Close() error { return gz.body.Close() } + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. + const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 6c8c15a1..27ef0dd4 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -7,11 +7,12 @@ package http2 import ( "bytes" "fmt" + "log" "net/http" - "sort" "time" "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" ) // writeFramer is implemented by any type that is used to write frames. 
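// The bodyWriterState helpers at the end of the transport.go diff above delay
// writing a request body sent with "Expect: 100-continue" until either the
// server answers 100 Continue or the expect-continue timeout elapses,
// whichever happens first. A minimal sketch of that time.AfterFunc plus
// sync.Once pattern under assumed names (delayedWrite and its methods are
// illustrative, not the package's API):

package main

import (
	"fmt"
	"sync"
	"time"
)

type delayedWrite struct {
	once  sync.Once
	timer *time.Timer
	fn    func()
}

// newDelayedWrite arms the timer far in the future; schedule() later lowers it
// to the real delay, mirroring how the request headers must be written before
// the expect-continue countdown starts.
func newDelayedWrite(fn func()) *delayedWrite {
	d := &delayedWrite{fn: fn}
	d.timer = time.AfterFunc(365*24*time.Hour, func() { d.once.Do(d.fn) })
	return d
}

// schedule starts the real countdown once the headers are on the wire.
func (d *delayedWrite) schedule(delay time.Duration) {
	if d.timer.Stop() {
		d.timer.Reset(delay)
	}
}

// on100 runs the body write immediately when a 100 Continue arrives early.
func (d *delayedWrite) on100() {
	d.timer.Stop()
	go d.once.Do(d.fn)
}

func main() {
	done := make(chan struct{})
	d := newDelayedWrite(func() { fmt.Println("writing body"); close(done) })
	d.schedule(50 * time.Millisecond) // headers written; start the Expect delay
	d.on100()                         // server replied 100 Continue first
	<-done
}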
@@ -136,27 +137,31 @@ type writeResHeaders struct { contentLength string } +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + func (w *writeResHeaders) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() if w.httpResCode != 0 { - enc.WriteField(hpack.HeaderField{ - Name: ":status", - Value: httpCodeString(w.httpResCode), - }) + encKV(enc, ":status", httpCodeString(w.httpResCode)) } encodeHeaders(enc, w.h, w.trailers) if w.contentType != "" { - enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType}) + encKV(enc, "content-type", w.contentType) } if w.contentLength != "" { - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength}) + encKV(enc, "content-length", w.contentLength) } if w.date != "" { - enc.WriteField(hpack.HeaderField{Name: "date", Value: w.date}) + encKV(enc, "date", w.date) } headerBlock := buf.Bytes() @@ -206,7 +211,7 @@ type write100ContinueHeadersFrame struct { func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) + encKV(enc, ":status", "100") return ctx.Framer().WriteHeaders(HeadersFrameParam{ StreamID: w.streamID, BlockFragment: buf.Bytes(), @@ -225,24 +230,35 @@ func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { } func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - // TODO: garbage. pool sorters like http1? hot path for 1 key? if keys == nil { - keys = make([]string, 0, len(h)) - for k := range h { - keys = append(keys, k) - } - sort.Strings(keys) + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) } for _, k := range keys { vv := h[k] k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } isTE := k == "transfer-encoding" for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } // TODO: more of "8.1.2.2 Connection-Specific Header Fields" if isTE && v != "trailers" { continue } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) + encKV(enc, k, v) } } } diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 3f83776e..88511623 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -1,14 +1,18 @@ language: go -before_install: - - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover +go: + - 1.5.4 + - 1.6.3 + - 1.7 -install: - - mkdir -p "$GOPATH/src/google.golang.org" - - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc" +go_import_path: google.golang.org/grpc + +before_install: + - go get -u golang.org/x/tools/cmd/goimports github.com/golang/lint/golint github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover script: + - '! gofmt -s -d -l . 2>&1 | read' + - '! goimports -l . | read' + - '! golint ./... | grep -vE "(_string|\.pb)\.go:"' + - '! go tool vet -all . 
2>&1 | grep -vE "constant [0-9]+ not a string in call to Errorf" | grep -vF .pb.go:' # https://github.com/golang/protobuf/issues/214 - make test testrace - - make coverage diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 407d384a..36cd6f75 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,16 +1,39 @@ # How to contribute -We definitely welcome patches and contribution to grpc! Here is some guideline +We definitely welcome patches and contribution to grpc! Here are some guidelines and information about how to do so. -## Getting started +## Sending patches -### Legal requirements +### Getting started + +1. Check out the code: + + $ go get google.golang.org/grpc + $ cd $GOPATH/src/google.golang.org/grpc + +1. Create a fork of the grpc-go repository. +1. Add your fork as a remote: + + $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git + +1. Make changes, commit them. +1. Run the test suite: + + $ make test + +1. Push your changes to your fork: + + $ git push fork ... + +1. Open a pull request. + +## Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://cla.developers.google.com/clas). -### Filing Issues +## Filing Issues When filing an issue, make sure to answer these five questions: 1. What version of Go are you using (`go version`)? diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 12e84e4e..03bb01f0 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -1,15 +1,3 @@ -.PHONY: \ - all \ - deps \ - updatedeps \ - testdeps \ - updatetestdeps \ - build \ - proto \ - test \ - testrace \ - clean \ - all: test testrace deps: @@ -32,9 +20,10 @@ proto: echo "error: protoc not installed" >&2; \ exit 1; \ fi - go get -v github.com/golang/protobuf/protoc-gen-go - for file in $$(git ls-files '*.proto'); do \ - protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ + go get -u -v github.com/golang/protobuf/protoc-gen-go + # use $$dir as the root for all proto files in the same directory + for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \ + protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \ done test: testdeps @@ -44,7 +33,20 @@ testrace: testdeps go test -v -race -cpu 1,4 google.golang.org/grpc/... clean: - go clean google.golang.org/grpc/... + go clean -i google.golang.org/grpc/... coverage: testdeps ./coverage.sh --coveralls + +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + coverage diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS index 619f9dbf..69b47959 100644 --- a/vendor/google.golang.org/grpc/PATENTS +++ b/vendor/google.golang.org/grpc/PATENTS @@ -1,22 +1,22 @@ Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by -Google as part of the GRPC project. +Google as part of the gRPC project. 
Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this -implementation of GRPC, where such license applies only to those patent +implementation of gRPC, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this -implementation of GRPC. This grant does not include claims that would be +implementation of gRPC. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of GRPC or any code incorporated within this -implementation of GRPC constitutes direct or contributory patent +that this implementation of gRPC or any code incorporated within this +implementation of gRPC constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of GRPC +rights granted to you under this License for this implementation of gRPC shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 37b05f09..660658be 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -7,7 +7,7 @@ The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open s Installation ------------ -To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run: +To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` $ go get google.golang.org/grpc @@ -16,7 +16,7 @@ $ go get google.golang.org/grpc Prerequisites ------------- -This requires Go 1.4 or above. +This requires Go 1.5 or later . Constraints ----------- @@ -28,5 +28,5 @@ See [API documentation](https://godoc.org/google.golang.org/grpc) for package an Status ------ -Beta release +GA diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 00000000..52f4f10f --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,80 @@ +package grpc + +import ( + "math/rand" + "time" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var ( + DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, + baseDelay: 1.0 * time.Second, + factor: 1.6, + jitter: 0.2, + } +) + +// backoffStrategy defines the methodology for backing off after a grpc +// connection failure. +// +// This is unexported until the gRPC project decides whether or not to allow +// alternative backoff strategies. Once a decision is made, this type and its +// method may be exported. +type backoffStrategy interface { + // backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. 
+ backoff(retries int) time.Duration +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration + + // TODO(stevvooe): The following fields are not exported, as allowing + // changes would violate the current gRPC specification for backoff. If + // gRPC decides to allow more interesting backoff strategies, these fields + // may be opened up in the future. + + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay time.Duration + + // factor is applied to the backoff after each retry. + factor float64 + + // jitter provides a range to randomize backoff delays. + jitter float64 +} + +func setDefaults(bc *BackoffConfig) { + md := bc.MaxDelay + *bc = DefaultBackoffConfig + + if md > 0 { + bc.MaxDelay = md + } +} + +func (bc BackoffConfig) backoff(retries int) (t time.Duration) { + if retries == 0 { + return bc.baseDelay + } + backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 00000000..419e2146 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,385 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. 
+type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerGetOptions configures a Get call. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// downErr implements net.Error. 
It is constructed by gRPC internals and passed to the down +// call of Balancer. +type downErr struct { + timeout bool + temporary bool + desc string +} + +func (e downErr) Error() string { return e.desc } +func (e downErr) Timeout() bool { return e.timeout } +func (e downErr) Temporary() bool { return e.temporary } + +func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { + return downErr{ + timeout: timeout, + temporary: temporary, + desc: fmt.Sprintf(format, a...), + } +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. +} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Println("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string) error { + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. 
+ if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. +func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = fmt.Errorf("there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. + addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9d815af3..fea07998 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -34,13 +34,14 @@ package grpc import ( + "bytes" "io" + "math" "time" "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) @@ -48,16 +49,23 @@ import ( // On error, it returns the error and indicates whether the call should be retried. // // TODO(zhaoq): Check whether the received message sequence is valid. -func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { +func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { // Try to acquire header metadata from the server if there is any. 
var err error + defer func() { + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + t.CloseStream(stream, err) + } + } + }() c.headerMD, err = stream.Header() if err != nil { return err } - p := &parser{s: stream} + p := &parser{r: stream} for { - if err = recv(p, codec, reply); err != nil { + if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32); err != nil { if err == io.EOF { break } @@ -69,43 +77,43 @@ func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream } // sendRequest writes out various information of an RPC such as Context and Message. -func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { +func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { stream, err := t.NewStream(ctx, callHdr) if err != nil { return nil, err } defer func() { if err != nil { + // If err is connection error, t will be closed, no need to close stream here. if _, ok := err.(transport.ConnectionError); !ok { t.CloseStream(stream, err) } } }() - // TODO(zhaoq): Support compression. - outBuf, err := encode(codec, args, compressionNone) + var cbuf *bytes.Buffer + if compressor != nil { + cbuf = new(bytes.Buffer) + } + outBuf, err := encode(codec, args, compressor, cbuf) if err != nil { return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) } err = t.Write(stream, outBuf, opts) - if err != nil { + // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method + // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following + // recvResponse to get the final status. + if err != nil && err != io.EOF { return nil, err } // Sent successfully. return stream, nil } -// callInfo contains all related configuration and information about an RPC. -type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - traceInfo traceInfo // in trace.go -} - -// Invoke is called by the generated code. It sends the RPC request on the -// wire and returns after response is received. +// Invoke sends the RPC request on the wire and returns after response is received. +// Invoke is called by generated code. Also users can call Invoke directly when it +// is really needed in their use cases. func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { - var c callInfo + c := defaultCallInfo for _, o := range opts { if err := o.before(&c); err != nil { return toRPCErr(err) @@ -136,57 +144,83 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli Last: true, Delay: false, } - var ( - lastErr error // record the error that happened - ) for { var ( err error t transport.ClientTransport stream *transport.Stream + // Record the put handler from Balancer.Get(...). It is called once the + // RPC has completed or failed. + put func() ) - // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. - if lastErr != nil && c.failFast { - return toRPCErr(lastErr) - } + // TODO(zhaoq): Need a formal spec of fail-fast. 
callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, } - t, err = cc.dopts.picker.Pick(ctx) + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + gopts := BalancerGetOptions{ + BlockingWait: !c.failFast, + } + t, put, err = cc.getTransport(ctx, gopts) if err != nil { - if lastErr != nil { - // This was a retry; return the error from the last attempt. - return toRPCErr(lastErr) + // TODO(zhaoq): Probably revisit the error handling. + if _, ok := err.(*rpcError); ok { + return err } - return toRPCErr(err) + if err == errConnClosing || err == errConnUnavailable { + if c.failFast { + return Errorf(codes.Unavailable, "%v", err) + } + continue + } + // All the other errors are treated as Internal errors. + return Errorf(codes.Internal, "%v", err) } if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) } - stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts) + stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) if err != nil { - if _, ok := err.(transport.ConnectionError); ok { - lastErr = err - continue + if put != nil { + put() + put = nil } - if lastErr != nil { - return toRPCErr(lastErr) + // Retry a non-failfast RPC when + // i) there is a connection error; or + // ii) the server started to drain before this RPC was initiated. + if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { + if c.failFast { + return toRPCErr(err) + } + continue } return toRPCErr(err) } - // Receive the response - lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply) - if _, ok := lastErr.(transport.ConnectionError); ok { - continue + err = recvResponse(cc.dopts, t, &c, stream, reply) + if err != nil { + if put != nil { + put() + put = nil + } + if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { + if c.failFast { + return toRPCErr(err) + } + continue + } + return toRPCErr(err) } if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } - t.CloseStream(stream, lastErr) - if lastErr != nil { - return toRPCErr(lastErr) + t.CloseStream(stream, nil) + if put != nil { + put() + put = nil } - return Errorf(stream.StatusCode(), stream.StatusDesc()) + return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) } } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index bf66914c..1d3b46c6 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -49,22 +49,33 @@ import ( ) var ( - // ErrUnspecTarget indicates that the target address is unspecified. - ErrUnspecTarget = errors.New("grpc: target is unspecified") - // ErrNoTransportSecurity indicates that there is no transport security - // being set for ClientConn. Users should either set one or explicityly - // call WithInsecure DialOption to disable security. - ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") - // ErrCredentialsMisuse indicates that users want to transmit security infomation - // (e.g., oauth2 token) which requires secure connection on an insecure - // connection. - ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)") // ErrClientConnClosing indicates that the operation is illegal because - // the session is closing. + // the ClientConn is closing. 
ErrClientConnClosing = errors.New("grpc: the client connection is closing") - // ErrClientConnTimeout indicates that the connection could not be - // established or re-established within the specified timeout. - ErrClientConnTimeout = errors.New("grpc: timed out trying to connect") + // ErrClientConnTimeout indicates that the ClientConn cannot establish the + // underlying connections within the specified timeout. + ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., oauth2 token) which requires secure connection on an insecure + // connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") + // errNetworkIO indicates that the connection is down due to some network I/O error. + errNetworkIO = errors.New("grpc: failed with network I/O error") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errConnUnavailable indicates that the connection is unavailable. + errConnUnavailable = errors.New("grpc: the connection is unavailable") + errNoAddr = errors.New("grpc: there is no address available to dial") // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second ) @@ -73,9 +84,13 @@ var ( // values passed to Dial. type dialOptions struct { codec Codec - picker Picker + cp Compressor + dc Decompressor + bs backoffStrategy + balancer Balancer block bool insecure bool + timeout time.Duration copts transport.ConnectOptions } @@ -89,9 +104,54 @@ func WithCodec(c Codec) DialOption { } } -func WithPicker(p Picker) DialOption { +// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message +// compressor. +func WithCompressor(cp Compressor) DialOption { return func(o *dialOptions) { - o.picker = p + o.cp = cp + } +} + +// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating +// message decompressor. +func WithDecompressor(dc Decompressor) DialOption { + return func(o *dialOptions) { + o.dc = dc + } +} + +// WithBalancer returns a DialOption which sets a load balancer. +func WithBalancer(b Balancer) DialOption { + return func(o *dialOptions) { + o.balancer = b + } +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. 
+func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + // Set defaults to ensure that provided BackoffConfig is valid and + // unexported fields get default values. + setDefaults(&b) + return withBackoff(b) +} + +// withBackoff sets the backoff strategy used for retries after a +// failed connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs backoffStrategy) DialOption { + return func(o *dialOptions) { + o.bs = bs } } @@ -104,6 +164,8 @@ func WithBlock() DialOption { } } +// WithInsecure returns a DialOption which disables transport security for this ClientConn. +// Note that transport security is required unless WithInsecure is set. func WithInsecure() DialOption { return func(o *dialOptions) { o.insecure = true @@ -112,31 +174,37 @@ func WithInsecure() DialOption { // WithTransportCredentials returns a DialOption which configures a // connection level security credentials (e.g., TLS/SSL). -func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption { +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { return func(o *dialOptions) { - o.copts.AuthOptions = append(o.copts.AuthOptions, creds) + o.copts.TransportCredentials = creds } } // WithPerRPCCredentials returns a DialOption which sets // credentials which will place auth state on each outbound RPC. -func WithPerRPCCredentials(creds credentials.Credentials) DialOption { +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { return func(o *dialOptions) { - o.copts.AuthOptions = append(o.copts.AuthOptions, creds) + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) } } -// WithTimeout returns a DialOption that configures a timeout for dialing a client connection. +// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn +// initially. This is valid if and only if WithBlock() is present. func WithTimeout(d time.Duration) DialOption { return func(o *dialOptions) { - o.copts.Timeout = d + o.timeout = d } } // WithDialer returns a DialOption that specifies a function to use for dialing network addresses. -func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption { +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { return func(o *dialOptions) { - o.copts.Dialer = f + o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, deadline.Sub(time.Now())) + } + return f(addr, 0) + } } } @@ -147,25 +215,96 @@ func WithUserAgent(s string) DialOption { } } -// Dial creates a client connection the given target. +// Dial creates a client connection to the given target. func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. ctx can be used to +// cancel or expire the pending connecting. Once this function returns, the +// cancellation and expiration of ctx will be noop. 
Users should call ClientConn.Close +// to terminate all the pending operations after this function returns. +// This is the EXPERIMENTAL API. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, + conns: make(map[Address]*addrConn), } + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + + if err != nil { + cc.Close() + } + }() + for _, opt := range opts { opt(&cc.dopts) } + + // Set defaults. if cc.dopts.codec == nil { - // Set the default codec. cc.dopts.codec = protoCodec{} } - if cc.dopts.picker == nil { - cc.dopts.picker = &unicastPicker{ - target: target, + if cc.dopts.bs == nil { + cc.dopts.bs = DefaultBackoffConfig + } + + var ( + ok bool + addrs []Address + ) + if cc.dopts.balancer == nil { + // Connect to target directly if balancer is nil. + addrs = append(addrs, Address{Addr: target}) + } else { + if err := cc.dopts.balancer.Start(target); err != nil { + return nil, err + } + ch := cc.dopts.balancer.Notify() + if ch == nil { + // There is no name resolver installed. + addrs = append(addrs, Address{Addr: target}) + } else { + addrs, ok = <-ch + if !ok || len(addrs) == 0 { + return nil, errNoAddr + } } } - if err := cc.dopts.picker.Init(cc); err != nil { - return nil, err + waitC := make(chan error, 1) + go func() { + for _, a := range addrs { + if err := cc.resetAddrConn(a, false, nil); err != nil { + waitC <- err + return + } + } + close(waitC) + }() + var timeoutCh <-chan time.Time + if cc.dopts.timeout > 0 { + timeoutCh = time.After(cc.dopts.timeout) + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-waitC: + if err != nil { + return nil, err + } + case <-timeoutCh: + return nil, ErrClientConnTimeout + } + // If balancer is nil or balancer.Notify() is nil, ok will be false here. + // The lbWatcher goroutine will not be created. + if ok { + go cc.lbWatcher() } colonPos := strings.LastIndex(target, ":") if colonPos == -1 { @@ -208,361 +347,511 @@ func (s ConnectivityState) String() string { } } -// ClientConn represents a client connection to an RPC service. +// ClientConn represents a client connection to an RPC server. type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + target string authority string dopts dialOptions + + mu sync.RWMutex + conns map[Address]*addrConn } -// State returns the connectivity state of cc. -// This is EXPERIMENTAL API. -func (cc *ClientConn) State() (ConnectivityState, error) { - return cc.dopts.picker.State() +func (cc *ClientConn) lbWatcher() { + for addrs := range cc.dopts.balancer.Notify() { + var ( + add []Address // Addresses need to setup connections. + del []*addrConn // Connections need to tear down. + ) + cc.mu.Lock() + for _, a := range addrs { + if _, ok := cc.conns[a]; !ok { + add = append(add, a) + } + } + for k, c := range cc.conns { + var keep bool + for _, a := range addrs { + if k == a { + keep = true + break + } + } + if !keep { + del = append(del, c) + delete(cc.conns, c.addr) + } + } + cc.mu.Unlock() + for _, a := range add { + cc.resetAddrConn(a, true, nil) + } + for _, c := range del { + c.tearDown(errConnDrain) + } + } } -// WaitForStateChange blocks until the state changes to something other than the sourceState. -// It returns the new state or error. -// This is EXPERIMENTAL API. 
-func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - return cc.dopts.picker.WaitForStateChange(ctx, sourceState) +// resetAddrConn creates an addrConn for addr and adds it to cc.conns. +// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. +// If tearDownErr is nil, errConnDrain will be used instead. +func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error { + ac := &addrConn{ + cc: cc, + addr: addr, + dopts: cc.dopts, + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + ac.stateCV = sync.NewCond(&ac.mu) + if EnableTracing { + ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) + } + if !ac.dopts.insecure { + if ac.dopts.copts.TransportCredentials == nil { + return errNoTransportSecurity + } + } else { + if ac.dopts.copts.TransportCredentials != nil { + return errCredentialsConflict + } + for _, cd := range ac.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing + } + } + } + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + stale := cc.conns[ac.addr] + cc.conns[ac.addr] = ac + cc.mu.Unlock() + if stale != nil { + // There is an addrConn alive on ac.addr already. This could be due to + // 1) a buggy Balancer notifies duplicated Addresses; + // 2) goaway was received, a new ac will replace the old ac. + // The old ac should be deleted from cc.conns, but the + // underlying transport should drain rather than close. + if tearDownErr == nil { + // tearDownErr is nil if resetAddrConn is called by + // 1) Dial + // 2) lbWatcher + // In both cases, the stale ac should drain, not close. + stale.tearDown(errConnDrain) + } else { + stale.tearDown(tearDownErr) + } + } + // skipWait may overwrite the decision in ac.dopts.block. + if ac.dopts.block && !skipWait { + if err := ac.resetTransport(false); err != nil { + if err != errConnClosing { + // Tear down ac and delete it from cc.conns. + cc.mu.Lock() + delete(cc.conns, ac.addr) + cc.mu.Unlock() + ac.tearDown(err) + } + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + return e.Origin() + } + return err + } + // Start to monitor the error status of transport. + go ac.transportMonitor() + } else { + // Start a goroutine connecting to the server asynchronously. + go func() { + if err := ac.resetTransport(false); err != nil { + grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) + } + return + } + ac.transportMonitor() + }() + } + return nil } -// Close starts to tear down the ClientConn. +func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { + var ( + ac *addrConn + ok bool + put func() + ) + if cc.dopts.balancer == nil { + // If balancer is nil, there should be only one addrConn available. + cc.mu.RLock() + if cc.conns == nil { + cc.mu.RUnlock() + return nil, nil, toRPCErr(ErrClientConnClosing) + } + for _, ac = range cc.conns { + // Break after the first iteration to get the first addrConn. 
+ ok = true + break + } + cc.mu.RUnlock() + } else { + var ( + addr Address + err error + ) + addr, put, err = cc.dopts.balancer.Get(ctx, opts) + if err != nil { + return nil, nil, toRPCErr(err) + } + cc.mu.RLock() + if cc.conns == nil { + cc.mu.RUnlock() + return nil, nil, toRPCErr(ErrClientConnClosing) + } + ac, ok = cc.conns[addr] + cc.mu.RUnlock() + } + if !ok { + if put != nil { + put() + } + return nil, nil, errConnClosing + } + t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) + if err != nil { + if put != nil { + put() + } + return nil, nil, err + } + return t, put, nil +} + +// Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { - return cc.dopts.picker.Close() + cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.mu.Unlock() + if cc.dopts.balancer != nil { + cc.dopts.balancer.Close() + } + for _, ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + return nil } -// Conn is a client connection to a single destination. -type Conn struct { - target string - dopts dialOptions - resetChan chan int - shutdownChan chan struct{} - events trace.EventLog +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + addr Address + dopts dialOptions + events trace.EventLog mu sync.Mutex state ConnectivityState stateCV *sync.Cond + down func(error) // the handler called when a connection is down. // ready is closed and becomes nil when a new transport is up or failed // due to timeout. ready chan struct{} transport transport.ClientTransport + + // The reason this addrConn is torn down. + tearDownErr error } -// NewConn creates a Conn. -func NewConn(cc *ClientConn) (*Conn, error) { - if cc.target == "" { - return nil, ErrUnspecTarget - } - c := &Conn{ - target: cc.target, - dopts: cc.dopts, - resetChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - } - if EnableTracing { - c.events = trace.NewEventLog("grpc.ClientConn", c.target) - } - if !c.dopts.insecure { - var ok bool - for _, cd := range c.dopts.copts.AuthOptions { - if _, ok := cd.(credentials.TransportAuthenticator); !ok { - continue - } - ok = true - } - if !ok { - return nil, ErrNoTransportSecurity - } - } else { - for _, cd := range c.dopts.copts.AuthOptions { - if cd.RequireTransportSecurity() { - return nil, ErrCredentialsMisuse - } - } - } - c.stateCV = sync.NewCond(&c.mu) - if c.dopts.block { - if err := c.resetTransport(false); err != nil { - c.Close() - return nil, err - } - // Start to monitor the error status of transport. - go c.transportMonitor() - } else { - // Start a goroutine connecting to the server asynchronously. - go func() { - if err := c.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err) - c.Close() - return - } - c.transportMonitor() - }() - } - return c, nil -} - -// printf records an event in cc's event log, unless cc has been closed. -// REQUIRES cc.mu is held. -func (cc *Conn) printf(format string, a ...interface{}) { - if cc.events != nil { - cc.events.Printf(format, a...) +// printf records an event in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) printf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Printf(format, a...) } } -// errorf records an error in cc's event log, unless cc has been closed. 
-// REQUIRES cc.mu is held. -func (cc *Conn) errorf(format string, a ...interface{}) { - if cc.events != nil { - cc.events.Errorf(format, a...) +// errorf records an error in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) errorf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Errorf(format, a...) } } -// State returns the connectivity state of the Conn -func (cc *Conn) State() ConnectivityState { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.state +// getState returns the connectivity state of the Conn +func (ac *addrConn) getState() ConnectivityState { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state } -// WaitForStateChange blocks until the state changes to something other than the sourceState. -func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - cc.mu.Lock() - defer cc.mu.Unlock() - if sourceState != cc.state { - return cc.state, nil +// waitForStateChange blocks until the state changes to something other than the sourceState. +func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if sourceState != ac.state { + return ac.state, nil } done := make(chan struct{}) var err error go func() { select { case <-ctx.Done(): - cc.mu.Lock() + ac.mu.Lock() err = ctx.Err() - cc.stateCV.Broadcast() - cc.mu.Unlock() + ac.stateCV.Broadcast() + ac.mu.Unlock() case <-done: } }() defer close(done) - for sourceState == cc.state { - cc.stateCV.Wait() + for sourceState == ac.state { + ac.stateCV.Wait() if err != nil { - return cc.state, err + return ac.state, err } } - return cc.state, nil + return ac.state, nil } -// NotifyReset tries to signal the underlying transport needs to be reset due to -// for example a name resolution change in flight. -func (cc *Conn) NotifyReset() { - select { - case cc.resetChan <- 0: - default: - } -} - -func (cc *Conn) resetTransport(closeTransport bool) error { - var retries int - start := time.Now() - for { - cc.mu.Lock() - cc.printf("connecting") - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() - return ErrClientConnClosing +func (ac *addrConn) resetTransport(closeTransport bool) error { + for retries := 0; ; retries++ { + ac.mu.Lock() + ac.printf("connecting") + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing } - cc.state = Connecting - cc.stateCV.Broadcast() - cc.mu.Unlock() - if closeTransport { - cc.transport.Close() + if ac.down != nil { + ac.down(downErrorf(false, true, "%v", errNetworkIO)) + ac.down = nil } - // Adjust timeout for the current try. 
- copts := cc.dopts.copts - if copts.Timeout < 0 { - cc.Close() - return ErrClientConnTimeout + ac.state = Connecting + ac.stateCV.Broadcast() + t := ac.transport + ac.mu.Unlock() + if closeTransport && t != nil { + t.Close() } - if copts.Timeout > 0 { - copts.Timeout -= time.Since(start) - if copts.Timeout <= 0 { - cc.Close() - return ErrClientConnTimeout - } - } - sleepTime := backoff(retries) - timeout := sleepTime - if timeout < minConnectTimeout { - timeout = minConnectTimeout - } - if copts.Timeout == 0 || copts.Timeout > timeout { - copts.Timeout = timeout + sleepTime := ac.dopts.bs.backoff(retries) + timeout := minConnectTimeout + if timeout < sleepTime { + timeout = sleepTime } + ctx, cancel := context.WithTimeout(ac.ctx, timeout) connectTime := time.Now() - addr, err := cc.dopts.picker.PickAddr() - var newTransport transport.ClientTransport - if err == nil { - newTransport, err = transport.NewClientTransport(addr, &copts) - } + newTransport, err := transport.NewClientTransport(ctx, ac.addr.Addr, ac.dopts.copts) if err != nil { - cc.mu.Lock() - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() - return ErrClientConnClosing + cancel() + + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + return err } - cc.errorf("transient failure: %v", err) - cc.state = TransientFailure - cc.stateCV.Broadcast() - if cc.ready != nil { - close(cc.ready) - cc.ready = nil + grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, ac.addr) + ac.mu.Lock() + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing } - cc.mu.Unlock() - sleepTime -= time.Since(connectTime) - if sleepTime < 0 { - sleepTime = 0 - } - // Fail early before falling into sleep. - if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { - cc.mu.Lock() - cc.errorf("connection timeout") - cc.mu.Unlock() - cc.Close() - return ErrClientConnTimeout + ac.errorf("transient failure: %v", err) + ac.state = TransientFailure + ac.stateCV.Broadcast() + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } + ac.mu.Unlock() closeTransport = false - time.Sleep(sleepTime) - retries++ - grpclog.Printf("grpc: Conn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) + select { + case <-time.After(sleepTime - time.Since(connectTime)): + case <-ac.ctx.Done(): + return ac.ctx.Err() + } continue } - cc.mu.Lock() - cc.printf("ready") - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() + ac.mu.Lock() + ac.printf("ready") + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() newTransport.Close() - return ErrClientConnClosing + return errConnClosing } - cc.state = Ready - cc.stateCV.Broadcast() - cc.transport = newTransport - if cc.ready != nil { - close(cc.ready) - cc.ready = nil + ac.state = Ready + ac.stateCV.Broadcast() + ac.transport = newTransport + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } - cc.mu.Unlock() + if ac.cc.dopts.balancer != nil { + ac.down = ac.cc.dopts.balancer.Up(ac.addr) + } + ac.mu.Unlock() return nil } } -func (cc *Conn) reconnect() bool { - cc.mu.Lock() - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() - return false - } - cc.state = TransientFailure - cc.stateCV.Broadcast() - cc.mu.Unlock() - if err := cc.resetTransport(true); err != nil { - // The ClientConn is closing. 
- cc.mu.Lock() - cc.printf("transport exiting: %v", err) - cc.mu.Unlock() - grpclog.Printf("grpc: Conn.transportMonitor exits due to: %v", err) - return false - } - return true -} - // Run in a goroutine to track the error in transport and create the // new transport if an error happens. It returns when the channel is closing. -func (cc *Conn) transportMonitor() { +func (ac *addrConn) transportMonitor() { for { + ac.mu.Lock() + t := ac.transport + ac.mu.Unlock() select { - // shutdownChan is needed to detect the teardown when - // the ClientConn is idle (i.e., no RPC in flight). - case <-cc.shutdownChan: - return - case <-cc.resetChan: - if !cc.reconnect() { - return - } - case <-cc.transport.Error(): - if !cc.reconnect() { - return - } - // Tries to drain reset signal if there is any since it is out-dated. + // This is needed to detect the teardown when + // the addrConn is idle (i.e., no RPC in flight). + case <-ac.ctx.Done(): select { - case <-cc.resetChan: + case <-t.Error(): + t.Close() default: } - } - } -} - -// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed. -func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) { - for { - cc.mu.Lock() - switch { - case cc.state == Shutdown: - cc.mu.Unlock() - return nil, ErrClientConnClosing - case cc.state == Ready: - cc.mu.Unlock() - return cc.transport, nil - default: - ready := cc.ready - if ready == nil { - ready = make(chan struct{}) - cc.ready = ready - } - cc.mu.Unlock() + return + case <-t.GoAway(): + // If GoAway happens without any network I/O error, ac is closed without shutting down the + // underlying transport (the transport will be closed when all the pending RPCs finished or + // failed.). + // If GoAway and some network I/O error happen concurrently, ac and its underlying transport + // are closed. + // In both cases, a new ac is created. select { - case <-ctx.Done(): - return nil, transport.ContextErr(ctx.Err()) - // Wait until the new transport is ready or failed. - case <-ready: + case <-t.Error(): + ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) + default: + ac.cc.resetAddrConn(ac.addr, true, errConnDrain) + } + return + case <-t.Error(): + select { + case <-ac.ctx.Done(): + t.Close() + return + case <-t.GoAway(): + ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) + return + default: + } + ac.mu.Lock() + if ac.state == Shutdown { + // ac has been shutdown. + ac.mu.Unlock() + return + } + ac.state = TransientFailure + ac.stateCV.Broadcast() + ac.mu.Unlock() + if err := ac.resetTransport(true); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) + } + return } } } } -// Close starts to tear down the Conn. Returns ErrClientConnClosing if -// it has been closed (mostly due to dial time-out). -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many ClientConn's in a -// tight loop. -func (cc *Conn) Close() error { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.state == Shutdown { - return ErrClientConnClosing +// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or +// iv) transport is in TransientFailure and there's no balancer/failfast is true. 
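For orientation, a minimal sketch (not part of the patch hunks) of how a caller drives the reworked ClientConn above: a blocking, insecure dial followed by Close, which now cancels the ClientConn context and tears down every tracked addrConn. The address and timeout are illustrative.

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// WithBlock corresponds to ac.dopts.block above: Dial only returns once
	// the initial transport is up (or the dial fails/times out).
	conn, err := grpc.Dial("localhost:50051", // illustrative address
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithTimeout(5*time.Second),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	// Close cancels the ClientConn context and tears down all addrConns.
	defer conn.Close()
}
```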
+func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { + for { + ac.mu.Lock() + switch { + case ac.state == Shutdown: + if failfast || !hasBalancer { + // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. + err := ac.tearDownErr + ac.mu.Unlock() + return nil, err + } + ac.mu.Unlock() + return nil, errConnClosing + case ac.state == Ready: + ct := ac.transport + ac.mu.Unlock() + return ct, nil + case ac.state == TransientFailure: + if failfast || hasBalancer { + ac.mu.Unlock() + return nil, errConnUnavailable + } + } + ready := ac.ready + if ready == nil { + ready = make(chan struct{}) + ac.ready = ready + } + ac.mu.Unlock() + select { + case <-ctx.Done(): + return nil, toRPCErr(ctx.Err()) + // Wait until the new transport is ready or failed. + case <-ready: + } } - cc.state = Shutdown - cc.stateCV.Broadcast() - if cc.events != nil { - cc.events.Finish() - cc.events = nil - } - if cc.ready != nil { - close(cc.ready) - cc.ready = nil - } - if cc.transport != nil { - cc.transport.Close() - } - if cc.shutdownChan != nil { - close(cc.shutdownChan) - } - return nil +} + +// tearDown starts to tear down the addrConn. +// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in +// some edge cases (e.g., the caller opens and closes many addrConn's in a +// tight loop. +// tearDown doesn't remove ac from ac.cc.conns. +func (ac *addrConn) tearDown(err error) { + ac.cancel() + + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.down != nil { + ac.down(downErrorf(false, false, "%v", err)) + ac.down = nil + } + if err == errConnDrain && ac.transport != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + ac.transport.GracefulClose() + } + if ac.state == Shutdown { + return + } + ac.state = Shutdown + ac.tearDownErr = err + ac.stateCV.Broadcast() + if ac.events != nil { + ac.events.Finish() + ac.events = nil + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + if ac.transport != nil && err != errConnDrain { + ac.transport.Close() + } + return } diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh index 90bf4baf..12023537 100755 --- a/vendor/google.golang.org/grpc/coverage.sh +++ b/vendor/google.golang.org/grpc/coverage.sh @@ -37,6 +37,8 @@ show_cover_report func case "$1" in "") ;; +--html) + show_cover_report html ;; --coveralls) push_to_coveralls ;; *) diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index d94fa2fb..50280102 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -40,11 +40,11 @@ package credentials import ( "crypto/tls" "crypto/x509" + "errors" "fmt" "io/ioutil" "net" "strings" - "time" "golang.org/x/net/context" ) @@ -54,9 +54,9 @@ var ( alpnProtoStr = []string{"h2"} ) -// Credentials defines the common interface all supported credentials must -// implement. -type Credentials interface { +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { // GetRequestMetadata gets the current request metadata, refreshing // tokens if required. 
This should be called by the transport layer on // each request, and the data should be populated in headers or other @@ -66,7 +66,7 @@ type Credentials interface { // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) - // RequireTransportSecurity indicates whether the credentails requires + // RequireTransportSecurity indicates whether the credentials requires // transport security. RequireTransportSecurity() bool } @@ -87,33 +87,26 @@ type AuthInfo interface { AuthType() string } -type authInfoKey struct{} +var ( + // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC + // and the caller should not close rawConn. + ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") +) -// NewContext creates a new context with authInfo attached. -func NewContext(ctx context.Context, authInfo AuthInfo) context.Context { - return context.WithValue(ctx, authInfoKey{}, authInfo) -} - -// FromContext returns the authInfo in ctx if it exists. -func FromContext(ctx context.Context) (authInfo AuthInfo, ok bool) { - authInfo, ok = ctx.Value(authInfoKey{}).(AuthInfo) - return -} - -// TransportAuthenticator defines the common interface for all the live gRPC wire +// TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). -type TransportAuthenticator interface { +type TransportCredentials interface { // ClientHandshake does the authentication handshake specified by the corresponding // authentication protocol on rawConn for clients. It returns the authenticated // connection and the corresponding auth information about the connection. - ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error) + // Implementations must use the provided context to implement timely cancellation. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about // the connection. - ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) - // Info provides the ProtocolInfo of this TransportAuthenticator. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. Info() ProtocolInfo - Credentials } // TLSInfo contains the auth information for a TLS authenticated connection. @@ -122,6 +115,7 @@ type TLSInfo struct { State tls.ConnectionState } +// AuthType returns the type of TLSInfo as a string. func (t TLSInfo) AuthType() string { return "tls" } @@ -129,7 +123,7 @@ func (t TLSInfo) AuthType() string { // tlsCreds is the credentials required for authenticating a connection using TLS. 
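As a hedged sketch of the renamed PerRPCCredentials interface above: a static-token implementation (the header name and token are invented for illustration); it would then be handed to the client through the per-RPC credentials DialOption.

```go
package tokencreds

import (
	"golang.org/x/net/context"
)

// staticToken attaches a fixed bearer token to every RPC; it satisfies
// credentials.PerRPCCredentials as defined in the hunk above.
type staticToken struct {
	token string
}

// GetRequestMetadata returns the metadata to attach to the current request.
func (t staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + t.token}, nil
}

// RequireTransportSecurity reports that these credentials must only travel
// over a secure transport.
func (t staticToken) RequireTransportSecurity() bool {
	return true
}
```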
type tlsCreds struct { // TLS configuration - config tls.Config + config *tls.Config } func (c tlsCreds) Info() ProtocolInfo { @@ -149,40 +143,28 @@ func (c *tlsCreds) RequireTransportSecurity() bool { return true } -type timeoutError struct{} - -func (timeoutError) Error() string { return "credentials: Dial timed out" } -func (timeoutError) Timeout() bool { return true } -func (timeoutError) Temporary() bool { return true } - -func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) { - // borrow some code from tls.DialWithDialer - var errChannel chan error - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- timeoutError{} - }) - } - if c.config.ServerName == "" { +func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { colonPos := strings.LastIndex(addr, ":") if colonPos == -1 { colonPos = len(addr) } - c.config.ServerName = addr[:colonPos] + cfg.ServerName = addr[:colonPos] } - conn := tls.Client(rawConn, &c.config) - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - err = <-errChannel - } - if err != nil { - rawConn.Close() - return nil, nil, err + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err := <-errChannel: + if err != nil { + return nil, nil, err + } + case <-ctx.Done(): + return nil, nil, ctx.Err() } // TODO(zhaoq): Omit the auth info for client now. It is more for // information than anything else. @@ -190,28 +172,27 @@ func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.D } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, &c.config) + conn := tls.Server(rawConn, c.config) if err := conn.Handshake(); err != nil { - rawConn.Close() return nil, nil, err } return conn, TLSInfo{conn.ConnectionState()}, nil } -// NewTLS uses c to construct a TransportAuthenticator based on TLS. -func NewTLS(c *tls.Config) TransportAuthenticator { - tc := &tlsCreds{*c} +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} tc.config.NextProtos = alpnProtoStr return tc } // NewClientTLSFromCert constructs a TLS from the input certificate for client. -func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator { +func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}) } // NewClientTLSFromFile constructs a TLS from the input certificate file for client. -func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) { +func NewClientTLSFromFile(certFile, serverName string) (TransportCredentials, error) { b, err := ioutil.ReadFile(certFile) if err != nil { return nil, err @@ -224,13 +205,13 @@ func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, } // NewServerTLSFromCert constructs a TLS from the input certificate for server. 
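On the client side, the renamed constructors above are typically combined with grpc.WithTransportCredentials; a sketch with placeholder certificate path, server name, and address:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// The returned TransportCredentials drive the context-aware
	// ClientHandshake shown above; an empty ServerName would be derived
	// from the dial address.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "server.example.com")
	if err != nil {
		log.Fatalf("failed to load TLS credentials: %v", err)
	}
	conn, err := grpc.Dial("server.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```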
-func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator { +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } // NewServerTLSFromFile constructs a TLS from the input certificate file and key // file for server. -func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) { +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return nil, err diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go new file mode 100644 index 00000000..9647b9ec --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -0,0 +1,76 @@ +// +build go1.7 + +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO replace this function with official clone function. 
+func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go new file mode 100644 index 00000000..09b8d12c --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -0,0 +1,74 @@ +// +build !go1.7 + +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO replace this function with official clone function. 
+func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index c0f54f75..b4c0e740 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -1,6 +1,6 @@ /* Package grpc implements an RPC system called gRPC. -See https://github.com/grpc/grpc for more information about gRPC. +See www.grpc.io for more information about gRPC. */ package grpc diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index cc6e27c0..2cc09be4 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -42,6 +42,8 @@ import ( ) // Use golang's standard logger by default. +// Access is not mutex-protected: do not modify except in init() +// functions. var logger Logger = log.New(os.Stderr, "", log.LstdFlags) // Logger mimics golang's standard Logger as an interface. @@ -54,7 +56,8 @@ type Logger interface { Println(args ...interface{}) } -// SetLogger sets the logger that is used in grpc. +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. func SetLogger(l Logger) { logger = l } diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 00000000..588f59e5 --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" +) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 00000000..5489143a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,49 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package internal contains gRPC-internal code for testing, to avoid polluting +// the godoc of the top-level grpc package. +package internal + +// TestingCloseConns closes all existing transports but keeps +// grpcServer.lis accepting new connections. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingCloseConns func(grpcServer interface{}) + +// TestingUseHandlerImpl enables the http.Handler-based server implementation. +// It must be called before Serve and requires TLS credentials. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 58469ddd..b3200156 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -60,15 +60,21 @@ func encodeKeyValue(k, v string) (string, string) { // DecodeKeyValue returns the original key and value corresponding to the // encoded data in k, v. +// If k is a binary header and v contains comma, v is split on comma before decoded, +// and the decoded v will be joined with comma before returned. func DecodeKeyValue(k, v string) (string, string, error) { if !strings.HasSuffix(k, binHdrSuffix) { return k, v, nil } - val, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return "", "", err + vvs := strings.Split(v, ",") + for i, vv := range vvs { + val, err := base64.StdEncoding.DecodeString(vv) + if err != nil { + return "", "", err + } + vvs[i] = string(val) } - return k, string(val), nil + return k, strings.Join(vvs, ","), nil } // MD is a mapping from metadata keys to values. Users should use the following diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go index 06605607..c2e0871e 100644 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -66,7 +66,8 @@ type Resolver interface { // Watcher watches for the updates on the specified target. type Watcher interface { // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. + // updates. The first call should get the full set of the results. It should + // return an error if and only if Watcher cannot recover. Next() ([]*Update, error) // Close closes the Watcher. 
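To make the new binary-header handling in DecodeKeyValue above concrete, a self-contained sketch that mirrors the documented behavior (split a comma-joined "-bin" value, base64-decode each piece, re-join); it reimplements the logic rather than calling the package.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeBinValue mirrors what DecodeKeyValue now does for keys with the
// "-bin" suffix: each comma-separated piece is base64-decoded and the
// decoded pieces are re-joined with commas.
func decodeBinValue(v string) (string, error) {
	parts := strings.Split(v, ",")
	for i, p := range parts {
		b, err := base64.StdEncoding.DecodeString(p)
		if err != nil {
			return "", err
		}
		parts[i] = string(b)
	}
	return strings.Join(parts, ","), nil
}

func main() {
	enc := base64.StdEncoding.EncodeToString([]byte("hello")) + "," +
		base64.StdEncoding.EncodeToString([]byte("world"))
	fmt.Println(decodeBinValue(enc)) // hello,world <nil>
}
```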
Close() diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 00000000..bfa6205b --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker.go b/vendor/google.golang.org/grpc/picker.go deleted file mode 100644 index 50f315b4..00000000 --- a/vendor/google.golang.org/grpc/picker.go +++ /dev/null @@ -1,243 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "container/list" - "fmt" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/transport" -) - -// Picker picks a Conn for RPC requests. -// This is EXPERIMENTAL and please do not implement your own Picker for now. -type Picker interface { - // Init does initial processing for the Picker, e.g., initiate some connections. - Init(cc *ClientConn) error - // Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC - // or some error happens. - Pick(ctx context.Context) (transport.ClientTransport, error) - // PickAddr picks a peer address for connecting. This will be called repeated for - // connecting/reconnecting. - PickAddr() (string, error) - // State returns the connectivity state of the underlying connections. - State() (ConnectivityState, error) - // WaitForStateChange blocks until the state changes to something other than - // the sourceState. It returns the new state or error. - WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) - // Close closes all the Conn's owned by this Picker. - Close() error -} - -// unicastPicker is the default Picker which is used when there is no custom Picker -// specified by users. It always picks the same Conn. -type unicastPicker struct { - target string - conn *Conn -} - -func (p *unicastPicker) Init(cc *ClientConn) error { - c, err := NewConn(cc) - if err != nil { - return err - } - p.conn = c - return nil -} - -func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { - return p.conn.Wait(ctx) -} - -func (p *unicastPicker) PickAddr() (string, error) { - return p.target, nil -} - -func (p *unicastPicker) State() (ConnectivityState, error) { - return p.conn.State(), nil -} - -func (p *unicastPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - return p.conn.WaitForStateChange(ctx, sourceState) -} - -func (p *unicastPicker) Close() error { - if p.conn != nil { - return p.conn.Close() - } - return nil -} - -// unicastNamingPicker picks an address from a name resolver to set up the connection. -type unicastNamingPicker struct { - cc *ClientConn - resolver naming.Resolver - watcher naming.Watcher - mu sync.Mutex - // The list of the addresses are obtained from watcher. - addrs *list.List - // It tracks the current picked addr by PickAddr(). The next PickAddr may - // push it forward on addrs. 
- pickedAddr *list.Element - conn *Conn -} - -// NewUnicastNamingPicker creates a Picker to pick addresses from a name resolver -// to connect. -func NewUnicastNamingPicker(r naming.Resolver) Picker { - return &unicastNamingPicker{ - resolver: r, - addrs: list.New(), - } -} - -type addrInfo struct { - addr string - // Set to true if this addrInfo needs to be deleted in the next PickAddrr() call. - deleting bool -} - -// processUpdates calls Watcher.Next() once and processes the obtained updates. -func (p *unicastNamingPicker) processUpdates() error { - updates, err := p.watcher.Next() - if err != nil { - return err - } - for _, update := range updates { - switch update.Op { - case naming.Add: - p.mu.Lock() - p.addrs.PushBack(&addrInfo{ - addr: update.Addr, - }) - p.mu.Unlock() - // Initial connection setup - if p.conn == nil { - conn, err := NewConn(p.cc) - if err != nil { - return err - } - p.conn = conn - } - case naming.Delete: - p.mu.Lock() - for e := p.addrs.Front(); e != nil; e = e.Next() { - if update.Addr == e.Value.(*addrInfo).addr { - if e == p.pickedAddr { - // Do not remove the element now if it is the current picked - // one. We leave the deletion to the next PickAddr() call. - e.Value.(*addrInfo).deleting = true - // Notify Conn to close it. All the live RPCs on this connection - // will be aborted. - p.conn.NotifyReset() - } else { - p.addrs.Remove(e) - } - } - } - p.mu.Unlock() - default: - grpclog.Println("Unknown update.Op ", update.Op) - } - } - return nil -} - -// monitor runs in a standalone goroutine to keep watching name resolution updates until the watcher -// is closed. -func (p *unicastNamingPicker) monitor() { - for { - if err := p.processUpdates(); err != nil { - return - } - } -} - -func (p *unicastNamingPicker) Init(cc *ClientConn) error { - w, err := p.resolver.Resolve(cc.target) - if err != nil { - return err - } - p.watcher = w - p.cc = cc - // Get the initial name resolution. 
- if err := p.processUpdates(); err != nil { - return err - } - go p.monitor() - return nil -} - -func (p *unicastNamingPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { - return p.conn.Wait(ctx) -} - -func (p *unicastNamingPicker) PickAddr() (string, error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.pickedAddr == nil { - p.pickedAddr = p.addrs.Front() - } else { - pa := p.pickedAddr - p.pickedAddr = pa.Next() - if pa.Value.(*addrInfo).deleting { - p.addrs.Remove(pa) - } - if p.pickedAddr == nil { - p.pickedAddr = p.addrs.Front() - } - } - if p.pickedAddr == nil { - return "", fmt.Errorf("there is no address available to pick") - } - return p.pickedAddr.Value.(*addrInfo).addr, nil -} - -func (p *unicastNamingPicker) State() (ConnectivityState, error) { - return 0, fmt.Errorf("State() is not supported for unicastNamingPicker") -} - -func (p *unicastNamingPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - return 0, fmt.Errorf("WaitForStateChange is not supported for unicastNamingPciker") -} - -func (p *unicastNamingPicker) Close() error { - p.watcher.Close() - p.conn.Close() - return nil -} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 46a6801b..35ac9cc7 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -34,13 +34,14 @@ package grpc import ( + "bytes" + "compress/gzip" "encoding/binary" "fmt" "io" + "io/ioutil" "math" - "math/rand" "os" - "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" @@ -60,7 +61,7 @@ type Codec interface { String() string } -// protoCodec is a Codec implemetation with protobuf. It is the default codec for gRPC. +// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. type protoCodec struct{} func (protoCodec) Marshal(v interface{}) ([]byte, error) { @@ -75,6 +76,73 @@ func (protoCodec) String() string { return "proto" } +// Compressor defines the interface gRPC uses to compress a message. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +func NewGZIPCompressor() Compressor { + return &gzipCompressor{} +} + +type gzipCompressor struct { +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := gzip.NewWriter(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + z, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + defer z.Close() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. 
+type callInfo struct { + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + traceInfo traceInfo // in trace.go +} + +var defaultCallInfo = callInfo{failFast: true} + // CallOption configures a Call before it starts or extracts information from // a Call after it completes. type CallOption interface { @@ -113,41 +181,70 @@ func Trailer(md *metadata.MD) CallOption { }) } +// FailFast configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If failfast is true, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will retry +// the call if it fails due to a transient error. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md +func FailFast(failFast bool) CallOption { + return beforeCall(func(c *callInfo) error { + c.failFast = failFast + return nil + }) +} + // The format of the payload: compressed or not? type payloadFormat uint8 const ( compressionNone payloadFormat = iota // no compression - compressionFlate - // More formats + compressionMade ) -// parser reads complelete gRPC messages from the underlying reader. +// parser reads complete gRPC messages from the underlying reader. type parser struct { - s io.Reader -} + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader -// recvMsg is to read a complete gRPC message from the stream. It is blocking if -// the message has not been complete yet. It returns the message and its type, -// EOF is returned with nil msg and 0 pf if the entire stream is done. Other -// non-nil error is returned if something is wrong on reading. -func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { // The header of a gRPC message. Find more detail // at http://www.grpc.io/docs/guides/wire.html. - var buf [5]byte + header [5]byte +} - if _, err := io.ReadFull(p.s, buf[:]); err != nil { +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := io.ReadFull(p.r, p.header[:]); err != nil { return 0, nil, err } - pf = payloadFormat(buf[0]) - length := binary.BigEndian.Uint32(buf[1:]) + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { return pf, nil, nil } + if length > uint32(maxMsgSize) { + return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.s, msg); err != nil { + if _, err := io.ReadFull(p.r, msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } @@ -158,7 +255,7 @@ func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { // encode serializes msg and prepends the message header. 
If msg is nil, it // generates the message header of 0 message length. -func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { +func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) { var b []byte var length uint if msg != nil { @@ -168,6 +265,12 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { if err != nil { return nil, err } + if cp != nil { + if err := cp.Do(cbuf, b); err != nil { + return nil, err + } + b = cbuf.Bytes() + } length = uint(len(b)) } if length > math.MaxUint32 { @@ -182,7 +285,11 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { var buf = make([]byte, payloadLen+sizeLen+len(b)) // Write payload format - buf[0] = byte(pf) + if cp == nil { + buf[0] = byte(compressionNone) + } else { + buf[0] = byte(compressionMade) + } // Write length of b into buf binary.BigEndian.PutUint32(buf[1:], uint32(length)) // Copy encoded msg to buf @@ -191,22 +298,40 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { return buf, nil } -func recv(p *parser, c Codec, m interface{}) error { - pf, d, err := p.recvMsg() +func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { + switch pf { + case compressionNone: + case compressionMade: + if dc == nil || recvCompress != dc.Type() { + return transport.StreamErrorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return transport.StreamErrorf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int) error { + pf, d, err := p.recvMsg(maxMsgSize) if err != nil { return err } - switch pf { - case compressionNone: - if err := c.Unmarshal(d, m); err != nil { - if rErr, ok := err.(rpcError); ok { - return rErr - } else { - return Errorf(codes.Internal, "grpc: %v", err) - } + if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { + return err + } + if pf == compressionMade { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } - default: - return Errorf(codes.Internal, "gprc: compression is not supported yet.") + } + if len(d) > maxMsgSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) + } + if err := c.Unmarshal(d, m); err != nil { + return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) } return nil } @@ -217,8 +342,8 @@ type rpcError struct { desc string } -func (e rpcError) Error() string { - return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc) +func (e *rpcError) Error() string { + return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc) } // Code returns the error code for err if it was produced by the rpc system. 
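Since rpcError is now returned by pointer, callers should keep inspecting failures through grpc.Code and grpc.ErrorDesc rather than type-asserting; a hedged sketch that also exercises the new FailFast call option (the method name is a placeholder, and grpc.Invoke is the usual raw-call entry point in this version of grpc-go):

```go
package callexample

import (
	"log"

	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// invokeWaiting issues a raw unary call that waits for a usable connection
// (FailFast(false)) and classifies any failure by its gRPC code.
func invokeWaiting(ctx context.Context, conn *grpc.ClientConn, req, resp interface{}) error {
	err := grpc.Invoke(ctx, "/example.Service/Method", req, resp, conn, grpc.FailFast(false))
	if err != nil {
		switch grpc.Code(err) {
		case codes.DeadlineExceeded, codes.Canceled:
			log.Printf("call ended by context: %s", grpc.ErrorDesc(err))
		default:
			log.Printf("rpc failed: code=%d desc=%s", grpc.Code(err), grpc.ErrorDesc(err))
		}
	}
	return err
}
```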
@@ -227,7 +352,7 @@ func Code(err error) codes.Code { if err == nil { return codes.OK } - if e, ok := err.(rpcError); ok { + if e, ok := err.(*rpcError); ok { return e.code } return codes.Unknown @@ -239,7 +364,7 @@ func ErrorDesc(err error) string { if err == nil { return "" } - if e, ok := err.(rpcError); ok { + if e, ok := err.(*rpcError); ok { return e.desc } return err.Error() @@ -251,7 +376,7 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { if c == codes.OK { return nil } - return rpcError{ + return &rpcError{ code: c, desc: fmt.Sprintf(format, a...), } @@ -260,18 +385,37 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into a rpcError. func toRPCErr(err error) error { switch e := err.(type) { - case rpcError: + case *rpcError: return err case transport.StreamError: - return rpcError{ + return &rpcError{ code: e.Code, desc: e.Desc, } case transport.ConnectionError: - return rpcError{ + return &rpcError{ code: codes.Internal, desc: e.Desc, } + default: + switch err { + case context.DeadlineExceeded: + return &rpcError{ + code: codes.DeadlineExceeded, + desc: err.Error(), + } + case context.Canceled: + return &rpcError{ + code: codes.Canceled, + desc: err.Error(), + } + case ErrClientConnClosing: + return &rpcError{ + code: codes.FailedPrecondition, + desc: err.Error(), + } + } + } return Errorf(codes.Unknown, "%v", err) } @@ -304,34 +448,10 @@ func convertCode(err error) codes.Code { return codes.Unknown } -const ( - // how long to wait after the first failure before retrying - baseDelay = 1.0 * time.Second - // upper bound of backoff delay - maxDelay = 120 * time.Second - // backoff increases by this factor on each retry - backoffFactor = 1.6 - // backoff is randomized downwards by this factor - backoffJitter = 0.2 -) - -func backoff(retries int) (t time.Duration) { - if retries == 0 { - return baseDelay - } - backoff, max := float64(baseDelay), float64(maxDelay) - for backoff < max && retries > 0 { - backoff *= backoffFactor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + backoffJitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} +// SupportPackageIsVersion3 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the grpc package. +// +// This constant may be renamed in the future if a change in the generated code +// requires a synchronised update of grpc-go and protoc-gen-go. This constant +// should not be referenced from any other code. 
+const SupportPackageIsVersion3 = true diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 655e7d86..b2a825ad 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -34,10 +34,12 @@ package grpc import ( + "bytes" "errors" "fmt" "io" "net" + "net/http" "reflect" "runtime" "strings" @@ -45,15 +47,17 @@ import ( "time" "golang.org/x/net/context" + "golang.org/x/net/http2" "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -69,6 +73,7 @@ type ServiceDesc struct { HandlerType interface{} Methods []MethodDesc Streams []StreamDesc + Metadata interface{} } // service consists of the information of the server serving this service and @@ -77,24 +82,38 @@ type service struct { server interface{} // the server for service methods md map[string]*MethodDesc sd map[string]*StreamDesc + mdata interface{} } // Server is a gRPC server to serve RPC requests. type Server struct { - opts options - mu sync.Mutex - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool + opts options + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[io.Closer]bool + drain bool + // A CondVar to let GracefulStop() blocks until all the pending RPCs are finished + // and all the transport goes away. + cv *sync.Cond m map[string]*service // service name -> service info events trace.EventLog } type options struct { - creds credentials.Credentials + creds credentials.TransportCredentials codec Codec + cp Compressor + dc Decompressor + maxMsgSize int + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor maxConcurrentStreams uint32 + useHandlerImpl bool // use http.Handler-based server } +var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit + // A ServerOption sets options. type ServerOption func(*options) @@ -105,6 +124,28 @@ func CustomCodec(codec Codec) ServerOption { } } +// RPCCompressor returns a ServerOption that sets a compressor for outbound messages. +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages. +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages. +// If this is not set, gRPC uses the default 4MB. +func MaxMsgSize(m int) ServerOption { + return func(o *options) { + o.maxMsgSize = m + } +} + // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { @@ -114,16 +155,40 @@ func MaxConcurrentStreams(n uint32) ServerOption { } // Creds returns a ServerOption that sets credentials for server connections. 
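The new server options above (RPCCompressor, RPCDecompressor, MaxMsgSize, alongside the existing MaxConcurrentStreams) are combined when constructing a server. A rough usage sketch, assuming the package's NewGZIPCompressor/NewGZIPDecompressor helpers; the port and the commented-out service registration are placeholders:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// gzip compression for outbound messages, gzip decompression for inbound
	// ones, and an 8 MB inbound message limit instead of the 4 MB default.
	s := grpc.NewServer(
		grpc.RPCCompressor(grpc.NewGZIPCompressor()),
		grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
		grpc.MaxMsgSize(8*1024*1024),
		grpc.MaxConcurrentStreams(128),
	)
	// Service registration (e.g. pb.RegisterFooServer(s, &fooServer{})) would go here.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}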
-func Creds(c credentials.Credentials) ServerOption { +func Creds(c credentials.TransportCredentials) ServerOption { return func(o *options) { o.creds = c } } +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return func(o *options) { + if o.unaryInt != nil { + panic("The unary server interceptor has been set.") + } + o.unaryInt = i + } +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return func(o *options) { + if o.streamInt != nil { + panic("The stream server interceptor has been set.") + } + o.streamInt = i + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { var opts options + opts.maxMsgSize = defaultMaxMsgSize for _, o := range opt { o(&opts) } @@ -134,9 +199,10 @@ func NewServer(opt ...ServerOption) *Server { s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[transport.ServerTransport]bool), + conns: make(map[io.Closer]bool), m: make(map[string]*service), } + s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) @@ -183,6 +249,7 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { server: ss, md: make(map[string]*MethodDesc), sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, } for i := range sd.Methods { d := &sd.Methods[i] @@ -195,100 +262,254 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { s.m[sd.ServiceName] = srv } +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + var ( // ErrServerStopped indicates that the operation is now illegal because of // the server being stopped. 
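A unary interceptor installed via UnaryInterceptor receives the decoded request plus a *grpc.UnaryServerInfo and must invoke the wrapped handler itself. A sketch of a logging interceptor, assuming the UnaryServerInterceptor and UnaryHandler signatures from the interceptor.go file added by this update:

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// loggingInterceptor times every unary RPC and logs its full method name and error.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("method=%s duration=%s err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Only one unary interceptor may be installed; chaining has to be done by the caller.
	s := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
	_ = s
}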
ErrServerStopped = errors.New("grpc: the server has been stopped") ) +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + // Serve accepts incoming connections on the listener lis, creating a new // ServerTransport and service goroutine for each. The service goroutines -// read gRPC request and then call the registered handlers to reply to them. -// Service returns when lis.Accept fails. +// read gRPC requests and then call the registered handlers to reply to them. +// Service returns when lis.Accept fails. lis will be closed when +// this method returns. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") if s.lis == nil { s.mu.Unlock() + lis.Close() return ErrServerStopped } s.lis[lis] = true s.mu.Unlock() defer func() { - lis.Close() s.mu.Lock() - delete(s.lis, lis) + if s.lis != nil && s.lis[lis] { + lis.Close() + delete(s.lis, lis) + } s.mu.Unlock() }() for { - c, err := lis.Accept() + rawConn, err := lis.Accept() if err != nil { s.mu.Lock() s.printf("done serving; Accept = %v", err) s.mu.Unlock() return err } - var authInfo credentials.AuthInfo - if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok { - var conn net.Conn - conn, authInfo, err = creds.ServerHandshake(c) - if err != nil { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Println("grpc: Server.Serve failed to complete security handshake.") - continue - } - c = conn - } - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - c.Close() - return nil - } - st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) - if err != nil { - s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - continue - } - s.conns[st] = true - s.mu.Unlock() - - go func() { - var wg sync.WaitGroup - st.HandleStreams(func(stream *transport.Stream) { - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), - } - trInfo.firstLine.client = false - trInfo.firstLine.remoteAddr = st.RemoteAddr() - stream.TraceContext(trInfo.tr) - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = dl.Sub(time.Now()) - } - } - wg.Add(1) - go func() { - s.handleStream(st, stream, trInfo) - wg.Done() - }() - }) - wg.Wait() - s.mu.Lock() - delete(s.conns, st) - s.mu.Unlock() - }() + // Start a new goroutine to deal with rawConn + // so we don't stall this Accept loop goroutine. + go s.handleRawConn(rawConn) } } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error { - p, err := encode(s.opts.codec, msg, pf) +// handleRawConn is run in its own goroutine and handles a just-accepted +// connection that has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandShake returns ErrConnDispatched, keep rawConn open. 
+ if err != credentials.ErrConnDispatched { + rawConn.Close() + } + return + } + + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + if s.opts.useHandlerImpl { + s.serveUsingHandler(conn) + } else { + s.serveNewHTTP2Transport(conn, authInfo) + } +} + +// serveNewHTTP2Transport sets up a new http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go) and +// serves streams on it. +// This is run in its own goroutine (it does network I/O in +// transport.NewServerTransport). +func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { + st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) + return + } + if !s.addConn(st) { + st.Close() + return + } + s.serveStreams(st) +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer s.removeConn(st) + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// serveUsingHandler is called from handleRawConn when s is configured +// to handle requests via the http.Handler interface. It sets up a +// net/http.Server to handle the just-accepted conn. The http.Server +// is configured to route all incoming requests (all HTTP/2 streams) +// to ServeHTTP, which creates a new ServerTransport for each stream. +// serveUsingHandler blocks until conn closes. +// +// This codepath is only used when Server.TestingUseHandlerImpl has +// been configured. This lets the end2end tests exercise the ServeHTTP +// method as one of the environment types. +// +// conn is the *tls.Conn that's already been authenticated. +func (s *Server) serveUsingHandler(conn net.Conn) { + if !s.addConn(conn) { + conn.Close() + return + } + defer s.removeConn(conn) + h2s := &http2.Server{ + MaxConcurrentStreams: s.opts.maxConcurrentStreams, + } + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + st.Close() + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. 
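Because ServeHTTP is exported, a gRPC server can in principle be mounted inside an ordinary net/http server, although the comments above note that this codepath is currently exercised only through TestingUseHandlerImpl. A sketch under that caveat; the certificate paths, port, and routing predicate are placeholders:

package main

import (
	"log"
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()
	// Route HTTP/2 requests carrying a gRPC content type to the handler-based
	// transport; everything else falls through to a plain HTTP response.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
			return
		}
		w.Write([]byte("not a gRPC request\n"))
	})
	// TLS is required so the Go HTTP server negotiates HTTP/2 via ALPN.
	log.Fatal(http.ListenAndServeTLS(":8443", "server.crt", "server.key", handler))
}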
+func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + trInfo = &traceInfo{ + tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + stream.TraceContext(trInfo.tr) + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(c io.Closer) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil || s.drain { + return false + } + s.conns[c] = true + return true +} + +func (s *Server) removeConn(c io.Closer) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, c) + s.cv.Signal() + } +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { + var cbuf *bytes.Buffer + if cp != nil { + cbuf = new(bytes.Buffer) + } + p, err := encode(s.opts.codec, msg, cp, cbuf) if err != nil { // This typically indicates a fatal issue (e.g., memory // corruption or hardware faults) the application program @@ -314,96 +535,141 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } - p := &parser{s: stream} + if s.opts.cp != nil { + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + stream.SetSendCompress(s.opts.cp.Type()) + } + p := &parser{r: stream} for { - pf, req, err := p.recvMsg() + pf, req, err := p.recvMsg(s.opts.maxMsgSize) if err == io.EOF { // The entire stream is done (for unary RPC only). return err } + if err == io.ErrUnexpectedEOF { + err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"} + } if err != nil { switch err := err.(type) { + case *rpcError: + if err := t.WriteStatus(stream, err.code, err.desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } case transport.ConnectionError: // Nothing to do here. 
case transport.StreamError: if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) } return err } - switch pf { - case compressionNone: - statusCode := codes.OK - statusDesc := "" - df := func(v interface{}) error { - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return err + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + switch err := err.(type) { + case transport.StreamError: + if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - reply, appErr := md.Handler(srv.server, stream.Context(), df) - if appErr != nil { - if err, ok := appErr.(rpcError); ok { - statusCode = err.code - statusDesc = err.desc - } else { - statusCode = convertCode(appErr) - statusDesc = appErr.Error() - } - if trInfo != nil && statusCode != codes.OK { - trInfo.tr.LazyLog(stringer(statusDesc), true) - trInfo.tr.SetError() + default: + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } - if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + } + return err + } + statusCode := codes.OK + statusDesc := "" + df := func(v interface{}) error { + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } return err } - return nil } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) + if len(req) > s.opts.maxMsgSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + statusCode = codes.Internal + statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize) } - opts := &transport.Options{ - Last: true, - Delay: false, - } - if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil { - switch err := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - statusCode = err.Code - statusDesc = err.Desc - default: - statusCode = codes.Unknown - statusDesc = err.Error() - } + if err := s.opts.codec.Unmarshal(req, v); err != nil { return err } if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) } - return t.WriteStatus(stream, statusCode, statusDesc) - default: - panic(fmt.Sprintf("payload format to be supported: %d", pf)) + return nil } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + if err, ok := appErr.(*rpcError); ok { + statusCode = err.code + statusDesc = err.desc + } else { + statusCode = convertCode(appErr) + statusDesc = appErr.Error() + } + if trInfo != nil && statusCode != codes.OK { + trInfo.tr.LazyLog(stringer(statusDesc), true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + return err + } + return nil + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + case transport.StreamError: + statusCode = err.Code + statusDesc = err.Desc + default: + statusCode = codes.Unknown + statusDesc = err.Error() + } + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + return t.WriteStatus(stream, statusCode, statusDesc) } } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } ss := &serverStream{ - t: t, - s: stream, - p: &parser{s: stream}, - codec: s.opts.codec, - trInfo: trInfo, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, + maxMsgSize: s.opts.maxMsgSize, + trInfo: trInfo, + } + if ss.cp != nil { + ss.cbuf = new(bytes.Buffer) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -418,10 +684,24 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() }() } - if appErr := sd.Handler(srv.server, ss); appErr != nil { - if err, ok := appErr.(rpcError); ok { + var appErr error + if s.opts.streamInt == nil { + appErr = sd.Handler(srv.server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) + } + if appErr != nil { + if err, ok := appErr.(*rpcError); ok { ss.statusCode = err.code ss.statusDesc = err.desc + } else if err, ok := appErr.(transport.StreamError); ok { + ss.statusCode = err.Code + ss.statusDesc = err.Desc } else { ss.statusCode = convertCode(appErr) ss.statusDesc = appErr.Error() @@ -509,21 +789,28 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } } -// Stop stops the gRPC server. Once Stop returns, the server stops accepting -// connection requests and closes all the connected connections. +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. 
+// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. func (s *Server) Stop() { s.mu.Lock() listeners := s.lis s.lis = nil - cs := s.conns + st := s.conns s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Signal() s.mu.Unlock() + for lis := range listeners { lis.Close() } - for c := range cs { + for c := range st { c.Close() } + s.mu.Lock() if s.events != nil { s.events.Finish() @@ -532,14 +819,49 @@ func (s *Server) Stop() { s.mu.Unlock() } -// TestingCloseConns closes all exiting transports but keeps s.lis accepting new -// connections. This is for test only now. -func (s *Server) TestingCloseConns() { +// GracefulStop stops the gRPC server gracefully. It stops the server to accept new +// connections and RPCs and blocks until all the pending RPCs are finished. +func (s *Server) GracefulStop() { + s.mu.Lock() + defer s.mu.Unlock() + if s.drain == true || s.conns == nil { + return + } + s.drain = true + for lis := range s.lis { + lis.Close() + } + s.lis = nil + for c := range s.conns { + c.(transport.ServerTransport).Drain() + } + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } +} + +func init() { + internal.TestingCloseConns = func(arg interface{}) { + arg.(*Server).testingCloseConns() + } + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } +} + +// testingCloseConns closes all existing transports but keeps s.lis +// accepting new connections. +func (s *Server) testingCloseConns() { s.mu.Lock() for c := range s.conns { c.Close() + delete(s.conns, c) } - s.conns = make(map[transport.ServerTransport]bool) s.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 0ee572c2..51df3f01 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -34,8 +34,10 @@ package grpc import ( + "bytes" "errors" "io" + "math" "sync" "time" @@ -46,12 +48,14 @@ import ( "google.golang.org/grpc/transport" ) -type streamHandler func(srv interface{}, stream ServerStream) error +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. type StreamDesc struct { StreamName string - Handler streamHandler + Handler StreamHandler // At least one of these is true. ServerStreams bool @@ -66,26 +70,24 @@ type Stream interface { // breaks. // On error, it aborts the stream and returns an RPC status on client // side. On server side, it simply returns the error to the caller. - // SendMsg is called by generated code. + // SendMsg is called by generated code. Also Users can call SendMsg + // directly when it is really needed in their use cases. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. On client side, it returns io.EOF when the stream is done. On - // any other error, it aborts the streama nd returns an RPC status. On + // any other error, it aborts the stream and returns an RPC status. On // server side, it simply returns the error to the caller. RecvMsg(m interface{}) error } -// ClientStream defines the interface a client stream has to satify. 
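GracefulStop pairs naturally with signal handling: stop accepting new connections and RPCs, then block until the in-flight RPCs drain. A minimal sketch; the listen address and signal set are placeholders:

package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"syscall"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()

	go func() {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, syscall.SIGTERM, os.Interrupt)
		<-ch
		// Close the listeners, drain existing connections, and return only
		// once all pending RPCs have finished.
		s.GracefulStop()
	}()

	if err := s.Serve(lis); err != nil {
		log.Printf("serve returned: %v", err)
	}
}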
+// ClientStream defines the interface a client stream has to satisfy. type ClientStream interface { - // Header returns the header metedata received from the server if there + // Header returns the header metadata received from the server if there // is any. It blocks if the metadata is not ready to read. Header() (metadata.MD, error) - // Trailer returns the trailer metadata from the server. It must be called - // after stream.Recv() returns non-nil error (including io.EOF) for - // bi-directional streaming and server streaming or stream.CloseAndRecv() - // returns for client streaming in order to receive trailer metadata if - // present. Otherwise, it could returns an empty MD even though trailer - // is present. + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). Trailer() metadata.MD // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. @@ -95,61 +97,144 @@ type ClientStream interface { // NewClientStream creates a new Stream for the client side. This is called // by generated code. -func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { var ( t transport.ClientTransport - err error + s *transport.Stream + put func() ) - t, err = cc.dopts.picker.Pick(ctx) - if err != nil { - return nil, toRPCErr(err) + c := defaultCallInfo + for _, o := range opts { + if err := o.before(&c); err != nil { + return nil, toRPCErr(err) + } } - // TODO(zhaoq): CallOption is omitted. Add support when it is needed. callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, + Flush: desc.ServerStreams && desc.ClientStreams, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + var trInfo traceInfo + if EnableTracing { + trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + trInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + defer func() { + if err != nil { + // Need to call tr.finish() if error is returned. + // Because tr will not be returned to caller. + trInfo.tr.LazyPrintf("RPC: [%v]", err) + trInfo.tr.SetError() + trInfo.tr.Finish() + } + }() + } + gopts := BalancerGetOptions{ + BlockingWait: !c.failFast, + } + for { + t, put, err = cc.getTransport(ctx, gopts) + if err != nil { + // TODO(zhaoq): Probably revisit the error handling. + if _, ok := err.(*rpcError); ok { + return nil, err + } + if err == errConnClosing || err == errConnUnavailable { + if c.failFast { + return nil, Errorf(codes.Unavailable, "%v", err) + } + continue + } + // All the other errors are treated as Internal errors. 
+ return nil, Errorf(codes.Internal, "%v", err) + } + + s, err = t.NewStream(ctx, callHdr) + if err != nil { + if put != nil { + put() + put = nil + } + if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { + if c.failFast { + return nil, toRPCErr(err) + } + continue + } + return nil, toRPCErr(err) + } + break } cs := &clientStream{ - desc: desc, - codec: cc.dopts.codec, + opts: opts, + c: c, + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + + put: put, + t: t, + s: s, + p: &parser{r: s}, + tracing: EnableTracing, + trInfo: trInfo, } - if cs.tracing { - cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - cs.trInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - cs.trInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false) - ctx = trace.NewContext(ctx, cs.trInfo.tr) + if cc.dopts.cp != nil { + cs.cbuf = new(bytes.Buffer) } - s, err := t.NewStream(ctx, callHdr) - if err != nil { - return nil, toRPCErr(err) - } - cs.t = t - cs.s = s - cs.p = &parser{s: s} - // Listen on ctx.Done() to detect cancellation when there is no pending - // I/O operations on this stream. + // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination + // when there is no pending I/O operations on this stream. go func() { - <-s.Context().Done() - cs.closeTransportStream(transport.ContextErr(s.Context().Err())) + select { + case <-t.Error(): + // Incur transport error, simply exit. + case <-s.Done(): + // TODO: The trace of the RPC is terminated here when there is no pending + // I/O, which is probably not the optimal solution. + if s.StatusCode() == codes.OK { + cs.finish(nil) + } else { + cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc())) + } + cs.closeTransportStream(nil) + case <-s.GoAway(): + cs.finish(errConnDrain) + cs.closeTransportStream(errConnDrain) + case <-s.Context().Done(): + err := s.Context().Err() + cs.finish(err) + cs.closeTransportStream(transport.ContextErr(err)) + } }() return cs, nil } // clientStream implements a client side Stream. type clientStream struct { + opts []CallOption + c callInfo t transport.ClientTransport s *transport.Stream p *parser desc *StreamDesc codec Codec + cp Compressor + cbuf *bytes.Buffer + dc Decompressor tracing bool // set to EnableTracing when the clientStream is created. mu sync.Mutex + put func() closed bool // trInfo.tr is set when the clientStream is created (if EnableTracing is true), // and is set to nil when the clientStream's finish method is called. @@ -183,7 +268,20 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { cs.mu.Unlock() } defer func() { - if err == nil || err == io.EOF { + if err != nil { + cs.finish(err) + } + if err == nil { + return + } + if err == io.EOF { + // Specialize the process for server streaming. SendMesg is only called + // once when creating the stream object. io.EOF needs to be skipped when + // the rpc is early finished (before the stream object is created.). + // TODO: It is probably better to move this into the generated code. 
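On the client side, the compressor and decompressor are supplied as dial options and NewClientStream is normally invoked by generated code. A sketch, assuming the WithCompressor/WithDecompressor dial options present in this grpc-go revision; the target address, stream descriptor, and method name are placeholders:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// The compressor sets grpc-encoding on outgoing messages; the decompressor
	// handles compressed responses, mirroring the server-side options.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithCompressor(grpc.NewGZIPCompressor()),
		grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Generated code normally builds the descriptor and method path.
	desc := &grpc.StreamDesc{StreamName: "Watch", ServerStreams: true}
	stream, err := grpc.NewClientStream(context.Background(), desc, conn, "/example.Service/Watch")
	if err != nil {
		log.Fatalf("new stream: %v", err)
	}
	_ = stream
}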
+ if !cs.desc.ClientStreams && cs.desc.ServerStreams { + err = nil + } return } if _, ok := err.(transport.ConnectionError); !ok { @@ -191,7 +289,12 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } err = toRPCErr(err) }() - out, err := encode(cs.codec, m, compressionNone) + out, err := encode(cs.codec, m, cs.cp, cs.cbuf) + defer func() { + if cs.cbuf != nil { + cs.cbuf.Reset() + } + }() if err != nil { return transport.StreamErrorf(codes.Internal, "grpc: %v", err) } @@ -199,7 +302,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } func (cs *clientStream) RecvMsg(m interface{}) (err error) { - err = recv(cs.p, cs.codec, m) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) defer func() { // err != nil indicates the termination of the stream. if err != nil { @@ -218,16 +321,17 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { return } // Special handling for client streaming rpc. - err = recv(cs.p, cs.codec, m) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } if err == io.EOF { if cs.s.StatusCode() == codes.OK { + cs.finish(err) return nil } - return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) } return toRPCErr(err) } @@ -239,15 +343,20 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { // Returns io.EOF to indicate the end of the stream. return } - return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) } return toRPCErr(err) } func (cs *clientStream) CloseSend() (err error) { err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + defer func() { + if err != nil { + cs.finish(err) + } + }() if err == nil || err == io.EOF { - return + return nil } if _, ok := err.(transport.ConnectionError); !ok { cs.closeTransportStream(err) @@ -268,11 +377,18 @@ func (cs *clientStream) closeTransportStream(err error) { } func (cs *clientStream) finish(err error) { + cs.mu.Lock() + defer cs.mu.Unlock() + for _, o := range cs.opts { + o.after(&cs.c) + } + if cs.put != nil { + cs.put() + cs.put = nil + } if !cs.tracing { return } - cs.mu.Lock() - defer cs.mu.Unlock() if cs.trInfo.tr != nil { if err == nil || err == io.EOF { cs.trInfo.tr.LazyPrintf("RPC: [OK]") @@ -303,6 +419,10 @@ type serverStream struct { s *transport.Stream p *parser codec Codec + cp Compressor + dc Decompressor + cbuf *bytes.Buffer + maxMsgSize int statusCode codes.Code statusDesc string trInfo *traceInfo @@ -341,7 +461,12 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { ss.mu.Unlock() } }() - out, err := encode(ss.codec, m, compressionNone) + out, err := encode(ss.codec, m, ss.cp, ss.cbuf) + defer func() { + if ss.cbuf != nil { + ss.cbuf.Reset() + } + }() if err != nil { err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) return err @@ -364,5 +489,5 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { ss.mu.Unlock() } }() - return recv(ss.p, ss.codec, m) + return recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize) } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index cde04fbf..f6747e1d 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -101,9 +101,8 @@ type payload struct { func (p payload) String() string { if p.sent { return fmt.Sprintf("sent: %v", p.msg) - } else { - return 
fmt.Sprintf("recv: %v", p.msg) } + return fmt.Sprintf("recv: %v", p.msg) } type fmtStringer struct { diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index f6b38a5a..4ef0830b 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -56,43 +56,38 @@ type windowUpdate struct { increment uint32 } -func (windowUpdate) isItem() bool { - return true -} +func (*windowUpdate) item() {} type settings struct { ack bool ss []http2.Setting } -func (settings) isItem() bool { - return true -} +func (*settings) item() {} type resetStream struct { streamID uint32 code http2.ErrCode } -func (resetStream) isItem() bool { - return true +func (*resetStream) item() {} + +type goAway struct { } +func (*goAway) item() {} + type flushIO struct { } -func (flushIO) isItem() bool { - return true -} +func (*flushIO) item() {} type ping struct { ack bool data [8]byte } -func (ping) isItem() bool { - return true -} +func (*ping) item() {} // quotaPool is a pool which accumulates the quota and sends it to acquire() // when it is available. @@ -172,10 +167,6 @@ func (qb *quotaPool) acquire() <-chan int { type inFlow struct { // The inbound flow control limit for pending data. limit uint32 - // conn points to the shared connection-level inFlow that is shared - // by all streams on that conn. It is nil for the inFlow on the conn - // directly. - conn *inFlow mu sync.Mutex // pendingData is the overall data which have been received but not been @@ -186,75 +177,39 @@ type inFlow struct { pendingUpdate uint32 } -// onData is invoked when some data frame is received. It increments not only its -// own pendingData but also that of the associated connection-level flow. +// onData is invoked when some data frame is received. It updates pendingData. func (f *inFlow) onData(n uint32) error { - if n == 0 { - return nil - } f.mu.Lock() defer f.mu.Unlock() - if f.pendingData+f.pendingUpdate+n > f.limit { - return fmt.Errorf("recieved %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit) - } - if f.conn != nil { - if err := f.conn.onData(n); err != nil { - return ConnectionErrorf("%v", err) - } - } f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit { + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) + } return nil } -// connOnRead updates the connection level states when the application consumes data. -func (f *inFlow) connOnRead(n uint32) uint32 { - if n == 0 || f.conn != nil { - return 0 - } +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { f.mu.Lock() defer f.mu.Unlock() + if f.pendingData == 0 { + return 0 + } f.pendingData -= n f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { - ret := f.pendingUpdate + wu := f.pendingUpdate f.pendingUpdate = 0 - return ret + return wu } return 0 } -// onRead is invoked when the application reads the data. It returns the window updates -// for both stream and connection level. -func (f *inFlow) onRead(n uint32) (swu, cwu uint32) { - if n == 0 { - return - } - f.mu.Lock() - defer f.mu.Unlock() - if f.pendingData == 0 { - // pendingData has been adjusted by restoreConn. 
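The reworked inFlow above defers window updates until the application has consumed at least a quarter of the window. A standalone illustration of that policy, with locking and underflow guards omitted; pendingWindow is an illustrative type, not the transport's inFlow:

package main

import "fmt"

// pendingWindow mimics the bookkeeping in inFlow: bytes are counted as pending
// when they arrive and moved to pendingUpdate when the application reads them;
// a WINDOW_UPDATE is emitted once a quarter of the window has been consumed.
type pendingWindow struct {
	limit         uint32
	pendingData   uint32
	pendingUpdate uint32
}

func (w *pendingWindow) onData(n uint32) { w.pendingData += n }

func (w *pendingWindow) onRead(n uint32) uint32 {
	w.pendingData -= n
	w.pendingUpdate += n
	if w.pendingUpdate >= w.limit/4 {
		wu := w.pendingUpdate
		w.pendingUpdate = 0
		return wu // size of the WINDOW_UPDATE to send to the peer
	}
	return 0
}

func main() {
	w := &pendingWindow{limit: 65535}
	w.onData(20000)
	fmt.Println(w.onRead(10000)) // 0: still below the limit/4 threshold
	fmt.Println(w.onRead(10000)) // 20000: threshold crossed, update sent
}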
- return - } - f.pendingData -= n - f.pendingUpdate += n - if f.pendingUpdate >= f.limit/4 { - swu = f.pendingUpdate - f.pendingUpdate = 0 - } - cwu = f.conn.connOnRead(n) - return -} - -// restoreConn is invoked when a stream is terminated. It removes its stake in -// the connection-level flow and resets its own state. -func (f *inFlow) restoreConn() uint32 { - if f.conn == nil { - return 0 - } +func (f *inFlow) resetPendingData() uint32 { f.mu.Lock() defer f.mu.Unlock() n := f.pendingData f.pendingData = 0 - f.pendingUpdate = 0 - return f.conn.connOnRead(n) + return n } diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go new file mode 100644 index 00000000..ee1c46ba --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go16.go @@ -0,0 +1,46 @@ +// +build go1.6,!go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "net" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go new file mode 100644 index 00000000..356f13ff --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go17.go @@ -0,0 +1,46 @@ +// +build go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "net" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go new file mode 100644 index 00000000..f23b2daf --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -0,0 +1,397 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + if !validContentType(r.Header.Get("Content-Type")) { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + var metakv []string + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) { + continue + } + for _, v := range vv { + if k == "user-agent" { + // user-agent is special. Copying logic of http_util.go. + if i := strings.LastIndex(v, " "); i == -1 { + // There is no application user agent string being set + continue + } else { + v = v[:i] + } + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. 
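decodeTimeout above parses the grpc-timeout header, which per the gRPC-over-HTTP/2 wire format is a short decimal value followed by a single unit letter (H, M, S, m, u, n). An illustrative stand-in parser, not the transport's private implementation:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseGRPCTimeout converts a grpc-timeout header value such as "500m" or
// "10S" into a time.Duration. The real format limits the digits to at most 8.
func parseGRPCTimeout(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("grpc-timeout too short: %q", s)
	}
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	unit, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("bad unit in %q", s)
	}
	v, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	return time.Duration(v) * unit, nil
}

func main() {
	d, _ := parseGRPCTimeout("500m")
	fmt.Println(d) // 500ms
}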
+type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) + if statusDesc != "" { + h.Set("Grpc-Message", encodeGrpcMessage(statusDesc)) + } + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to + // send undeclared Trailers after the + // headers have possibly been written. + h.Add(http2.TrailerPrefix+k, v) + } + } + } + }) + close(ht.writes) + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", "application/grpc") + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(data) + if !opts.Delay { + ht.rw.(http.Flusher).Flush() + } + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. 
+ + var ctx context.Context + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. + requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() + go func() { + select { + case <-requestOver: + return + case <-ht.closedCh: + case <-clientGone: + } + cancel() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + windowHandler: func(int) {}, // nothing + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + } + ctx = metadata.NewContext(ctx, ht.headerMD) + ctx = peer.NewContext(ctx, pr) + s.ctx = newContextWithStream(ctx, s) + s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(&recvMsg{data: buf[:n:n]}) + buf = buf[n:] + } + if err != nil { + s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn, ok := <-ht.writes: + if !ok { + return + } + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. 
+// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return StreamError{ + Code: code, + Desc: se.Error(), + } + } + } + return ConnectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 07b0c117..afbba454 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -35,7 +35,7 @@ package transport import ( "bytes" - "errors" + "fmt" "io" "math" "net" @@ -50,6 +50,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) // http2Client implements the ClientTransport interface with HTTP2. @@ -71,6 +72,9 @@ type http2Client struct { shutdownChan chan struct{} // errorChan is closed to notify the I/O error to the caller. errorChan chan struct{} + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} framer *framer hBuf *bytes.Buffer // the buffer for HPACK encoding @@ -88,7 +92,7 @@ type http2Client struct { // The scheme used: https if TLS is on, http otherwise. scheme string - authCreds []credentials.Credentials + creds []credentials.PerRPCCredentials mu sync.Mutex // guard the following variables state transportState // the state of underlying connection @@ -97,69 +101,71 @@ type http2Client struct { maxStreams int // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 + // goAwayID records the Last-Stream-ID in the GoAway frame from the server. + goAwayID uint32 + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 +} + +func dial(fn func(context.Context, string) (net.Conn, error), ctx context.Context, addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return dialContext(ctx, "tcp", addr) +} + +func isTemporary(err error) bool { + switch err { + case io.EOF: + // Connection closures may be resolved upon retry, and are thus + // treated as temporary. + return true + case context.DeadlineExceeded: + // In Go 1.7, context.DeadlineExceeded implements Timeout(), and this + // special case is not needed. Until then, we need to keep this + // clause. + return true + } + + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return false } // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) { - if opts.Dialer == nil { - // Set the default Dialer. 
- opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("tcp", addr, timeout) - } - } +func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ ClientTransport, err error) { scheme := "http" - startT := time.Now() - timeout := opts.Timeout - conn, connErr := opts.Dialer(addr, timeout) - if connErr != nil { - return nil, ConnectionErrorf("transport: %v", connErr) + conn, err := dial(opts.Dialer, ctx, addr) + if err != nil { + return nil, ConnectionErrorf(true, err, "transport: %v", err) } - var authInfo credentials.AuthInfo - for _, c := range opts.AuthOptions { - if ccreds, ok := c.(credentials.TransportAuthenticator); ok { - scheme = "https" - // TODO(zhaoq): Now the first TransportAuthenticator is used if there are - // multiple ones provided. Revisit this if it is not appropriate. Probably - // place the ClientTransport construction into a separate function to make - // things clear. - if timeout > 0 { - timeout -= time.Since(startT) - } - conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) - break - } - } - if connErr != nil { - return nil, ConnectionErrorf("transport: %v", connErr) - } - defer func() { + // Any further errors will close the underlying connection + defer func(conn net.Conn) { if err != nil { conn.Close() } - }() - // Send connection preface to server. - n, err := conn.Write(clientPreface) - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - if n != len(clientPreface) { - return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - } - framer := newFramer(conn) - if initialWindowSize != defaultWindowSize { - err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) - } else { - err = framer.writeSettings(true) - } - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf("transport: %v", err) + }(conn) + var authInfo credentials.AuthInfo + if creds := opts.TransportCredentials; creds != nil { + scheme = "https" + conn, authInfo, err = creds.ClientHandshake(ctx, addr, conn) + if err != nil { + // Credentials handshake errors are typically considered permanent + // to avoid retrying on e.g. bad certificates. + temp := isTemporary(err) + return nil, ConnectionErrorf(temp, err, "transport: %v", err) } } ua := primaryUA @@ -177,7 +183,8 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e writableChan: make(chan int, 1), shutdownChan: make(chan struct{}), errorChan: make(chan struct{}), - framer: framer, + goAway: make(chan struct{}), + framer: newFramer(conn), hBuf: &buf, hEnc: hpack.NewEncoder(&buf), controlBuf: newRecvBuffer(), @@ -186,31 +193,58 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e scheme: scheme, state: reachable, activeStreams: make(map[uint32]*Stream), - authCreds: opts.AuthOptions, + creds: opts.PerRPCCredentials, maxStreams: math.MaxInt32, streamSendQuota: defaultWindowSize, } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + // Send connection preface to server. 
+ n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, ConnectionErrorf(true, err, "transport: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, ConnectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + if initialWindowSize != defaultWindowSize { + err = t.framer.writeSettings(true, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(initialWindowSize), + }) + } else { + err = t.framer.writeSettings(true) + } + if err != nil { + t.Close() + return nil, ConnectionErrorf(true, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { + t.Close() + return nil, ConnectionErrorf(true, err, "transport: %v", err) + } + } go t.controller() t.writableChan <- 0 - // Start the reader goroutine for incoming message. The threading model - // on receiving is that each transport has a dedicated goroutine which - // reads HTTP2 frame from network. Then it dispatches the frame to the - // corresponding stream entity. - go t.reader() return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ id: t.nextID, + done: make(chan struct{}), + goAway: make(chan struct{}), method: callHdr.Method, + sendCompress: callHdr.SendCompress, buf: newRecvBuffer(), - fc: fc, + fc: &inFlow{limit: initialWindowSize}, sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), headerChan: make(chan struct{}), } @@ -221,8 +255,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // Make a stream be able to cancel the pending operations by itself. s.ctx, s.cancel = context.WithCancel(ctx) s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, } return s } @@ -234,16 +269,22 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea var timeout time.Duration if dl, ok := ctx.Deadline(); ok { timeout = dl.Sub(time.Now()) - if timeout <= 0 { - return nil, ContextErr(context.DeadlineExceeded) - } + } + select { + case <-ctx.Done(): + return nil, ContextErr(ctx.Err()) + default: + } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), } // Attach Auth info if there is any. if t.authInfo != nil { - ctx = credentials.NewContext(ctx, t.authInfo) + pr.AuthInfo = t.authInfo } + ctx = peer.NewContext(ctx, pr) authData := make(map[string]string) - for _, c := range t.authCreds { + for _, c := range t.creds { // Construct URI required to get auth request metadata. 
var port string if pos := strings.LastIndex(t.target, ":"); pos != -1 { @@ -266,6 +307,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return nil, ErrConnClosing + } + if t.state == draining { + t.mu.Unlock() + return nil, ErrStreamDrain + } if t.state != reachable { t.mu.Unlock() return nil, ErrConnClosing @@ -273,7 +322,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea checkStreamsQuota := t.streamsQuota != nil t.mu.Unlock() if checkStreamsQuota { - sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) + sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) if err != nil { return nil, err } @@ -282,11 +331,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.streamsQuota.add(sq - 1) } } - if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { - // t.streamsQuota will be updated when t.CloseStream is invoked. + if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { + // Return the quota back now because there is no stream returned to the caller. + if _, ok := err.(StreamError); ok && checkStreamsQuota { + t.streamsQuota.add(1) + } return nil, err } t.mu.Lock() + if t.state == draining { + t.mu.Unlock() + if checkStreamsQuota { + t.streamsQuota.add(1) + } + // Need to make t writable again so that the rpc in flight can still proceed. + t.writableChan <- 0 + return nil, ErrStreamDrain + } if t.state != reachable { t.mu.Unlock() return nil, ErrConnClosing @@ -317,10 +378,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.SendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } if timeout > 0 { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } for k, v := range authData { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) } var ( @@ -330,6 +396,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if md, ok := metadata.FromContext(ctx); ok { hasMD = true for k, v := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) } @@ -344,6 +414,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } else { endHeaders = true } + var flush bool + if endHeaders && (hasMD || callHdr.Flush) { + flush = true + } if first { // Sends a HeadersFrame to server to start a new stream. p := http2.HeadersFrameParam{ @@ -355,15 +429,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // Do a force flush for the buffered frames iff it is the last headers frame // and there is header metadata to be sent. Otherwise, there is flushing until // the corresponding data frame is written. - err = t.framer.writeHeaders(hasMD && endHeaders, p) + err = t.framer.writeHeaders(flush, p) first = false } else { // Sends Continuation frames for the leftover headers. 
- err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size)) + err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) } if err != nil { t.notifyError(err) - return nil, ConnectionErrorf("transport: %v", err) + return nil, ConnectionErrorf(true, err, "transport: %v", err) } } t.writableChan <- 0 @@ -375,22 +449,29 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea func (t *http2Client) CloseStream(s *Stream, err error) { var updateStreams bool t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return + } if t.streamsQuota != nil { updateStreams = true } delete(t.activeStreams, s.id) + if t.state == draining && len(t.activeStreams) == 0 { + // The transport is draining and s is the last live stream on t. + t.mu.Unlock() + t.Close() + return + } t.mu.Unlock() if updateStreams { t.streamsQuota.add(1) } - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), the caller needs - // to call cancel on the stream to interrupt the blocking on - // other goroutines. - s.cancel() s.mu.Lock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) + if q := s.fc.resetPendingData(); q > 0 { + if n := t.fc.onRead(q); n > 0 { + t.controlBuf.put(&windowUpdate{0, n}) + } } if s.state == streamDone { s.mu.Unlock() @@ -414,7 +495,10 @@ func (t *http2Client) Close() (err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return + } + if t.state == reachable || t.state == draining { + close(t.errorChan) } t.state = closing t.mu.Unlock() @@ -437,6 +521,50 @@ func (t *http2Client) Close() (err error) { return } +func (t *http2Client) GracefulClose() error { + t.mu.Lock() + switch t.state { + case unreachable: + // The server may close the connection concurrently. t is not available for + // any streams. Close it now. + t.mu.Unlock() + t.Close() + return nil + case closing: + t.mu.Unlock() + return nil + } + // Notify the streams which were initiated after the server sent GOAWAY. + select { + case <-t.goAway: + n := t.prevGoAwayID + if n == 0 && t.nextID > 1 { + n = t.nextID - 2 + } + m := t.goAwayID + 2 + if m == 2 { + m = 1 + } + for i := m; i <= n; i += 2 { + if s, ok := t.activeStreams[i]; ok { + close(s.goAway) + } + } + default: + } + if t.state == draining { + t.mu.Unlock() + return nil + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + return t.Close() + } + return nil +} + // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. // TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later @@ -449,15 +577,15 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { size := http2MaxFrameLen s.sendQuotaPool.add(0) // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) if err != nil { return err } t.sendQuotaPool.add(0) // Wait until the transport has some quota to send the data. 
- tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) if err != nil { - if _, ok := err.(StreamError); ok { + if _, ok := err.(StreamError); ok || err == io.EOF { t.sendQuotaPool.cancel() } return err @@ -489,7 +617,11 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { // Indicate there is a writer who is about to write a data frame. t.framer.adjustNumWriters(1) // Got some quota. Try to acquire writing privilege on the transport. - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok || err == io.EOF { + // Return the connection quota back. + t.sendQuotaPool.add(len(p)) + } if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. It queues @@ -499,6 +631,16 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { } return err } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(len(p)) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) + } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { // Do a force flush iff this is last frame for the entire gRPC message // and the caller is the only writer at this moment. @@ -509,7 +651,7 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { // invoked. if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { t.notifyError(err) - return ConnectionErrorf("transport: %v", err) + return ConnectionErrorf(true, err, "transport: %v", err) } if t.framer.adjustNumWriters(-1) == 0 { t.framer.flushWrite() @@ -524,11 +666,7 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { } s.mu.Lock() if s.state != streamDone { - if s.state == streamReadDone { - s.state = streamDone - } else { - s.state = streamWriteDone - } + s.state = streamWriteDone } s.mu.Unlock() return nil @@ -537,55 +675,62 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { t.mu.Lock() defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - if s, ok := t.activeStreams[f.Header().StreamID]; ok { - return s, true - } - return nil, false + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok } // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. func (t *http2Client) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) } } func (t *http2Client) handleData(f *http2.DataFrame) { + size := len(f.Data()) + if err := t.fc.onData(uint32(size)); err != nil { + t.notifyError(ConnectionErrorf(true, err, "%v", err)) + return + } // Select the right stream to dispatch. 
s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } - size := len(f.Data()) if size > 0 { + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + return + } if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - t.notifyError(err) - return - } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } s.state = streamDone s.statusCode = codes.Internal s.statusDesc = err.Error() + close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? @@ -597,13 +742,14 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // the read direction is closed, and set the status appropriately. if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { s.mu.Lock() - if s.state == streamWriteDone { - s.state = streamDone - } else { - s.state = streamReadDone + if s.state == streamDone { + s.mu.Unlock() + return } + s.state = streamDone s.statusCode = codes.Internal s.statusDesc = "server closed the stream without sending trailers" + close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -624,10 +770,13 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { close(s.headerChan) s.headerDone = true } - s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] + s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + s.statusCode = codes.Unknown } + s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode) + close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -652,7 +801,32 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - // TODO(zhaoq): GoAwayFrame handler to be implemented + t.mu.Lock() + if t.state == reachable || t.state == draining { + if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { + t.mu.Unlock() + t.notifyError(ConnectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) + return + } + select { + case <-t.goAway: + id := t.goAwayID + // t.goAway has been closed (i.e.,multiple GoAways). + if id < f.LastStreamID { + t.mu.Unlock() + t.notifyError(ConnectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) + return + } + t.prevGoAwayID = id + t.goAwayID = f.LastStreamID + t.mu.Unlock() + return + default: + } + t.goAwayID = f.LastStreamID + close(t.goAway) + } + t.mu.Unlock() } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -667,52 +841,59 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } -// operateHeader takes action on the decoded headers. It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). 
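To make the goAwayID/prevGoAwayID bookkeeping in handleGoAway and GracefulClose above concrete, a small worked example with made-up stream IDs:

    // Suppose the client has opened streams 1, 3, 5, 7 and 9 (t.nextID == 11)
    // and the server sends GOAWAY with LastStreamID == 5. handleGoAway sets
    // t.goAwayID = 5 and closes t.goAway. When GracefulClose then runs:
    //
    //   n := t.prevGoAwayID            // 0: no earlier GOAWAY was seen
    //   if n == 0 && t.nextID > 1 {
    //       n = t.nextID - 2           // 9: highest stream ID handed out
    //   }
    //   m := t.goAwayID + 2            // 7: first stream the server refused
    //
    // so the loop closes s.goAway for streams 7 and 9; those RPCs fail with
    // ErrStreamDrain and may be retried on a fresh connection, while streams
    // 1, 3 and 5 are left to complete normally.
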
-func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeClientHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return } - if err != nil { - s.write(recvMsg{err: err}) + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) + } + if state.err != nil { + s.write(recvMsg{err: state.err}) // Something wrong. Stops reading even when there is remaining. - return nil - } - if !endHeaders { - return s + return } + endStream := frame.StreamEnded() + s.mu.Lock() + if !endStream { + s.recvCompress = state.encoding + } if !s.headerDone { - if !endStream && len(hDec.state.mdata) > 0 { - s.header = hDec.state.mdata + if !endStream && len(state.mdata) > 0 { + s.header = state.mdata } close(s.headerChan) s.headerDone = true } if !endStream || s.state == streamDone { s.mu.Unlock() - return nil + return } - if len(hDec.state.mdata) > 0 { - s.trailer = hDec.state.mdata + if len(state.mdata) > 0 { + s.trailer = state.mdata } + s.statusCode = state.statusCode + s.statusDesc = state.statusDesc + close(s.done) s.state = streamDone - s.statusCode = hDec.state.statusCode - s.statusDesc = hDec.state.statusDesc s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - return nil +} + +func handleMalformedHTTP2(s *Stream, err error) { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: err}) } // reader runs as a separate goroutine in charge of reading data from network @@ -735,25 +916,31 @@ func (t *http2Client) reader() { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream // loop to keep reading incoming messages on this transport. for { frame, err := t.framer.readFrame() if err != nil { - t.notifyError(err) - return + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + handleMalformedHTTP2(s, StreamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) + } + continue + } else { + // Transport error. + t.notifyError(err) + return + } } switch frame := frame.(type) { - case *http2.HeadersFrame: - // operateHeaders has to be invoked regardless the value of curStream - // because the HPACK decoder needs to be updated using the received - // headers. - curStream, _ = t.getStream(frame) - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded()) + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: @@ -848,13 +1035,22 @@ func (t *http2Client) Error() <-chan struct{} { return t.errorChan } +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + func (t *http2Client) notifyError(err error) { t.mu.Lock() - defer t.mu.Unlock() // make sure t.errorChan is closed only once. 
+ if t.state == draining { + t.mu.Unlock() + t.Close() + return + } if t.state == reachable { t.state = unreachable close(t.errorChan) grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) } + t.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index e16c63cc..16010d55 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -49,6 +49,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) // ErrIllegalHeaderWrite indicates that setting header is illegal because of @@ -61,8 +62,8 @@ type http2Server struct { maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by sending a value on writableChan - // and releases it by receiving from writableChan. + // A writer acquires the write lock by receiving a value on writableChan + // and releases it by sending on writableChan. writableChan chan int // shutdownChan is closed when Close is called. // Blocking operations should select on shutdownChan to avoid @@ -99,18 +100,23 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI if maxStreams == 0 { maxStreams = math.MaxUint32 } else { - settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams}) + settings = append(settings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) } if initialWindowSize != defaultWindowSize { - settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + settings = append(settings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(initialWindowSize)}) } if err := framer.writeSettings(true, settings...); err != nil { - return nil, ConnectionErrorf("transport: %v", err) + return nil, ConnectionErrorf(true, err, "transport: %v", err) } // Adjust the connection flow control window if needed. if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf("transport: %v", err) + return nil, ConnectionErrorf(true, err, "transport: %v", err) } } var buf bytes.Buffer @@ -135,67 +141,77 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI return t, nil } -// operateHeader takes action on the decoded headers. It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). -func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream)) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeServerHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeader takes action on the decoded headers. 
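The client reader above no longer decodes HPACK by hand: the framer is handed an hpack decoder through its ReadMetaHeaders field (see newFramer in http_util.go below), so HEADERS and CONTINUATION frames arrive pre-assembled as *http2.MetaHeadersFrame. A minimal sketch of that x/net/http2 pattern; readHeaders and the table size are placeholders, not code from this patch:

    import (
        "golang.org/x/net/http2"
        "golang.org/x/net/http2/hpack"
    )

    func readHeaders(fr *http2.Framer) error {
        // With ReadMetaHeaders set, ReadFrame never returns raw HeadersFrame
        // or ContinuationFrame values; it coalesces them and decodes the
        // header block into MetaHeadersFrame.Fields.
        fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
        for {
            f, err := fr.ReadFrame()
            if err != nil {
                return err
            }
            if mh, ok := f.(*http2.MetaHeadersFrame); ok {
                for _, hf := range mh.Fields {
                    _ = hf.Name  // already HPACK-decoded name/value pairs
                    _ = hf.Value
                }
            }
        }
    }
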
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) (close bool) { + buf := newRecvBuffer() + s := &Stream{ + id: frame.Header().StreamID, + st: t, + buf: buf, + fc: &inFlow{limit: initialWindowSize}, } - if err != nil { - grpclog.Printf("transport: http2Server.operateHeader found %v", err) + + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) + } + if err := state.err; err != nil { if se, ok := err.(StreamError); ok { t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) } - return nil + return } - if endStream { + + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if !endHeaders { - return s - } - if hDec.state.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout) + s.recvCompress = state.encoding + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) } else { s.ctx, s.cancel = context.WithCancel(context.TODO()) } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), + } // Attach Auth info if there is any. if t.authInfo != nil { - s.ctx = credentials.NewContext(s.ctx, t.authInfo) + pr.AuthInfo = t.authInfo } + s.ctx = peer.NewContext(s.ctx, pr) // Cache the current stream to the context so that the server application // can find out. Required when the server wants to send some metadata // back to the client (unary call only). s.ctx = newContextWithStream(s.ctx, s) // Attach the received metadata to the context. - if len(hDec.state.mdata) > 0 { - s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata) + if len(state.mdata) > 0 { + s.ctx = metadata.NewContext(s.ctx, state.mdata) } s.dec = &recvBufferReader{ ctx: s.ctx, recv: s.buf, } - s.method = hDec.state.method + s.recvCompress = state.encoding + s.method = state.method t.mu.Lock() if t.state != reachable { t.mu.Unlock() - return nil + return } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) - return nil + return } + if s.id%2 != 1 || s.id <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. + grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id) + return true + } + t.maxStreamID = s.id s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) t.activeStreams[s.id] = s t.mu.Unlock() @@ -203,7 +219,7 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header t.updateWindow(s, uint32(n)) } handle(s) - return nil + return } // HandleStreams receives incoming streams using the given handler. 
This is @@ -223,6 +239,10 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } frame, err := t.framer.readFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } if err != nil { grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() @@ -236,39 +256,33 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream for { frame, err := t.framer.readFrame() if err != nil { + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s) + } + t.controlBuf.put(&resetStream{se.StreamID, se.Code}) + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() return } switch frame := frame.(type) { - case *http2.HeadersFrame: - id := frame.Header().StreamID - if id%2 != 1 || id <= t.maxStreamID { - // illegal gRPC stream id. - grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id) + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle) { t.Close() break } - t.maxStreamID = id - buf := newRecvBuffer() - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } - curStream = &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: fc, - } - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded(), handle) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: @@ -280,7 +294,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) case *http2.GoAwayFrame: - break + // TODO: Handle GoAway from the client appropriately. default: grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } @@ -306,33 +320,51 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. func (t *http2Server) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) } } func (t *http2Server) handleData(f *http2.DataFrame) { + size := len(f.Data()) + if err := t.fc.onData(uint32(size)); err != nil { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } - size := len(f.Data()) if size > 0 { - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. 
+ if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) } + return + } + if err := s.fc.onData(uint32(size)); err != nil { + s.mu.Unlock() t.closeStream(s) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? @@ -344,11 +376,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // Received the end of stream from the client. s.mu.Lock() if s.state != streamDone { - if s.state == streamWriteDone { - s.state = streamDone - } else { - s.state = streamReadDone - } + s.state = streamReadDone } s.mu.Unlock() s.write(recvMsg{err: io.EOF}) @@ -420,7 +448,7 @@ func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) e } if err != nil { t.Close() - return ConnectionErrorf("transport: %v", err) + return ConnectionErrorf(true, err, "transport: %v", err) } } return nil @@ -435,13 +463,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } s.headerOk = true s.mu.Unlock() - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } for k, v := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) } @@ -468,7 +503,7 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s headersSent = true } s.mu.Unlock() - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() @@ -481,9 +516,13 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s Name: "grpc-status", Value: strconv.Itoa(int(statusCode)), }) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)}) // Attach the trailer metadata. for k, v := range s.trailer { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) } @@ -503,18 +542,25 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { // TODO(zhaoq): Support multi-writers for a single stream. 
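Both transports now call wait with two extra channels (done and goAway); the server side passes nil for both, which simply disables those cases because a receive from a nil channel blocks forever. The helper itself lives elsewhere in the package and is not shown in this patch; inferred from its call sites, it plausibly has a shape like the sketch below, so treat the exact error mapping as an assumption:

    // Plausible shape of the five-argument wait helper, inferred from its
    // call sites: done fires when the stream received its final status,
    // goAway when the server is draining, closing when the transport shuts
    // down, and proceed when the caller may continue (quota or write lock).
    func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) {
        select {
        case <-ctx.Done():
            return 0, ContextErr(ctx.Err())
        case <-done:
            return 0, io.EOF
        case <-goAway:
            return 0, ErrStreamDrain
        case <-closing:
            return 0, ErrConnClosing
        case i := <-proceed:
            return i, nil
        }
    }
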
var writeHeaderFrame bool s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return StreamErrorf(codes.Unknown, "the stream has been done") + } if !s.headerOk { writeHeaderFrame = true s.headerOk = true } s.mu.Unlock() if writeHeaderFrame { - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } p := http2.HeadersFrameParam{ StreamID: s.id, BlockFragment: t.hBuf.Bytes(), @@ -522,7 +568,7 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } if err := t.framer.writeHeaders(false, p); err != nil { t.Close() - return ConnectionErrorf("transport: %v", err) + return ConnectionErrorf(true, err, "transport: %v", err) } t.writableChan <- 0 } @@ -534,13 +580,13 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { size := http2MaxFrameLen s.sendQuotaPool.add(0) // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) if err != nil { return err } t.sendQuotaPool.add(0) // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) if err != nil { if _, ok := err.(StreamError); ok { t.sendQuotaPool.cancel() @@ -566,7 +612,11 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { t.framer.adjustNumWriters(1) // Got some quota. Try to acquire writing privilege on the // transport. - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok { + // Return the connection quota back. + t.sendQuotaPool.add(ps) + } if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. It queues @@ -576,13 +626,23 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } return err } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(ps) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) + } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } var forceFlush bool if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { forceFlush = true } if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { t.Close() - return ConnectionErrorf("transport: %v", err) + return ConnectionErrorf(true, err, "transport: %v", err) } if t.framer.adjustNumWriters(-1) == 0 { t.framer.flushWrite() @@ -627,6 +687,17 @@ func (t *http2Server) controller() { } case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) + case *goAway: + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + // The transport is closing. 
+ return + } + sid := t.maxStreamID + t.state = draining + t.mu.Unlock() + t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil) case *flushIO: t.framer.flushWrite() case *ping: @@ -672,23 +743,32 @@ func (t *http2Server) Close() (err error) { func (t *http2Server) closeStream(s *Stream) { t.mu.Lock() delete(t.activeStreams, s.id) - t.mu.Unlock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) + if t.state == draining && len(t.activeStreams) == 0 { + defer t.Close() } + t.mu.Unlock() + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() s.mu.Lock() + if q := s.fc.resetPendingData(); q > 0 { + if w := t.fc.onRead(q); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } if s.state == streamDone { s.mu.Unlock() return } s.state = streamDone s.mu.Unlock() - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() } func (t *http2Server) RemoteAddr() net.Addr { return t.conn.RemoteAddr() } + +func (t *http2Server) Drain() { + t.controlBuf.put(&goAway{}) +} diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index fec4e475..79da5126 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -35,6 +35,7 @@ package transport import ( "bufio" + "bytes" "fmt" "io" "net" @@ -52,7 +53,7 @@ import ( const ( // The primary user agent - primaryUA = "grpc-go/0.11" + primaryUA = "grpc-go/1.0" // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues @@ -62,13 +63,14 @@ const ( ) var ( - clientPreface = []byte(http2.ClientPreface) - http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ http2.ErrCodeNo: codes.Internal, http2.ErrCodeProtocol: codes.Internal, http2.ErrCodeInternal: codes.Internal, http2.ErrCodeFlowControl: codes.ResourceExhausted, http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, http2.ErrCodeFrameSize: codes.Internal, http2.ErrCodeRefusedStream: codes.Unavailable, http2.ErrCodeCancel: codes.Canceled, @@ -76,6 +78,7 @@ var ( http2.ErrCodeConnect: codes.Internal, http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.FailedPrecondition, } statusCodeConvTab = map[codes.Code]http2.ErrCode{ codes.Internal: http2.ErrCodeInternal, @@ -89,6 +92,9 @@ var ( // Records the states during HPACK decoding. Must be reset once the // decoding of the entire headers are finished. type decodeState struct { + err error // first error encountered decoding + + encoding string // statusCode caches the stream status received from the trailer // the server sent. Client side only. statusCode codes.Code @@ -101,25 +107,11 @@ type decodeState struct { mdata map[string][]string } -// An hpackDecoder decodes HTTP2 headers which may span multiple frames. 
-type hpackDecoder struct { - h *hpack.Decoder - state decodeState - err error // The err when decoding -} - -// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame. -type headerFrame interface { - Header() http2.FrameHeader - HeaderBlockFragment() []byte - HeadersEnded() bool -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. func isReservedHeader(hdr string) bool { - if hdr[0] == ':' { + if hdr != "" && hdr[0] == ':' { return true } switch hdr { @@ -136,98 +128,86 @@ func isReservedHeader(hdr string) bool { } } -func newHPACKDecoder() *hpackDecoder { - d := &hpackDecoder{} - d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) { - switch f.Name { - case "content-type": - if !strings.Contains(f.Value, "application/grpc") { - d.err = StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected header") - return - } - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.state.statusCode = codes.Code(code) - case "grpc-message": - d.state.statusDesc = f.Value - case "grpc-timeout": - d.state.timeoutSet = true - var err error - d.state.timeout, err = timeoutDecode(f.Value) - if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err) - return - } - case ":path": - d.state.method = f.Value - default: - if !isReservedHeader(f.Name) { - if f.Name == "user-agent" { - i := strings.LastIndex(f.Value, " ") - if i == -1 { - // There is no application user agent string being set. - return - } - // Extract the application user agent string. - f.Value = f.Value[:i] - } - if d.state.mdata == nil { - d.state.mdata = make(map[string][]string) - } - k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) +// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders +// that should be propagated into metadata visible to users. +func isWhitelistedPseudoHeader(hdr string) bool { + switch hdr { + case ":authority": + return true + default: + return false + } +} + +func (d *decodeState) setErr(err error) { + if d.err == nil { + d.err = err + } +} + +func validContentType(t string) bool { + e := "application/grpc" + if !strings.HasPrefix(t, e) { + return false + } + // Support variations on the content-type + // (e.g. "application/grpc+blah", "application/grpc;blah"). 
+ if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' { + return false + } + return true +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + if !validContentType(f.Value) { + d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) + return + } + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) + return + } + d.statusCode = codes.Code(code) + case "grpc-message": + d.statusDesc = decodeGrpcMessage(f.Value) + case "grpc-timeout": + d.timeoutSet = true + var err error + d.timeout, err = decodeTimeout(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) + return + } + case ":path": + d.method = f.Value + default: + if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. return } - d.state.mdata[k] = append(d.state.mdata[k], v) + // Extract the application user agent string. + f.Value = f.Value[:i] } + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) + if err != nil { + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) + return + } + d.mdata[k] = append(d.mdata[k], v) } - }) - return d -} - -func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) } - - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) - } - endHeaders = true - } - - if err == nil && d.err != nil { - err = d.err - } - return -} - -func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) - } - - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) - } - endHeaders = true - } - - if err == nil && d.err != nil { - err = d.err - } - return } type timeoutUnit uint8 @@ -272,7 +252,7 @@ func div(d, r time.Duration) int64 { } // TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. 
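A few concrete inputs for validContentType above show what the prefix-plus-delimiter check accepts. A hypothetical table-driven test (the literals and the test name are illustrative, and the snippet assumes it sits in the transport package with "testing" imported):

    func TestValidContentType(t *testing.T) {
        for _, tc := range []struct {
            in   string
            want bool
        }{
            {"application/grpc", true},
            {"application/grpc+proto", true},         // '+' variant accepted
            {"application/grpc;charset=utf-8", true}, // ';' variant accepted
            {"application/grpc-web", false},          // '-' is neither '+' nor ';'
            {"application/json", false},              // wrong prefix
        } {
            if got := validContentType(tc.in); got != tc.want {
                t.Errorf("validContentType(%q) = %v, want %v", tc.in, got, tc.want)
            }
        }
    }
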
-func timeoutEncode(t time.Duration) string { +func encodeTimeout(t time.Duration) string { if d := div(t, time.Nanosecond); d <= maxTimeoutValue { return strconv.FormatInt(d, 10) + "n" } @@ -292,7 +272,7 @@ func timeoutEncode(t time.Duration) string { return strconv.FormatInt(div(t, time.Hour), 10) + "H" } -func timeoutDecode(s string) (time.Duration, error) { +func decodeTimeout(s string) (time.Duration, error) { size := len(s) if size < 2 { return 0, fmt.Errorf("transport: timeout string is too short: %q", s) @@ -309,6 +289,80 @@ func timeoutDecode(s string) (time.Duration, error) { return d * time.Duration(t), nil } +const ( + spaceByte = ' ' + tildaByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". +// It checks to see if each individual byte in msg is an +// allowable byte, and then either percent encoding or passing it through. +// When percent encoding, the byte is converted into hexadecimal notation +// with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c < tildaByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c >= spaceByte && c < tildaByte && c != percentByte { + buf.WriteByte(c) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", c)) + } + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseInt(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + type framer struct { numWriters int32 reader io.Reader @@ -318,10 +372,11 @@ type framer struct { func newFramer(conn net.Conn) *framer { f := &framer{ - reader: conn, + reader: bufio.NewReaderSize(conn, http2IOBufSize), writer: bufio.NewWriterSize(conn, http2IOBufSize), } f.fr = http2.NewFramer(f.writer, f.reader) + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } @@ -449,3 +504,7 @@ func (f *framer) flushWrite() error { func (f *framer) readFrame() (http2.Frame, error) { return f.fr.ReadFrame() } + +func (f *framer) errorDetail() error { + return f.fr.ErrorDetail() +} diff --git a/vendor/google.golang.org/grpc/transport/pre_go16.go b/vendor/google.golang.org/grpc/transport/pre_go16.go new file mode 100644 index 00000000..33d91c17 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/pre_go16.go @@ -0,0 +1,51 @@ +// +build !go1.6 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + if deadline, ok := ctx.Deadline(); ok { + dialer.Timeout = deadline.Sub(time.Now()) + } + return dialer.Dial(network, address) +} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index 71b562fd..b39c33c2 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -44,7 +44,6 @@ import ( "io" "net" "sync" - "time" "golang.org/x/net/context" "golang.org/x/net/trace" @@ -63,13 +62,11 @@ type recvMsg struct { err error } -func (recvMsg) isItem() bool { - return true -} +func (*recvMsg) item() {} // All items in an out of a recvBuffer should be the same type. type item interface { - isItem() bool + item() } // recvBuffer is an unbounded channel of item. @@ -89,12 +86,14 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r item) { b.mu.Lock() defer b.mu.Unlock() - b.backlog = append(b.backlog, r) - select { - case b.c <- b.backlog[0]: - b.backlog = b.backlog[1:] - default: + if len(b.backlog) == 0 { + select { + case b.c <- r: + return + default: + } } + b.backlog = append(b.backlog, r) } func (b *recvBuffer) load() { @@ -120,10 +119,11 @@ func (b *recvBuffer) get() <-chan item { // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - ctx context.Context - recv *recvBuffer - last *bytes.Reader // Stores the remaining data in the previous calls. - err error + ctx context.Context + goAway chan struct{} + recv *recvBuffer + last *bytes.Reader // Stores the remaining data in the previous calls. + err error } // Read reads the next len(p) bytes from last. 
If last is drained, it tries to @@ -141,6 +141,8 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) { select { case <-r.ctx.Done(): return 0, ContextErr(r.ctx.Err()) + case <-r.goAway: + return 0, ErrStreamDrain case i := <-r.recv.get(): r.recv.load() m := i.(*recvMsg) @@ -158,7 +160,7 @@ const ( streamActive streamState = iota streamWriteDone // EndStream sent streamReadDone // EndStream received - streamDone // sendDone and recvDone or RSTStreamFrame is sent or received. + streamDone // the entire stream is finished. ) // Stream represents an RPC in the transport layer. @@ -169,12 +171,18 @@ type Stream struct { // ctx is the associated context of the stream. ctx context.Context cancel context.CancelFunc + // done is closed when the final status arrives. + done chan struct{} + // goAway is closed when the server sent GoAways signal before this stream was initiated. + goAway chan struct{} // method records the associated RPC method of the stream. - method string - buf *recvBuffer - dec io.Reader - fc *inFlow - recvQuota uint32 + method string + recvCompress string + sendCompress string + buf *recvBuffer + dec io.Reader + fc *inFlow + recvQuota uint32 // The accumulated inbound quota pending for window update. updateQuota uint32 // The handler to control the window update procedure for both this @@ -201,6 +209,29 @@ type Stream struct { statusDesc string } +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + +// Done returns a chanel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// GoAway returns a channel which is closed when the server sent GoAways signal +// before this stream was initiated. +func (s *Stream) GoAway() <-chan struct{} { + return s.goAway +} + // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no // header metadata or iii) the stream is cancelled/expired. @@ -208,6 +239,8 @@ func (s *Stream) Header() (metadata.MD, error) { select { case <-s.ctx.Done(): return nil, ContextErr(s.ctx.Err()) + case <-s.goAway: + return nil, ErrStreamDrain case <-s.headerChan: return s.header.Copy(), nil } @@ -286,20 +319,18 @@ func (s *Stream) Read(p []byte) (n int, err error) { return } -type key int - // The key to save transport.Stream in the context. -const streamKey = key(0) +type streamKey struct{} // newContextWithStream creates a new context from ctx and attaches stream // to it. func newContextWithStream(ctx context.Context, stream *Stream) context.Context { - return context.WithValue(ctx, streamKey, stream) + return context.WithValue(ctx, streamKey{}, stream) } // StreamFromContext returns the stream saved in ctx. func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { - s, ok = ctx.Value(streamKey).(*Stream) + s, ok = ctx.Value(streamKey{}).(*Stream) return } @@ -310,6 +341,7 @@ const ( reachable transportState = iota unreachable closing + draining ) // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -323,36 +355,56 @@ type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string // Dialer specifies how to dial a network address. 
- Dialer func(string, time.Duration) (net.Conn, error) - // AuthOptions stores the credentials required to setup a client connection and/or issue RPCs. - AuthOptions []credentials.Credentials - // Timeout specifies the timeout for dialing a client connection. - Timeout time.Duration + Dialer func(context.Context, string) (net.Conn, error) + // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to setup a client connection. + TransportCredentials credentials.TransportCredentials } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) { - return newHTTP2Client(target, opts) +func NewClientTransport(ctx context.Context, target string, opts ConnectOptions) (ClientTransport, error) { + return newHTTP2Client(ctx, target, opts) } // Options provides additional hints and information for message // transmission. type Options struct { - // Indicate whether it is the last piece for this stream. + // Last indicates whether this write is the last piece for + // this stream. Last bool - // The hint to transport impl whether the data could be buffered for - // batching write. Transport impl can feel free to ignore it. + + // Delay is a hint to the transport implementation for whether + // the data could be buffered for a batching write. The + // Transport implementation may ignore the hint. Delay bool } // CallHdr carries the information of a particular RPC. type CallHdr struct { - Host string // peer host - Method string // the operation to perform on the specified host + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // RecvCompress specifies the compression algorithm applied on + // inbound messages. + RecvCompress string + + // SendCompress specifies the compression algorithm applied on + // outbound messages. + SendCompress string + + // Flush indicates whether a new stream command should be sent + // to the peer without waiting for the first data. This is + // only a hint. The transport may modify the flush decision + // for performance purposes. + Flush bool } -// ClientTransport is the common interface for all gRPC client side transport +// ClientTransport is the common interface for all gRPC client-side transport // implementations. type ClientTransport interface { // Close tears down this transport. Once it returns, the transport @@ -360,6 +412,10 @@ type ClientTransport interface { // is called only once. Close() error + // GracefulClose starts to tear down the transport. It stops accepting + // new RPCs and waits for the completion of the pending RPCs. + GracefulClose() error + // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. Write(s *Stream, data []byte, opts *Options) error @@ -379,25 +435,45 @@ type ClientTransport interface { // and create a new one) in error case. It should not return nil // once the transport is initiated. Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2).
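ConnectOptions above trades the (address, timeout) Dialer for a context-aware func(context.Context, string) (net.Conn, error). The sketch below shows one way a dialer of that shape could look; it assumes Go 1.7+ for net.Dialer.DialContext (the patch's own pre_go16.go shim instead copies the context deadline into Dialer.Timeout), and dialWithContext plus the loopback address are illustrative choices, not part of the patch.

```go
package main

import (
	"fmt"
	"net"
	"time"

	"golang.org/x/net/context"
)

// dialWithContext matches the new ConnectOptions.Dialer shape:
// func(context.Context, string) (net.Conn, error). DialContext (Go 1.7+)
// honors the context's cancellation and deadline directly.
func dialWithContext(ctx context.Context, addr string) (net.Conn, error) {
	var d net.Dialer
	return d.DialContext(ctx, "tcp", addr)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	conn, err := dialWithContext(ctx, "127.0.0.1:9") // port 9 (discard) is usually closed
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	conn.Close()
	fmt.Println("connected")
}
```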
+ GoAway() <-chan struct{} } -// ServerTransport is the common interface for all gRPC server side transport +// ServerTransport is the common interface for all gRPC server-side transport // implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. type ServerTransport interface { - // WriteStatus sends the status of a stream to the client. - WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error - // Write sends the data for the given stream. - Write(s *Stream, data []byte, opts *Options) error - // WriteHeader sends the header metedata for the given stream. - WriteHeader(s *Stream, md metadata.MD) error // HandleStreams receives incoming streams using the given handler. HandleStreams(func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. + // WriteStatus is the final call made on a stream and always + // occurs. + WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error + // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error + // RemoteAddr returns the remote network address. RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() } // StreamErrorf creates an StreamError with the specified error code and description. @@ -409,9 +485,11 @@ func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError { } // ConnectionErrorf creates an ConnectionError with the specified error description. -func ConnectionErrorf(format string, a ...interface{}) ConnectionError { +func ConnectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, } } @@ -419,14 +497,36 @@ func ConnectionErrorf(format string, a ...interface{}) ConnectionError { // entire connection and the retry of all the active streams. type ConnectionError struct { Desc string + temp bool + err error } func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: desc = %q", e.Desc) } -// Define some common ConnectionErrors. -var ErrConnClosing = ConnectionError{Desc: "transport is closing"} +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = ConnectionErrorf(true, nil, "transport is closing") + // ErrStreamDrain indicates that the stream is rejected by the server because + // the server stops accepting new RPCs. + ErrStreamDrain = StreamErrorf(codes.Unavailable, "the server stops accepting new RPCs") +) // StreamError is an error that only affects one stream within a connection. 
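The rewritten ServerTransport comments above spell out the per-stream contract: WriteHeader and Write may be skipped, while WriteStatus is always the final call. Below is a small sketch of a handler that follows that ordering; serveStreams, the package name, and the metadata value are hypothetical, and only the interface methods confirmed by this hunk are used.

```go
// Package streamdemo is an illustrative, hypothetical package name.
package streamdemo

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/transport"
)

// serveStreams wires a ServerTransport to a handler that follows the
// documented ordering: WriteHeader (optional), Write (optional), then
// WriteStatus exactly once as the final call on every stream.
func serveStreams(st transport.ServerTransport) {
	st.HandleStreams(func(s *transport.Stream) {
		if err := st.WriteHeader(s, metadata.Pairs("x-demo", "1")); err != nil {
			return
		}
		// More data writes could follow before the status; Last is only a hint.
		if err := st.Write(s, []byte("hello"), &transport.Options{Last: false}); err != nil {
			return
		}
		// WriteStatus is always the last call made on the stream.
		_ = st.WriteStatus(s, codes.OK, "")
	})
}
```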
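ConnectionError now records whether the failure is temporary and what caused it, via the new temp and err fields and the Temporary and Origin methods. The sketch below shows how calling code might inspect that; isTemporary is a hypothetical helper, while ConnectionError and ErrConnClosing come straight from the hunk above.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/transport"
)

// isTemporary reports whether err advertises itself as retryable through a
// Temporary method, the same shape net.Error uses; the patch makes
// transport.ConnectionError satisfy it.
func isTemporary(err error) bool {
	t, ok := err.(interface {
		Temporary() bool
	})
	return ok && t.Temporary()
}

func main() {
	var err error = transport.ErrConnClosing // built as ConnectionErrorf(true, nil, ...)
	fmt.Println(isTemporary(err))            // true: the caller may retry on a fresh connection

	// Origin never returns nil; with no wrapped error it returns the
	// ConnectionError itself.
	fmt.Println(err.(transport.ConnectionError).Origin())
}
```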
type StreamError struct { @@ -451,12 +551,25 @@ func ContextErr(err error) StreamError { // wait blocks until it can receive from ctx.Done, closing, or proceed. // If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. +// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise +// it returns the StreamError for ctx.Err. +// If it receives from goAway, it returns 0, ErrStreamDrain. // If it receives from closing, it returns 0, ErrConnClosing. // If it receives from proceed, it returns the received integer, nil. -func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) { +func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) { select { case <-ctx.Done(): return 0, ContextErr(ctx.Err()) + case <-done: + // User cancellation has precedence. + select { + case <-ctx.Done(): + return 0, ContextErr(ctx.Err()) + default: + } + return 0, io.EOF + case <-goAway: + return 0, ErrStreamDrain case <-closing: return 0, ErrConnClosing case i := <-proceed:
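The reworked wait helper threads two extra channels (done and goAway) through the select and uses a nested non-blocking select so that user cancellation wins over a simultaneously ready done channel. A stripped-down sketch of just that precedence trick follows; awaitQuota is a hypothetical name, the goAway case is omitted for brevity, and a plain error stands in for ErrConnClosing.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

var errConnClosing = errors.New("transport is closing") // stand-in for ErrConnClosing

// awaitQuota is a stripped-down analogue of the transport wait helper: it
// returns a value from proceed unless the context is done, the stream is
// done, or the connection is closing. When ctx and done are ready at the
// same time, the nested select makes user cancellation win.
func awaitQuota(ctx context.Context, done, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-done:
		// User cancellation has precedence over a normal end of stream.
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		default:
		}
		return 0, io.EOF
	case <-closing:
		return 0, errConnClosing
	case i := <-proceed:
		return i, nil
	}
}

func main() {
	done := make(chan struct{})
	close(done)
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // both ctx and done are ready; cancellation must win
	_, err := awaitQuota(ctx, done, nil, nil) // nil channels simply never fire
	fmt.Println(err)                          // context canceled
}
```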