Merge pull request #766 from mrunalp/ctr_status_fixes

Container status fixes
Mrunal Patel 2017-08-18 07:39:56 -07:00 committed by GitHub
commit 959aab4fd5
15 changed files with 204 additions and 148 deletions


@@ -357,11 +357,23 @@ func (c *ContainerServer) LoadSandbox(id string) error {
return err
}
scontainer, err := oci.NewContainer(m.Annotations[annotations.ContainerID], cname, sandboxPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, kubeAnnotations, "", nil, id, false, false, false, privileged, trusted, sandboxDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
scontainer, err := oci.NewContainer(m.Annotations[annotations.ContainerID], cname, sandboxPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, kubeAnnotations, "", "", "", nil, id, false, false, false, privileged, trusted, sandboxDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
if err != nil {
return err
}
if m.Annotations[annotations.Volumes] != "" {
containerVolumes := []oci.ContainerVolume{}
if err = json.Unmarshal([]byte(m.Annotations[annotations.Volumes]), &containerVolumes); err != nil {
return fmt.Errorf("failed to unmarshal container volumes: %v", err)
}
if containerVolumes != nil {
for _, cv := range containerVolumes {
scontainer.AddVolume(cv)
}
}
}
c.ContainerStateFromDisk(scontainer)
if err = label.ReserveLabel(processLabel); err != nil {
@@ -447,6 +459,16 @@ func (c *ContainerServer) LoadContainer(id string) error {
img = ""
}
imgName, ok := m.Annotations[annotations.ImageName]
if !ok {
imgName = ""
}
imgRef, ok := m.Annotations[annotations.ImageRef]
if !ok {
imgRef = ""
}
kubeAnnotations := make(map[string]string)
if err = json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil {
return err
@@ -457,7 +479,7 @@ func (c *ContainerServer) LoadContainer(id string) error {
return err
}
ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, kubeAnnotations, img, &metadata, sb.ID(), tty, stdin, stdinOnce, sb.Privileged(), sb.Trusted(), containerDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, kubeAnnotations, img, imgName, imgRef, &metadata, sb.ID(), tty, stdin, stdinOnce, sb.Privileged(), sb.Trusted(), containerDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
if err != nil {
return err
}
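Below is a minimal standalone sketch of the annotation round trip these load paths rely on: the volume list is serialized into the io.kubernetes.cri-o.Volumes annotation at create time and unmarshalled back on restart. The local ContainerVolume type and the example values are illustrative stand-ins for oci.ContainerVolume.

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in for oci.ContainerVolume; the JSON tags match the ones the
// commit introduces.
type ContainerVolume struct {
	ContainerPath string `json:"container_path"`
	HostPath      string `json:"host_path"`
	Readonly      bool   `json:"readonly"`
}

func main() {
	// What CreateContainer stores under the Volumes annotation.
	vols := []ContainerVolume{{ContainerPath: "/data", HostPath: "/var/lib/data", Readonly: true}}
	b, err := json.Marshal(vols)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // [{"container_path":"/data","host_path":"/var/lib/data","readonly":true}]

	// What LoadSandbox/LoadContainer do with the same string after a restart.
	restored := []ContainerVolume{}
	if err := json.Unmarshal(b, &restored); err != nil {
		fmt.Printf("failed to unmarshal container volumes: %v\n", err)
		return
	}
	fmt.Println(len(restored)) // 1
}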


@@ -153,6 +153,7 @@ type Sandbox struct {
resolvPath string
hostname string
portMappings []*hostport.PortMapping
stopped bool
}
const (
@@ -381,6 +382,19 @@ func (s *Sandbox) NetNsCreate() error {
return nil
}
// SetStopped sets the sandbox state to stopped.
// This should be set after a stop operation succeeds
// so that subsequent stops can return fast.
func (s *Sandbox) SetStopped() {
s.stopped = true
}
// Stopped returns whether the sandbox state has been
// set to stopped.
func (s *Sandbox) Stopped() bool {
return s.stopped
}
// NetNsJoin attempts to join the sandbox to an existing network namespace
// This will fail if the sandbox is already part of a network namespace
func (s *Sandbox) NetNsJoin(nspath, name string) error {
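As a quick illustration of why the new flag exists (a toy stand-in, not the real libkpod/sandbox type): once a stop has succeeded, later stop requests can return without touching the runtime again.

package main

import "fmt"

// Toy stand-in for sandbox.Sandbox, just to show the short-circuit.
type Sandbox struct{ stopped bool }

func (s *Sandbox) SetStopped()   { s.stopped = true }
func (s *Sandbox) Stopped() bool { return s.stopped }

func stopSandbox(sb *Sandbox) {
	if sb.Stopped() {
		fmt.Println("already stopped, returning fast")
		return
	}
	// ... stop the containers and tear down networking here ...
	sb.SetStopped()
	fmt.Println("stopped")
}

func main() {
	sb := &Sandbox{}
	stopSandbox(sb) // does the work and marks the sandbox stopped
	stopSandbox(sb) // returns immediately
}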


@@ -43,6 +43,16 @@ type Container struct {
// this is the /var/lib/storage/... directory
dir string
stopSignal string
imageName string
imageRef string
volumes []ContainerVolume
}
// ContainerVolume is a bind mount for the container.
type ContainerVolume struct {
ContainerPath string `json:"container_path"`
HostPath string `json:"host_path"`
Readonly bool `json:"readonly"`
}
// ContainerState represents the status of a container.
@@ -57,7 +67,7 @@ type ContainerState struct {
}
// NewContainer creates a container object.
func NewContainer(id string, name string, bundlePath string, logPath string, netns ns.NetNS, labels map[string]string, annotations map[string]string, image string, metadata *pb.ContainerMetadata, sandbox string, terminal bool, stdin bool, stdinOnce bool, privileged bool, trusted bool, dir string, created time.Time, stopSignal string) (*Container, error) {
func NewContainer(id string, name string, bundlePath string, logPath string, netns ns.NetNS, labels map[string]string, annotations map[string]string, image string, imageName string, imageRef string, metadata *pb.ContainerMetadata, sandbox string, terminal bool, stdin bool, stdinOnce bool, privileged bool, trusted bool, dir string, created time.Time, stopSignal string) (*Container, error) {
state := &ContainerState{}
state.Created = created
c := &Container{
@@ -76,6 +86,8 @@ func NewContainer(id string, name string, bundlePath string, logPath string, net
metadata: metadata,
annotations: annotations,
image: image,
imageName: imageName,
imageRef: imageRef,
dir: dir,
state: state,
stopSignal: stopSignal,
@@ -155,6 +167,16 @@ func (c *Container) Image() string {
return c.image
}
// ImageName returns the image name of the container.
func (c *Container) ImageName() string {
return c.imageName
}
// ImageRef returns the image ref of the container.
func (c *Container) ImageRef() string {
return c.imageRef
}
// Sandbox returns the sandbox name of the container.
func (c *Container) Sandbox() string {
return c.sandbox
@@ -189,3 +211,14 @@ func (c *Container) State() *ContainerState {
defer c.opLock.Unlock()
return c.state
}
// AddVolume adds a volume to the list of container volumes.
func (c *Container) AddVolume(v ContainerVolume) {
c.volumes = append(c.volumes, v)
}
// Volumes returns the list of container volumes.
func (c *Container) Volumes() []ContainerVolume {
return c.volumes
}


@@ -496,8 +496,16 @@ func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp
func (r *Runtime) StopContainer(c *Container, timeout int64) error {
c.opLock.Lock()
defer c.opLock.Unlock()
// Check if the process is around before sending a signal
err := unix.Kill(c.state.Pid, 0)
if err == unix.ESRCH {
c.state.Finished = time.Now()
return nil
}
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", c.id, c.GetStopSignal()); err != nil {
return err
return fmt.Errorf("failed to stop container %s, %v", c.id, err)
}
if timeout == -1 {
// default 10 seconds delay
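For reference, a small self-contained sketch of the liveness probe used above: sending signal 0 checks that the PID exists without delivering anything, and ESRCH means the process is already gone. processAlive is an assumed helper name, not part of the PR.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// processAlive reports whether a PID still exists. Signal 0 performs the
// existence/permission check without actually sending a signal.
func processAlive(pid int) bool {
	return unix.Kill(pid, 0) != unix.ESRCH
}

func main() {
	fmt.Println(processAlive(os.Getpid())) // true: the current process exists
}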


@@ -22,6 +22,12 @@ const (
// Image is the container image ID annotation
Image = "io.kubernetes.cri-o.Image"
// ImageName is the container image name annotation
ImageName = "io.kubernetes.cri-o.ImageName"
// ImageRef is the container image ref annotation
ImageRef = "io.kubernetes.cri-o.ImageRef"
// KubeName is the kubernetes name annotation
KubeName = "io.kubernetes.cri-o.KubeName"
@@ -63,6 +69,9 @@ const (
// StdinOnce is the stdin_once annotation
StdinOnce = "io.kubernetes.cri-o.StdinOnce"
// Volumes is the volumes annotation
Volumes = "io.kubernetes.cri-o.Volumes"
)
// ContainerType values


@@ -11,6 +11,7 @@ import (
"strings"
"time"
"github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink"
"github.com/kubernetes-incubator/cri-o/libkpod"
@@ -39,22 +40,23 @@ const (
seccompLocalhostPrefix = "localhost/"
)
func addOCIBindMounts(sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, specgen *generate.Generator) error {
func addOCIBindMounts(sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, specgen *generate.Generator) ([]oci.ContainerVolume, error) {
volumes := []oci.ContainerVolume{}
mounts := containerConfig.GetMounts()
for _, mount := range mounts {
dest := mount.ContainerPath
if dest == "" {
return fmt.Errorf("Mount.ContainerPath is empty")
return nil, fmt.Errorf("Mount.ContainerPath is empty")
}
src := mount.HostPath
if src == "" {
return fmt.Errorf("Mount.HostPath is empty")
return nil, fmt.Errorf("Mount.HostPath is empty")
}
if _, err := os.Stat(src); err != nil && os.IsNotExist(err) {
if err1 := os.MkdirAll(src, 0644); err1 != nil {
return fmt.Errorf("Failed to mkdir %s: %s", src, err)
return nil, fmt.Errorf("Failed to mkdir %s: %s", src, err)
}
}
@@ -67,14 +69,20 @@ func addOCIBindMounts(sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig,
if mount.SelinuxRelabel {
// Need a way in kubernetes to determine if the volume is shared or private
if err := label.Relabel(src, sb.MountLabel(), true); err != nil && err != unix.ENOTSUP {
return fmt.Errorf("relabel failed %s: %v", src, err)
return nil, fmt.Errorf("relabel failed %s: %v", src, err)
}
}
volumes = append(volumes, oci.ContainerVolume{
ContainerPath: dest,
HostPath: src,
Readonly: mount.Readonly,
})
specgen.AddBindMount(src, dest, options)
}
return nil
return volumes, nil
}
func addImageVolumes(rootfs string, s *Server, containerInfo *storage.ContainerInfo, specgen *generate.Generator, mountLabel string) error {
@@ -326,10 +334,6 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
return nil, err
}
if err = s.Runtime().UpdateStatus(container); err != nil {
return nil, err
}
s.addContainer(container)
if err = s.CtrIDIndex().Add(containerID); err != nil {
@@ -360,10 +364,17 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
specgen.HostSpecific = true
specgen.ClearProcessRlimits()
if err := addOCIBindMounts(sb, containerConfig, &specgen); err != nil {
containerVolumes, err := addOCIBindMounts(sb, containerConfig, &specgen)
if err != nil {
return nil, err
}
volumesJSON, err := json.Marshal(containerVolumes)
if err != nil {
return nil, err
}
specgen.AddAnnotation(annotations.Volumes, string(volumesJSON))
// Add cgroup mount so container process can introspect its own limits
specgen.AddCgroupsMount("ro")
@@ -565,6 +576,41 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
}
image = images[0]
// Resolve the imageName and imageRef that ContainerStatus reports
imageName := image
status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), image)
if err != nil {
return nil, err
}
imageRef := status.ID
//
// TODO: https://github.com/kubernetes-incubator/cri-o/issues/531
//
//for _, n := range status.Names {
//r, err := reference.ParseNormalizedNamed(n)
//if err != nil {
//return nil, fmt.Errorf("failed to normalize image name for ImageRef: %v", err)
//}
//if digested, isDigested := r.(reference.Canonical); isDigested {
//imageRef = reference.FamiliarString(digested)
//break
//}
//}
for _, n := range status.Names {
r, err := reference.ParseNormalizedNamed(n)
if err != nil {
return nil, fmt.Errorf("failed to normalize image name for Image: %v", err)
}
if tagged, isTagged := r.(reference.Tagged); isTagged {
imageName = reference.FamiliarString(tagged)
break
}
}
specgen.AddAnnotation(annotations.ImageName, imageName)
specgen.AddAnnotation(annotations.ImageRef, imageRef)
// bind mount the pod shm
specgen.AddBindMount(sb.ShmPath(), "/dev/shm", []string{"rw"})
@@ -727,11 +773,15 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
return nil, err
}
container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.NetNs(), labels, kubeAnnotations, image, metadata, sb.ID(), containerConfig.Tty, containerConfig.Stdin, containerConfig.StdinOnce, sb.Privileged(), sb.Trusted(), containerInfo.Dir, created, containerImageConfig.Config.StopSignal)
container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.NetNs(), labels, kubeAnnotations, image, imageName, imageRef, metadata, sb.ID(), containerConfig.Tty, containerConfig.Stdin, containerConfig.StdinOnce, sb.Privileged(), sb.Trusted(), containerInfo.Dir, created, containerImageConfig.Config.StopSignal)
if err != nil {
return nil, err
}
for _, cv := range containerVolumes {
container.AddVolume(cv)
}
return container, nil
}
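The name/ref resolution above can be hard to follow inline; here is a self-contained sketch of the tag-normalization step using the same github.com/docker/distribution/reference package. pickTaggedName is an illustrative helper name; the real code inlines this loop and uses the storage image ID for imageRef.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

// pickTaggedName returns the first of an image's known names that carries an
// explicit tag, normalized to its familiar form, falling back to whatever the
// caller asked for.
func pickTaggedName(requested string, names []string) (string, error) {
	for _, n := range names {
		r, err := reference.ParseNormalizedNamed(n)
		if err != nil {
			return "", fmt.Errorf("failed to normalize image name for Image: %v", err)
		}
		if _, isTagged := r.(reference.Tagged); isTagged {
			return reference.FamiliarString(r), nil
		}
	}
	return requested, nil
}

func main() {
	name, err := pickTaggedName("nginx", []string{"docker.io/library/nginx:latest"})
	fmt.Println(name, err) // nginx:latest <nil>
}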


@@ -20,10 +20,6 @@ func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerReq
return nil, err
}
if err := s.Runtime().UpdateStatus(c); err != nil {
return nil, fmt.Errorf("failed to update container state: %v", err)
}
cState := s.Runtime().ContainerStatus(c)
if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
if err := s.Runtime().StopContainer(c, -1); err != nil {


@@ -1,12 +1,7 @@
package server
import (
"encoding/json"
"fmt"
"github.com/docker/distribution/reference"
"github.com/kubernetes-incubator/cri-o/oci"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
@@ -26,11 +21,6 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq
return nil, err
}
if err = s.Runtime().UpdateStatus(c); err != nil {
return nil, err
}
s.ContainerStateToDisk(c)
containerID := c.ID()
resp := &pb.ContainerStatusResponse{
Status: &pb.ContainerStatus{
@@ -38,52 +28,35 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq
Metadata: c.Metadata(),
Labels: c.Labels(),
Annotations: c.Annotations(),
ImageRef: c.ImageRef(),
},
}
resp.Status.Image = &pb.ImageSpec{Image: c.ImageName()}
mounts, err := s.getMounts(containerID)
if err != nil {
return nil, err
mounts := []*pb.Mount{}
for _, cv := range c.Volumes() {
mounts = append(mounts, &pb.Mount{
ContainerPath: cv.ContainerPath,
HostPath: cv.HostPath,
Readonly: cv.Readonly,
})
}
resp.Status.Mounts = mounts
imageName := c.Image()
status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), imageName)
if err != nil {
return nil, err
}
imageRef := status.ID
//
// TODO: https://github.com/kubernetes-incubator/cri-o/issues/531
//
//for _, n := range status.Names {
//r, err := reference.ParseNormalizedNamed(n)
//if err != nil {
//return nil, fmt.Errorf("failed to normalize image name for ImageRef: %v", err)
//}
//if digested, isDigested := r.(reference.Canonical); isDigested {
//imageRef = reference.FamiliarString(digested)
//break
//}
//}
resp.Status.ImageRef = imageRef
for _, n := range status.Names {
r, err := reference.ParseNormalizedNamed(n)
if err != nil {
return nil, fmt.Errorf("failed to normalize image name for Image: %v", err)
}
if tagged, isTagged := r.(reference.Tagged); isTagged {
imageName = reference.FamiliarString(tagged)
break
}
}
resp.Status.Image = &pb.ImageSpec{Image: imageName}
cState := s.Runtime().ContainerStatus(c)
rStatus := pb.ContainerState_CONTAINER_UNKNOWN
// If we defaulted to exit code -1 earlier then we attempt to
// get the exit code from the exit file again.
// TODO: We could wait in UpdateStatus for exit file to show up.
if cState.ExitCode == -1 {
err := s.Runtime().UpdateStatus(c)
if err != nil {
logrus.Warnf("Failed to UpdateStatus of container %s: %v", c.ID(), err)
}
cState = s.Runtime().ContainerStatus(c)
}
switch cState.Status {
case oci.ContainerStateCreated:
rStatus = pb.ContainerState_CONTAINER_CREATED
@@ -120,46 +93,3 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq
logrus.Debugf("ContainerStatusResponse: %+v", resp)
return resp, nil
}
func (s *Server) getMounts(id string) ([]*pb.Mount, error) {
config, err := s.Store().FromContainerDirectory(id, "config.json")
if err != nil {
return nil, err
}
var m rspec.Spec
if err = json.Unmarshal(config, &m); err != nil {
return nil, err
}
isRO := func(m rspec.Mount) bool {
var ro bool
for _, o := range m.Options {
if o == "ro" {
ro = true
break
}
}
return ro
}
isBind := func(m rspec.Mount) bool {
var bind bool
for _, o := range m.Options {
if o == "bind" || o == "rbind" {
bind = true
break
}
}
return bind
}
mounts := []*pb.Mount{}
for _, b := range m.Mounts {
if !isBind(b) {
continue
}
mounts = append(mounts, &pb.Mount{
ContainerPath: b.Destination,
HostPath: b.Source,
Readonly: isRO(b),
})
}
return mounts, nil
}


@@ -17,9 +17,6 @@ func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest
return nil, err
}
if err := s.Runtime().UpdateStatus(c); err != nil {
return nil, err
}
cStatus := s.Runtime().ContainerStatus(c)
if cStatus.Status != oci.ContainerStateStopped {
if err := s.Runtime().StopContainer(c, req.Timeout); err != nil {
@@ -33,6 +30,6 @@ func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest
s.ContainerStateToDisk(c)
resp := &pb.StopContainerResponse{}
logrus.Debugf("StopContainerResponse: %+v", resp)
logrus.Debugf("StopContainerResponse %s: %+v", c.ID(), resp)
return resp, nil
}


@@ -60,10 +60,6 @@ func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxReque
// it's better not to panic
continue
}
if err := s.Runtime().UpdateStatus(podInfraContainer); err != nil {
return nil, err
}
cState := s.Runtime().ContainerStatus(podInfraContainer)
created := cState.Created.UnixNano()
rStatus := pb.PodSandboxState_SANDBOX_NOTREADY


@@ -38,10 +38,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
// Delete all the containers in the sandbox
for _, c := range containers {
if err := s.Runtime().UpdateStatus(c); err != nil {
return nil, fmt.Errorf("failed to update container state: %v", err)
}
if !sb.Stopped() {
cState := s.Runtime().ContainerStatus(c)
if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
if err := s.Runtime().StopContainer(c, -1); err != nil {
@@ -49,6 +46,7 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
logrus.Warnf("failed to stop container %s: %v", c.Name(), err)
}
}
}
if err := s.Runtime().DeleteContainer(c); err != nil {
return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.ID(), err)


@@ -78,19 +78,9 @@ func (s *Server) runContainer(container *oci.Container, cgroupParent string) err
if err := s.Runtime().CreateContainer(container, cgroupParent); err != nil {
return err
}
if err := s.Runtime().UpdateStatus(container); err != nil {
return err
}
if err := s.Runtime().StartContainer(container); err != nil {
return err
}
if err := s.Runtime().UpdateStatus(container); err != nil {
return err
}
return nil
}
@@ -461,7 +451,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
}
container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logPath, sb.NetNs(), labels, kubeAnnotations, "", nil, id, false, false, false, sb.Privileged(), sb.Trusted(), podContainer.Dir, created, podContainer.Config.Config.StopSignal)
container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logPath, sb.NetNs(), labels, kubeAnnotations, "", "", "", nil, id, false, false, false, sb.Privileged(), sb.Trusted(), podContainer.Dir, created, podContainer.Config.Config.StopSignal)
if err != nil {
return nil, err
}


@@ -16,11 +16,6 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
}
podInfraContainer := sb.InfraContainer()
if err = s.Runtime().UpdateStatus(podInfraContainer); err != nil {
return nil, err
}
s.ContainerStateToDisk(podInfraContainer)
cState := s.Runtime().ContainerStatus(podInfraContainer)
netNsPath, err := podInfraContainer.NetNsPath()


@@ -34,6 +34,13 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
resp := &pb.StopPodSandboxResponse{}
logrus.Warnf("could not get sandbox %s, it's probably been stopped already: %v", req.PodSandboxId, err)
logrus.Debugf("StopPodSandboxResponse %s: %+v", req.PodSandboxId, resp)
return resp, nil
}
if sb.Stopped() {
resp := &pb.StopPodSandboxResponse{}
logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp)
return resp, nil
}
@@ -70,9 +77,6 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
containers = append(containers, podInfraContainer)
for _, c := range containers {
if err := s.Runtime().UpdateStatus(c); err != nil {
return nil, err
}
cStatus := s.Runtime().ContainerStatus(c)
if cStatus.Status != oci.ContainerStateStopped {
if err := s.Runtime().StopContainer(c, -1); err != nil {
@@ -113,8 +117,9 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
logrus.Warnf("failed to stop sandbox container in pod sandbox %s: %v", sb.ID(), err)
}
sb.SetStopped()
resp := &pb.StopPodSandboxResponse{}
logrus.Debugf("StopPodSandboxResponse: %+v", resp)
logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp)
return resp, nil
}


@@ -313,15 +313,28 @@ func (s *Server) StartExitMonitor() {
logrus.Debugf("event: %v", event)
if event.Op&fsnotify.Create == fsnotify.Create {
containerID := filepath.Base(event.Name)
logrus.Debugf("container exited: %v", containerID)
logrus.Debugf("container or sandbox exited: %v", containerID)
c := s.GetContainer(containerID)
if c != nil {
logrus.Debugf("container exited and found: %v", containerID)
err := s.Runtime().UpdateStatus(c)
if err != nil {
logrus.Warnf("Failed to update container status %s: %v", c, err)
} else {
s.ContainerStateToDisk(c)
}
} else {
sb := s.GetSandbox(containerID)
if sb != nil {
c := sb.InfraContainer()
logrus.Debugf("sandbox exited and found: %v", containerID)
err := s.Runtime().UpdateStatus(c)
if err != nil {
logrus.Warnf("Failed to update sandbox infra container status %s: %v", c, err)
} else {
s.ContainerStateToDisk(c)
}
}
}
}
case err := <-watcher.Errors:
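Finally, a stripped-down, self-contained version of the exit monitor being extended above, using the same fsnotify API: every file created in the exits directory is named after a container (or sandbox infra container) ID whose status should be refreshed and written back to disk. The /var/run/crio/exits path and the watchExits name are assumptions for illustration.

package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchExits watches the directory where conmon drops exit files and logs the
// ID of every process that terminated; the real server looks the ID up as a
// container first and falls back to the sandbox infra container.
func watchExits(exitsDir string) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer watcher.Close()
	if err := watcher.Add(exitsDir); err != nil {
		return err
	}
	for {
		select {
		case event := <-watcher.Events:
			if event.Op&fsnotify.Create == fsnotify.Create {
				containerID := filepath.Base(event.Name)
				log.Printf("container or sandbox exited: %v", containerID)
				// here the server would UpdateStatus and persist state to disk
			}
		case err := <-watcher.Errors:
			return err
		}
	}
}

func main() {
	if err := watchExits("/var/run/crio/exits"); err != nil {
		log.Fatal(err)
	}
}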