Merge pull request #584 from runcom/start-failed

adjust status on container start failure
Mrunal Patel 2017-06-12 07:26:22 -07:00 committed by GitHub
commit 2a6db15113
8 changed files with 163 additions and 12 deletions

@@ -10,6 +10,7 @@ import (
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/pools"
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/utils"
"golang.org/x/net/context"
@@ -108,7 +109,7 @@ func redirectResponseToOutputStream(tty bool, outputStream, errorStream io.Write
}
var err error
if tty {
_, err = io.Copy(outputStream, conn)
_, err = pools.Copy(outputStream, conn)
} else {
// TODO
}
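The streaming changes in this commit (attach above, exec and port-forward below) replace io.Copy with pools.Copy from github.com/docker/docker/pkg/pools. Judging from the call sites, pools.Copy mirrors io.Copy's (dst, src) signature but takes its intermediate copy buffer from a shared pool of reusable buffers instead of allocating a fresh one per call, which matters when many container streams are pumped concurrently. A minimal, self-contained sketch of the drop-in replacement; the reader and writer here are made-up stand-ins:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	// Drop-in for io.Copy: same arguments and results, but the intermediate
	// copy buffer is borrowed from a shared pool rather than allocated per call.
	src := strings.NewReader("container output")
	var dst bytes.Buffer

	n, err := pools.Copy(&dst, src)
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes: %q\n", n, dst.String())
}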

@@ -7,6 +7,7 @@ import (
"os/exec"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/pools"
"github.com/kubernetes-incubator/cri-o/oci"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
@@ -67,11 +68,11 @@ func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader,
})
if stdin != nil {
go io.Copy(p, stdin)
go pools.Copy(p, stdin)
}
if stdout != nil {
go io.Copy(stdout, p)
go pools.Copy(stdout, p)
}
cmdErr = execCmd.Wait()
@@ -85,7 +86,7 @@ func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader,
if err != nil {
return err
}
go io.Copy(w, stdin)
go pools.Copy(w, stdin)
execCmd.Stdin = r
}
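In the non-tty branch above, the exec path feeds the client's stdin to the exec'd process through a pipe: a goroutine copies the incoming stream into the write end while the command reads from the read end (the r, w pair is presumably created with os.Pipe just before the hunk, which is not shown). A self-contained sketch of that wiring, with this process's own stdin standing in for the stream handed in by the kubelet:

package main

import (
	"os"
	"os/exec"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	execCmd := exec.Command("cat")
	execCmd.Stdout = os.Stdout

	// Pump the client stream into the write end of a pipe; the child reads
	// from the read end. Closing the write end signals EOF to the child.
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	go func() {
		pools.Copy(w, os.Stdin)
		w.Close()
	}()
	execCmd.Stdin = r

	if err := execCmd.Run(); err != nil {
		panic(err)
	}
}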

@@ -8,6 +8,7 @@ import (
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/pools"
"github.com/kubernetes-incubator/cri-o/oci"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
@@ -78,7 +79,7 @@ func (ss streamService) PortForward(podSandboxID string, port int32, stream io.R
return fmt.Errorf("unable to do port forwarding: error creating stdin pipe: %v", err)
}
go func() {
io.Copy(inPipe, stream)
pools.Copy(inPipe, stream)
inPipe.Close()
}()

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/kubernetes-incubator/cri-o/oci"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
@@ -15,12 +16,26 @@ func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerReque
if err != nil {
return nil, err
}
if err = s.runtime.StartContainer(c); err != nil {
return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
state := s.runtime.ContainerStatus(c)
if state.Status != oci.ContainerStateCreated {
return nil, fmt.Errorf("container %s is not in created state: %s", c.ID(), state.Status)
}
s.containerStateToDisk(c)
defer func() {
// if the call to StartContainer fails below we still want to fill
// some fields of a container status. In particular, we're going to
// adjust container started/finished time and set an error to be
// returned in the Reason field for container status call.
if err != nil {
s.runtime.SetStartFailed(c, err)
}
s.containerStateToDisk(c)
}()
err = s.runtime.StartContainer(c)
if err != nil {
return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
}
resp := &pb.StartContainerResponse{}
logrus.Debugf("StartContainerResponse %+v", resp)
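Read in order after the hunk, the rewritten StartContainer now rejects containers that are not in the created state, defers a cleanup that records a failed start (SetStartFailed) and persists the container state to disk, and only then calls the runtime's StartContainer. A self-contained sketch of that control flow; the Server/Runtime/Container types below are assumed stand-ins, and only SetStartFailed, containerStateToDisk, and the deferred error check come from the diff:

package main

import (
	"errors"
	"fmt"
)

// Stub types standing in for crio's server, runtime, and container; only the
// shape of the start-failure handling matters here.
type container struct{ id string }

type runtime struct{}

func (r *runtime) StartContainer(c *container) error {
	return errors.New("runc: exec format error") // simulate a failed start
}

func (r *runtime) SetStartFailed(c *container, err error) {
	fmt.Printf("marking %s start-failed: %v\n", c.id, err)
}

type server struct{ runtime *runtime }

func (s *server) containerStateToDisk(c *container) {
	fmt.Printf("persisting state of %s\n", c.id)
}

// startContainer mirrors the control flow added in the hunk: the deferred
// block runs whether the start succeeds or fails, so a failed start is still
// recorded via SetStartFailed and the on-disk state stays in sync.
func (s *server) startContainer(c *container) (err error) {
	defer func() {
		if err != nil {
			s.runtime.SetStartFailed(c, err)
		}
		s.containerStateToDisk(c)
	}()
	err = s.runtime.StartContainer(c)
	if err != nil {
		return fmt.Errorf("failed to start container %s: %v", c.id, err)
	}
	return nil
}

func main() {
	s := &server{runtime: &runtime{}}
	if err := s.startContainer(&container{id: "abc123"}); err != nil {
		fmt.Println(err)
	}
}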

@@ -12,6 +12,12 @@ import (
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
const (
oomKilledReason = "OOMKilled"
completedReason = "Completed"
errorReason = "Error"
)
// ContainerStatus returns status of the container.
func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
logrus.Debugf("ContainerStatusRequest %+v", req)
@@ -100,11 +106,12 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq
resp.Status.ExitCode = cState.ExitCode
switch {
case cState.OOMKilled:
resp.Status.Reason = "OOMKilled"
resp.Status.Reason = oomKilledReason
case cState.ExitCode == 0:
resp.Status.Reason = "Completed"
resp.Status.Reason = completedReason
default:
resp.Status.Reason = "Error"
resp.Status.Reason = errorReason
resp.Status.Message = cState.Error
}
}
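The status hunk hoists the literal reason strings into the named constants declared above and, in the error branch, additionally surfaces the runtime's recorded error (for example the one stored by SetStartFailed after a failed start) through the Message field. A small sketch of that mapping; the containerState and containerStatus structs are assumed stand-ins for the runtime state and CRI status types touched in the diff:

package main

import "fmt"

const (
	oomKilledReason = "OOMKilled"
	completedReason = "Completed"
	errorReason     = "Error"
)

// Assumed stand-ins for the runtime container state and the CRI status.
type containerState struct {
	OOMKilled bool
	ExitCode  int32
	Error     string
}

type containerStatus struct {
	Reason  string
	Message string
}

// reasonFor mirrors the switch above: an OOM kill wins, a zero exit code is
// reported as Completed, and anything else is Error with the recorded error
// message passed through as the Message.
func reasonFor(cState containerState) containerStatus {
	var st containerStatus
	switch {
	case cState.OOMKilled:
		st.Reason = oomKilledReason
	case cState.ExitCode == 0:
		st.Reason = completedReason
	default:
		st.Reason = errorReason
		st.Message = cState.Error
	}
	return st
}

func main() {
	fmt.Printf("%+v\n", reasonFor(containerState{ExitCode: 1, Error: "failed to start container"}))
}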