1391c5c2fd
CRI-O works well with runc when stopping a container: as soon as the container process returns, every container resource, such as its rootfs, can be considered freed, and CRI-O can proceed with unmounting it. But in the case of a virtualized runtime such as Clear Containers or Kata Containers, the same rootfs is mounted into the VM, usually as a hotplugged device. This means the runtime needs to be triggered again after the container process has returned. In particular, such runtimes expect a call into "state" in order to realize the container process is no longer running; that call triggers the container to be officially stopped and the necessary unmounts to be performed.

The way this can be done from CRI-O, without impacting the runc case, is to explicitly wait for the container status to be updated to "stopped" after the container process has returned. CRI-O keeps calling into "state" as long as it does not see the container status updated properly, and generates an error after a timeout.

Both PollUpdateStatusStopped() and WaitContainerStateStopped() use goroutines in order to support a timeout definition. They follow the waitContainerStop() approach with chControl.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
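To make the mechanism concrete, here is a minimal sketch of the goroutine-plus-timeout pattern the commit describes, following the waitContainerStop() shape with chControl. The waitStateStopped helper, the statusUpdater interface, the fakeContainer stub, and the 100 ms polling interval are illustrative assumptions for this sketch, not the actual CRI-O implementation (which operates on oci.Container and the runtime directly):

package main

import (
	"fmt"
	"time"
)

// statusUpdater abstracts the two runtime interactions the wait loop needs.
// It is a hypothetical interface for this sketch only.
type statusUpdater interface {
	UpdateStatus() error // calls into the runtime's "state"
	IsStopped() bool     // true once the cached status is "stopped"
}

// waitStateStopped polls the runtime until the container status is updated
// to "stopped", giving up after timeout seconds.
func waitStateStopped(c statusUpdater, timeout int64) error {
	done := make(chan struct{})
	// chControl lets the timeout branch tell the polling goroutine to exit.
	chControl := make(chan struct{})

	go func() {
		for {
			select {
			case <-chControl:
				return
			default:
				// Every poll calls into "state"; a virtualized runtime uses
				// this call to notice that the container process has exited
				// and to perform its unmounts before reporting "stopped".
				if err := c.UpdateStatus(); err == nil && c.IsStopped() {
					close(done)
					return
				}
				time.Sleep(100 * time.Millisecond)
			}
		}
	}()

	select {
	case <-done:
		return nil
	case <-time.After(time.Duration(timeout) * time.Second):
		close(chControl)
		return fmt.Errorf("container did not reach 'stopped' before the %ds timeout", timeout)
	}
}

// fakeContainer is a stub that reports "stopped" on the Nth poll.
type fakeContainer struct{ polls, stopAfter int }

func (f *fakeContainer) UpdateStatus() error { f.polls++; return nil }
func (f *fakeContainer) IsStopped() bool     { return f.polls >= f.stopAfter }

func main() {
	c := &fakeContainer{stopAfter: 3}
	fmt.Println(waitStateStopped(c, 10)) // prints <nil> once the stub reports stopped
}

The select on done versus time.After is what gives the caller a hard bound: with runc the first poll usually succeeds immediately, while a virtualized runtime gets up to the full timeout to finish its unmounts.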
126 lines · 4.1 KiB · Go
package server

import (
	"fmt"
	"time"

	"github.com/containers/storage"
	"github.com/docker/docker/pkg/mount"
	"github.com/docker/docker/pkg/symlink"
	"github.com/kubernetes-incubator/cri-o/lib/sandbox"
	"github.com/kubernetes-incubator/cri-o/oci"
	"github.com/opencontainers/selinux/go-selinux/label"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"golang.org/x/sys/unix"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated.
func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (resp *pb.StopPodSandboxResponse, err error) {
	const operation = "stop_pod_sandbox"
	defer func() {
		recordOperation(operation, time.Now())
		recordError(operation, err)
	}()

	logrus.Debugf("StopPodSandboxRequest %+v", req)
	sb, err := s.getPodSandboxFromRequest(req.PodSandboxId)
	if err != nil {
		if err == sandbox.ErrIDEmpty {
			return nil, err
		}

		// If the sandbox isn't found we just return an empty response to
		// adhere to the CRI interface, which expects to not error out in
		// not-found cases.
		resp = &pb.StopPodSandboxResponse{}
		logrus.Warnf("could not get sandbox %s, it's probably been stopped already: %v", req.PodSandboxId, err)
		logrus.Debugf("StopPodSandboxResponse %s: %+v", req.PodSandboxId, resp)
		return resp, nil
	}

	if sb.Stopped() {
		resp = &pb.StopPodSandboxResponse{}
		logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp)
		return resp, nil
	}

	podInfraContainer := sb.InfraContainer()
	containers := sb.Containers().List()
	containers = append(containers, podInfraContainer)

	for _, c := range containers {
		cStatus := s.Runtime().ContainerStatus(c)
		if cStatus.Status != oci.ContainerStateStopped {
			timeout := int64(10)
			if err := s.Runtime().StopContainer(ctx, c, timeout); err != nil {
				return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.ID(), err)
			}
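			// Explicitly wait for the runtime to report the container as
			// stopped. runc reaches this state as soon as the process has
			// returned, but virtualized runtimes (Clear Containers, Kata
			// Containers) need this extra "state" call to notice the exit
			// and unmount the rootfs from the VM.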
			if err := s.Runtime().WaitContainerStateStopped(ctx, c, timeout); err != nil {
				return nil, fmt.Errorf("failed to get container 'stopped' status %s in pod sandbox %s: %v", c.Name(), sb.ID(), err)
			}
			if c.ID() == podInfraContainer.ID() {
				continue
			}
			if err := s.StorageRuntimeServer().StopContainer(c.ID()); err != nil && errors.Cause(err) != storage.ErrContainerUnknown {
				// assume the container is already unmounted
				logrus.Warnf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.ID(), err)
			}
		}
		s.ContainerStateToDisk(c)
	}

	// Clean up sandbox networking and close its network namespace.
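	// An empty network namespace path means the sandbox shares the host
	// network, so there is no sandbox-owned network to tear down.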
	hostNetwork := sb.NetNsPath() == ""
	s.networkStop(hostNetwork, sb)
	if err := sb.NetNsRemove(); err != nil {
		return nil, err
	}

	if err := label.ReleaseLabel(sb.ProcessLabel()); err != nil {
		return nil, err
	}

	// unmount the shm for the pod
	if sb.ShmPath() != "/dev/shm" {
		// we got namespaces in the form of
		// /var/run/containers/storage/overlay-containers/CID/userdata/shm
		// but /var/run on most systems is symlinked to /run, so we first
		// resolve the symlink and then check whether it's mounted
		fp, err := symlink.FollowSymlinkInScope(sb.ShmPath(), "/")
		if err != nil {
			return nil, err
		}
		if mounted, err := mount.Mounted(fp); err == nil && mounted {
			if err := unix.Unmount(fp, unix.MNT_DETACH); err != nil {
				return nil, err
			}
		}
	}

	if err := s.StorageRuntimeServer().StopContainer(sb.ID()); err != nil && errors.Cause(err) != storage.ErrContainerUnknown {
		logrus.Warnf("failed to stop sandbox container in pod sandbox %s: %v", sb.ID(), err)
	}
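
	// Mark the sandbox as stopped so that later StopPodSandbox calls can
	// return early via the sb.Stopped() check above.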
	sb.SetStopped()
	resp = &pb.StopPodSandboxResponse{}
	logrus.Debugf("StopPodSandboxResponse %s: %+v", sb.ID(), resp)
	return resp, nil
}

// StopAllPodSandboxes removes all pod sandboxes
func (s *Server) StopAllPodSandboxes() {
	logrus.Debugf("StopAllPodSandboxes")
	for _, sb := range s.ContainerServer.ListSandboxes() {
		pod := &pb.StopPodSandboxRequest{
			PodSandboxId: sb.ID(),
		}
		if _, err := s.StopPodSandbox(nil, pod); err != nil {
			logrus.Warnf("could not StopPodSandbox %s: %v", sb.ID(), err)
		}
	}
}