Need to clean up all pods on service poweroff
When powering off the system, we want the ocid service to shut down all containers running on the system so they can clean up properly. This patch cleans up all pods on poweroff.

The ocid-shutdown.service drops a file, /var/run/ocid.shutdown, when the system is shutting down. The ocid-shutdown.service should only be executed at system shutdown.

On bootup the sequence should be:
  start ocid.service
  start ocid-shutdown.service (this is a NO-OP)

On system shutdown:
  stop ocid-shutdown.service (creates /var/run/ocid.shutdown)
  stop ocid.service (notices /var/run/ocid.shutdown and stops all pods before exiting)

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
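The systemd units themselves are among the changed files but are not shown in the hunks below. As a rough illustration of the mechanism described above, a shutdown-only unit can be a oneshot service whose ExecStop drops the flag file; the directives below are a minimal sketch under that assumption, not the unit shipped in this commit:

[Unit]
Description=Drop the ocid shutdown flag so pods are cleaned up at poweroff
# Ordered after ocid.service, so at shutdown systemd stops this unit
# (creating the flag) before it stops ocid.service.
After=ocid.service

[Service]
Type=oneshot
# Starting the unit at boot is effectively a NO-OP apart from clearing a stale flag.
ExecStart=/usr/bin/rm -f /var/run/ocid.shutdown
# Keep the unit "active" so its ExecStop runs at system shutdown.
RemainAfterExit=yes
ExecStop=/usr/bin/touch /var/run/ocid.shutdown

[Install]
WantedBy=multi-user.target

With this ordering, "stop ocid.service" runs only after the flag file exists, which is what lets ocid decide to remove all pods before exiting.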
parent 7d7570e604
commit 54ee55493d
4 changed files with 48 additions and 1 deletion
@@ -40,7 +40,8 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
 	cState := s.runtime.ContainerStatus(c)
 	if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
 		if err := s.runtime.StopContainer(c); err != nil {
-			return nil, fmt.Errorf("failed to stop container %s: %v", c.Name(), err)
+			// Assume container is already stopped
+			logrus.Warnf("failed to stop container %s: %v", c.Name(), err)
 		}
 	}

@@ -107,3 +108,17 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
 	logrus.Debugf("RemovePodSandboxResponse %+v", resp)
 	return resp, nil
 }
+
+// RemoveAllPodSandboxes removes all pod sandboxes
+func (s *Server) RemoveAllPodSandboxes() {
+	logrus.Debugf("RemoveAllPodSandboxes")
+	s.Update()
+	for _, sb := range s.state.sandboxes {
+		pod := &pb.RemovePodSandboxRequest{
+			PodSandboxId: sb.id,
+		}
+		if _, err := s.RemovePodSandbox(nil, pod); err != nil {
+			logrus.Warnf("could not RemovePodSandbox %s: %v", sb.id, err)
+		}
+	}
+}
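The two hunks above only add RemoveAllPodSandboxes; the wiring that actually calls it when /var/run/ocid.shutdown is present lives in the other changed files. A minimal Go sketch of what that check could look like, assuming a hypothetical package, helper name, and interface (nothing below is taken from the commit):

package shutdowncheck

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/sirupsen/logrus"
)

// shutdownFile is the flag dropped by ocid-shutdown.service at poweroff.
const shutdownFile = "/var/run/ocid.shutdown"

// podRemover stands in for the ocid Server type from the diff above.
type podRemover interface {
	RemoveAllPodSandboxes()
}

// waitAndCleanup blocks until the service is asked to stop, then removes
// all pod sandboxes, but only when the shutdown flag file exists.
func waitAndCleanup(s podRemover) {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGTERM, syscall.SIGINT)
	<-sig

	if _, err := os.Stat(shutdownFile); err == nil {
		logrus.Infof("%s found, removing all pod sandboxes", shutdownFile)
		s.RemoveAllPodSandboxes()
	}
}

On a normal service restart the flag is absent, so pods keep running; only a real poweroff, where ocid-shutdown.service has already been stopped, triggers the cleanup.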