2016-07-08 19:04:00 +00:00
|
|
|
package server
|
|
|
|
|
2016-07-29 22:35:10 +00:00
|
|
|
import (
|
2016-09-18 14:35:17 +00:00
|
|
|
"encoding/json"
|
2016-08-01 23:50:45 +00:00
|
|
|
"fmt"
|
2016-09-18 14:35:17 +00:00
|
|
|
"io/ioutil"
|
2017-06-03 16:17:57 +00:00
|
|
|
"net"
|
2016-08-04 14:34:30 +00:00
|
|
|
"os"
|
2016-10-07 15:59:39 +00:00
|
|
|
"path/filepath"
|
2016-09-17 14:10:35 +00:00
|
|
|
"sync"
|
2017-05-11 09:04:43 +00:00
|
|
|
"time"
|
2016-08-01 23:50:45 +00:00
|
|
|
|
2016-09-18 14:35:17 +00:00
|
|
|
"github.com/Sirupsen/logrus"
|
2016-10-18 14:48:33 +00:00
|
|
|
"github.com/containers/image/types"
|
2017-05-17 17:18:35 +00:00
|
|
|
sstorage "github.com/containers/storage"
|
2017-05-11 10:10:18 +00:00
|
|
|
"github.com/docker/docker/pkg/ioutils"
|
2016-09-20 08:27:11 +00:00
|
|
|
"github.com/docker/docker/pkg/registrar"
|
|
|
|
"github.com/docker/docker/pkg/truncindex"
|
2016-09-26 23:55:12 +00:00
|
|
|
"github.com/kubernetes-incubator/cri-o/oci"
|
2017-06-01 16:40:33 +00:00
|
|
|
"github.com/kubernetes-incubator/cri-o/pkg/annotations"
|
2017-01-19 21:36:21 +00:00
|
|
|
"github.com/kubernetes-incubator/cri-o/pkg/ocicni"
|
2016-10-18 14:48:33 +00:00
|
|
|
"github.com/kubernetes-incubator/cri-o/pkg/storage"
|
2016-11-29 12:34:15 +00:00
|
|
|
"github.com/kubernetes-incubator/cri-o/server/apparmor"
|
2016-11-23 09:41:48 +00:00
|
|
|
"github.com/kubernetes-incubator/cri-o/server/seccomp"
|
2016-09-20 08:27:11 +00:00
|
|
|
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
2017-03-22 17:58:35 +00:00
|
|
|
"github.com/opencontainers/selinux/go-selinux/label"
|
2017-06-03 16:17:57 +00:00
|
|
|
knet "k8s.io/apimachinery/pkg/util/net"
|
2016-10-26 11:23:53 +00:00
|
|
|
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
2017-06-15 20:56:17 +00:00
|
|
|
"k8s.io/kubernetes/pkg/kubelet/network/hostport"
|
2017-02-08 13:56:20 +00:00
|
|
|
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
|
2017-06-16 21:22:20 +00:00
|
|
|
iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
|
|
|
|
utildbus "k8s.io/kubernetes/pkg/util/dbus"
|
|
|
|
utilexec "k8s.io/kubernetes/pkg/util/exec"
|
|
|
|
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
|
2016-07-29 22:35:10 +00:00
|
|
|
)
|
|
|
|
|
2016-07-19 18:53:57 +00:00
|
|
|
const (
	// runtimeAPIVersion is the kubelet CRI API version this server implements.
	runtimeAPIVersion = "v1alpha1"
	// shutdownFile is a marker file checked at startup; when present it is
	// presumably created during system shutdown — see
	// cleanupSandboxesOnShutdown, which stops all sandboxes and removes it.
	shutdownFile = "/var/lib/crio/crio.shutdown"
)
|
|
|
|
|
2017-06-08 20:08:29 +00:00
|
|
|
// isTrue reports whether an annotation value is the literal string "true",
// the encoding this package uses for boolean sandbox/container annotations.
// (Fixes the misspelled parameter name "annotaton".)
func isTrue(annotation string) bool {
	return annotation == "true"
}
|
|
|
|
|
2017-02-08 13:56:20 +00:00
|
|
|
// streamService implements streaming.Runtime.
type streamService struct {
	runtimeServer *Server // needed by Exec() endpoint
	// streamServer is the kubelet streaming server that serves the
	// exec/attach/port-forward HTTP endpoints.
	streamServer streaming.Server
	// Embedded so streamService satisfies streaming.Runtime; the concrete
	// method implementations are provided elsewhere in this package.
	streaming.Runtime
}
|
|
|
|
|
2016-07-19 18:53:57 +00:00
|
|
|
// Server implements the RuntimeService and ImageService
type Server struct {
	config  Config       // server configuration (copied at construction)
	runtime *oci.Runtime // OCI runtime wrapper used to drive containers
	store   sstorage.Store
	// storageImageServer and storageRuntimeServer mediate image pulls and
	// container layer management on top of the storage store.
	storageImageServer   storage.ImageServer
	storageRuntimeServer storage.RuntimeServer
	// stateLock guards state (the in-memory sandbox/container maps);
	// updateLock serializes whole-state refreshes in update().
	stateLock  sync.Mutex
	updateLock sync.RWMutex
	state      *serverState
	netPlugin  ocicni.CNIPlugin
	hostportManager hostport.HostPortManager
	// Name registrars and truncated-ID indexes for pods and containers;
	// they enforce uniqueness and support ID-prefix lookups.
	podNameIndex *registrar.Registrar
	podIDIndex   *truncindex.TruncIndex
	ctrNameIndex *registrar.Registrar
	ctrIDIndex   *truncindex.TruncIndex
	imageContext *types.SystemContext

	// seccomp/AppArmor support detected at startup and the profiles to apply.
	seccompEnabled bool
	seccompProfile seccomp.Seccomp

	appArmorEnabled bool
	appArmorProfile string

	// stream serves the exec/attach/port-forward streaming endpoints.
	stream streamService
}
|
|
|
|
|
2017-02-08 14:11:52 +00:00
|
|
|
// GetExec returns exec stream request
func (s *Server) GetExec(req *pb.ExecRequest) (*pb.ExecResponse, error) {
	// Delegate to the kubelet streaming server, which allocates the URL the
	// client will connect to for the exec stream.
	return s.stream.streamServer.GetExec(req)
}
|
|
|
|
|
|
|
|
// GetAttach returns attach stream request
func (s *Server) GetAttach(req *pb.AttachRequest) (*pb.AttachResponse, error) {
	// Delegate to the kubelet streaming server, which allocates the URL the
	// client will connect to for the attach stream.
	return s.stream.streamServer.GetAttach(req)
}
|
|
|
|
|
|
|
|
// GetPortForward returns port forward stream request
func (s *Server) GetPortForward(req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
	// Delegate to the kubelet streaming server, which allocates the URL the
	// client will connect to for the port-forward stream.
	return s.stream.streamServer.GetPortForward(req)
}
|
|
|
|
|
2016-10-07 14:20:04 +00:00
|
|
|
// loadContainer reconstructs a single container's in-memory state from the
// OCI config.json persisted in the storage store, re-registering its name,
// ID and sandbox membership with the server. Called from restore/update for
// containers that survived a crio restart.
func (s *Server) loadContainer(id string) error {
	config, err := s.store.FromContainerDirectory(id, "config.json")
	if err != nil {
		return err
	}
	var m rspec.Spec
	if err = json.Unmarshal(config, &m); err != nil {
		return err
	}
	// Kubernetes labels are stored JSON-encoded inside an OCI annotation.
	labels := make(map[string]string)
	if err = json.Unmarshal([]byte(m.Annotations[annotations.Labels]), &labels); err != nil {
		return err
	}
	name := m.Annotations[annotations.Name]
	name, err = s.reserveContainerName(id, name)
	if err != nil {
		return err
	}
	// The deferred release reads the named return-less shared `err`, so any
	// later failure in this function rolls back the name reservation.
	defer func() {
		if err != nil {
			s.releaseContainerName(name)
		}
	}()

	var metadata pb.ContainerMetadata
	if err = json.Unmarshal([]byte(m.Annotations[annotations.Metadata]), &metadata); err != nil {
		return err
	}
	// The owning sandbox must already have been loaded (sandboxes are
	// restored before their containers).
	sb := s.getSandbox(m.Annotations[annotations.SandboxID])
	if sb == nil {
		return fmt.Errorf("could not get sandbox with id %s, skipping", m.Annotations[annotations.SandboxID])
	}

	// Boolean annotations are stored as the string "true"/"false".
	tty := isTrue(m.Annotations[annotations.TTY])
	stdin := isTrue(m.Annotations[annotations.Stdin])
	stdinOnce := isTrue(m.Annotations[annotations.StdinOnce])

	containerPath, err := s.store.ContainerRunDirectory(id)
	if err != nil {
		return err
	}

	containerDir, err := s.store.ContainerDirectory(id)
	if err != nil {
		return err
	}

	// The image annotation is optional; img stays nil when it is absent.
	var img *pb.ImageSpec
	image, ok := m.Annotations[annotations.Image]
	if ok {
		img = &pb.ImageSpec{
			Image: image,
		}
	}

	kubeAnnotations := make(map[string]string)
	if err = json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil {
		return err
	}

	created, err := time.Parse(time.RFC3339Nano, m.Annotations[annotations.Created])
	if err != nil {
		return err
	}

	ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations[annotations.LogPath], sb.netNs(), labels, kubeAnnotations, img, &metadata, sb.id, tty, stdin, stdinOnce, sb.privileged, sb.trusted, containerDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
	if err != nil {
		return err
	}

	// Best effort: refresh runtime state from disk; errors are ignored here
	// (containerStateFromDisk's result is deliberately not checked).
	s.containerStateFromDisk(ctr)

	s.addContainer(ctr)
	return s.ctrIDIndex.Add(id)
}
|
|
|
|
|
2017-05-11 10:10:18 +00:00
|
|
|
// containerStateFromDisk restores a container's persisted state file and
// then refreshes it against the live runtime. Returns an error only when
// the on-disk state cannot be read.
func (s *Server) containerStateFromDisk(c *oci.Container) error {
	if err := c.FromDisk(); err != nil {
		return err
	}
	// ignore errors, this is a best effort to have up-to-date info about
	// a given container before its state gets stored
	s.runtime.UpdateStatus(c)

	return nil
}
|
|
|
|
|
2017-05-11 10:03:59 +00:00
|
|
|
// containerStateToDisk refreshes a container's runtime status and persists
// it as JSON to the container's state path.
func (s *Server) containerStateToDisk(c *oci.Container) error {
	// ignore errors, this is a best effort to have up-to-date info about
	// a given container before its state gets stored
	s.runtime.UpdateStatus(c)

	// Write through an atomic file writer so a crash mid-write cannot leave
	// a truncated state file behind.
	jsonSource, err := ioutils.NewAtomicFileWriter(c.StatePath(), 0644)
	if err != nil {
		return err
	}
	defer jsonSource.Close()
	enc := json.NewEncoder(jsonSource)
	return enc.Encode(s.runtime.ContainerStatus(c))
}
|
|
|
|
|
2016-11-23 17:16:21 +00:00
|
|
|
func configNetNsPath(spec rspec.Spec) (string, error) {
|
|
|
|
for _, ns := range spec.Linux.Namespaces {
|
|
|
|
if ns.Type != rspec.NetworkNamespace {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if ns.Path == "" {
|
|
|
|
return "", fmt.Errorf("empty networking namespace")
|
|
|
|
}
|
|
|
|
|
|
|
|
return ns.Path, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return "", fmt.Errorf("missing networking namespace")
|
|
|
|
}
|
|
|
|
|
2016-09-19 07:39:13 +00:00
|
|
|
// loadSandbox reconstructs a sandbox (and its infra container) from the
// OCI config.json persisted in the storage store, re-registering names,
// IDs, SELinux labels and the network namespace with the server. Each
// reservation is rolled back via a deferred check on the shared `err` if a
// later step fails.
func (s *Server) loadSandbox(id string) error {
	config, err := s.store.FromContainerDirectory(id, "config.json")
	if err != nil {
		return err
	}
	var m rspec.Spec
	if err = json.Unmarshal(config, &m); err != nil {
		return err
	}
	// Kubernetes labels are stored JSON-encoded inside an OCI annotation.
	labels := make(map[string]string)
	if err = json.Unmarshal([]byte(m.Annotations[annotations.Labels]), &labels); err != nil {
		return err
	}
	name := m.Annotations[annotations.Name]
	name, err = s.reservePodName(id, name)
	if err != nil {
		return err
	}
	// Roll back the pod name reservation if anything below fails.
	defer func() {
		if err != nil {
			s.releasePodName(name)
		}
	}()
	var metadata pb.PodSandboxMetadata
	if err = json.Unmarshal([]byte(m.Annotations[annotations.Metadata]), &metadata); err != nil {
		return err
	}

	// Re-derive the SELinux process/mount labels from the persisted spec.
	processLabel, mountLabel, err := label.InitLabels(label.DupSecOpt(m.Process.SelinuxLabel))
	if err != nil {
		return err
	}

	kubeAnnotations := make(map[string]string)
	if err = json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil {
		return err
	}

	// Boolean annotations are stored as the string "true"/"false".
	privileged := isTrue(m.Annotations[annotations.PrivilegedRuntime])
	trusted := isTrue(m.Annotations[annotations.TrustedSandbox])

	sb := &sandbox{
		id:           id,
		name:         name,
		kubeName:     m.Annotations[annotations.KubeName],
		logDir:       filepath.Dir(m.Annotations[annotations.LogPath]),
		labels:       labels,
		containers:   oci.NewMemoryStore(),
		processLabel: processLabel,
		mountLabel:   mountLabel,
		annotations:  kubeAnnotations,
		metadata:     &metadata,
		shmPath:      m.Annotations[annotations.ShmPath],
		privileged:   privileged,
		trusted:      trusted,
		resolvPath:   m.Annotations[annotations.ResolvPath],
	}

	// We add a netNS only if we can load a permanent one.
	// Otherwise, the sandbox will live in the host namespace.
	netNsPath, err := configNetNsPath(m)
	if err == nil {
		netNS, nsErr := netNsGet(netNsPath, sb.name)
		// If we can't load the networking namespace
		// because it's closed, we just set the sb netns
		// pointer to nil. Otherwise we return an error.
		if nsErr != nil && nsErr != errSandboxClosedNetNS {
			return nsErr
		}

		sb.netns = netNS
	}

	s.addSandbox(sb)

	// Roll back the sandbox registration if anything below fails.
	defer func() {
		if err != nil {
			s.removeSandbox(sb.id)
		}
	}()

	sandboxPath, err := s.store.ContainerRunDirectory(id)
	if err != nil {
		return err
	}

	sandboxDir, err := s.store.ContainerDirectory(id)
	if err != nil {
		return err
	}

	// The sandbox's infra container has its own name/ID, stored in
	// dedicated annotations.
	cname, err := s.reserveContainerName(m.Annotations[annotations.ContainerID], m.Annotations[annotations.ContainerName])
	if err != nil {
		return err
	}
	// Roll back the infra container name reservation on later failure.
	defer func() {
		if err != nil {
			s.releaseContainerName(cname)
		}
	}()

	created, err := time.Parse(time.RFC3339Nano, m.Annotations[annotations.Created])
	if err != nil {
		return err
	}

	scontainer, err := oci.NewContainer(m.Annotations[annotations.ContainerID], cname, sandboxPath, m.Annotations[annotations.LogPath], sb.netNs(), labels, kubeAnnotations, nil, nil, id, false, false, false, privileged, trusted, sandboxDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
	if err != nil {
		return err
	}

	// Best effort: refresh runtime state from disk; the error is
	// deliberately not checked.
	s.containerStateFromDisk(scontainer)

	if err = label.ReserveLabel(processLabel); err != nil {
		return err
	}
	sb.infraContainer = scontainer
	if err = s.ctrIDIndex.Add(scontainer.ID()); err != nil {
		return err
	}
	if err = s.podIDIndex.Add(id); err != nil {
		return err
	}
	return nil
}
|
|
|
|
|
2016-10-07 14:20:04 +00:00
|
|
|
func (s *Server) restore() {
|
2016-10-18 14:48:33 +00:00
|
|
|
containers, err := s.store.Containers()
|
2016-10-08 12:36:14 +00:00
|
|
|
if err != nil && !os.IsNotExist(err) {
|
2016-10-18 14:48:33 +00:00
|
|
|
logrus.Warnf("could not read containers and sandboxes: %v", err)
|
|
|
|
}
|
|
|
|
pods := map[string]*storage.RuntimeContainerMetadata{}
|
|
|
|
podContainers := map[string]*storage.RuntimeContainerMetadata{}
|
|
|
|
for _, container := range containers {
|
2017-04-19 19:17:10 +00:00
|
|
|
metadata, err2 := s.storageRuntimeServer.GetContainerMetadata(container.ID)
|
2016-10-18 14:48:33 +00:00
|
|
|
if err2 != nil {
|
|
|
|
logrus.Warnf("error parsing metadata for %s: %v, ignoring", container.ID, err2)
|
2016-10-07 14:20:04 +00:00
|
|
|
continue
|
|
|
|
}
|
2016-10-18 14:48:33 +00:00
|
|
|
if metadata.Pod {
|
|
|
|
pods[container.ID] = &metadata
|
|
|
|
} else {
|
|
|
|
podContainers[container.ID] = &metadata
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for containerID, metadata := range pods {
|
|
|
|
if err = s.loadSandbox(containerID); err != nil {
|
|
|
|
logrus.Warnf("could not restore sandbox %s container %s: %v", metadata.PodID, containerID, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for containerID := range podContainers {
|
|
|
|
if err := s.loadContainer(containerID); err != nil {
|
|
|
|
logrus.Warnf("could not restore container %s: %v", containerID, err)
|
2016-10-07 14:20:04 +00:00
|
|
|
}
|
|
|
|
}
|
2016-10-18 14:48:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Update makes changes to the server's state (lists of pods and containers) to
|
|
|
|
// reflect the list of pods and containers that are stored on disk, possibly
|
|
|
|
// having been modified by other parties
|
|
|
|
func (s *Server) Update() {
|
|
|
|
logrus.Debugf("updating sandbox and container information")
|
|
|
|
if err := s.update(); err != nil {
|
|
|
|
logrus.Errorf("error updating sandbox and container information: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) update() error {
|
2017-04-04 15:24:55 +00:00
|
|
|
s.updateLock.Lock()
|
|
|
|
defer s.updateLock.Unlock()
|
|
|
|
|
2016-10-18 14:48:33 +00:00
|
|
|
containers, err := s.store.Containers()
|
2016-10-08 12:36:14 +00:00
|
|
|
if err != nil && !os.IsNotExist(err) {
|
2016-10-18 14:48:33 +00:00
|
|
|
logrus.Warnf("could not read containers and sandboxes: %v", err)
|
|
|
|
return err
|
2016-10-07 14:20:04 +00:00
|
|
|
}
|
2016-10-18 14:48:33 +00:00
|
|
|
newPods := map[string]*storage.RuntimeContainerMetadata{}
|
|
|
|
oldPods := map[string]string{}
|
|
|
|
removedPods := map[string]string{}
|
|
|
|
newPodContainers := map[string]*storage.RuntimeContainerMetadata{}
|
|
|
|
oldPodContainers := map[string]string{}
|
|
|
|
removedPodContainers := map[string]string{}
|
|
|
|
for _, container := range containers {
|
|
|
|
if s.hasSandbox(container.ID) {
|
|
|
|
// FIXME: do we need to reload/update any info about the sandbox?
|
|
|
|
oldPods[container.ID] = container.ID
|
|
|
|
oldPodContainers[container.ID] = container.ID
|
2016-10-07 14:20:04 +00:00
|
|
|
continue
|
|
|
|
}
|
2016-10-18 14:48:33 +00:00
|
|
|
if s.getContainer(container.ID) != nil {
|
|
|
|
// FIXME: do we need to reload/update any info about the container?
|
|
|
|
oldPodContainers[container.ID] = container.ID
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// not previously known, so figure out what it is
|
2017-04-19 19:17:10 +00:00
|
|
|
metadata, err2 := s.storageRuntimeServer.GetContainerMetadata(container.ID)
|
2016-10-18 14:48:33 +00:00
|
|
|
if err2 != nil {
|
|
|
|
logrus.Errorf("error parsing metadata for %s: %v, ignoring", container.ID, err2)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if metadata.Pod {
|
|
|
|
newPods[container.ID] = &metadata
|
|
|
|
} else {
|
|
|
|
newPodContainers[container.ID] = &metadata
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.ctrIDIndex.Iterate(func(id string) {
|
|
|
|
if _, ok := oldPodContainers[id]; !ok {
|
|
|
|
// this container's ID wasn't in the updated list -> removed
|
|
|
|
removedPodContainers[id] = id
|
|
|
|
}
|
|
|
|
})
|
|
|
|
for removedPodContainer := range removedPodContainers {
|
|
|
|
// forget this container
|
|
|
|
c := s.getContainer(removedPodContainer)
|
2016-12-22 10:03:32 +00:00
|
|
|
if c == nil {
|
|
|
|
logrus.Warnf("bad state when getting container removed %+v", removedPodContainer)
|
|
|
|
continue
|
|
|
|
}
|
2016-10-18 14:48:33 +00:00
|
|
|
s.releaseContainerName(c.Name())
|
|
|
|
s.removeContainer(c)
|
|
|
|
if err = s.ctrIDIndex.Delete(c.ID()); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
logrus.Debugf("forgetting removed pod container %s", c.ID())
|
|
|
|
}
|
|
|
|
s.podIDIndex.Iterate(func(id string) {
|
|
|
|
if _, ok := oldPods[id]; !ok {
|
|
|
|
// this pod's ID wasn't in the updated list -> removed
|
|
|
|
removedPods[id] = id
|
|
|
|
}
|
|
|
|
})
|
|
|
|
for removedPod := range removedPods {
|
|
|
|
// forget this pod
|
|
|
|
sb := s.getSandbox(removedPod)
|
2016-12-21 21:11:51 +00:00
|
|
|
if sb == nil {
|
|
|
|
logrus.Warnf("bad state when getting pod to remove %+v", removedPod)
|
|
|
|
continue
|
|
|
|
}
|
2016-10-18 14:48:33 +00:00
|
|
|
podInfraContainer := sb.infraContainer
|
|
|
|
s.releaseContainerName(podInfraContainer.Name())
|
|
|
|
s.removeContainer(podInfraContainer)
|
|
|
|
if err = s.ctrIDIndex.Delete(podInfraContainer.ID()); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
sb.infraContainer = nil
|
|
|
|
s.releasePodName(sb.name)
|
|
|
|
s.removeSandbox(sb.id)
|
|
|
|
if err = s.podIDIndex.Delete(sb.id); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
logrus.Debugf("forgetting removed pod %s", sb.id)
|
|
|
|
}
|
|
|
|
for sandboxID := range newPods {
|
|
|
|
// load this pod
|
|
|
|
if err = s.loadSandbox(sandboxID); err != nil {
|
|
|
|
logrus.Warnf("could not load new pod sandbox %s: %v, ignoring", sandboxID, err)
|
|
|
|
} else {
|
|
|
|
logrus.Debugf("loaded new pod sandbox %s", sandboxID, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for containerID := range newPodContainers {
|
|
|
|
// load this container
|
|
|
|
if err = s.loadContainer(containerID); err != nil {
|
|
|
|
logrus.Warnf("could not load new sandbox container %s: %v, ignoring", containerID, err)
|
|
|
|
} else {
|
|
|
|
logrus.Debugf("loaded new pod container %s", containerID, err)
|
2016-09-18 14:35:17 +00:00
|
|
|
}
|
2016-09-19 07:39:13 +00:00
|
|
|
}
|
2016-10-18 14:48:33 +00:00
|
|
|
return nil
|
2016-09-18 14:35:17 +00:00
|
|
|
}
|
|
|
|
|
2016-09-20 08:27:11 +00:00
|
|
|
func (s *Server) reservePodName(id, name string) (string, error) {
|
|
|
|
if err := s.podNameIndex.Reserve(name, id); err != nil {
|
|
|
|
if err == registrar.ErrNameReserved {
|
|
|
|
id, err := s.podNameIndex.Get(name)
|
|
|
|
if err != nil {
|
2016-12-11 16:46:37 +00:00
|
|
|
logrus.Warnf("conflict, pod name %q already reserved", name)
|
2016-09-20 08:27:11 +00:00
|
|
|
return "", err
|
|
|
|
}
|
2016-12-11 16:46:37 +00:00
|
|
|
return "", fmt.Errorf("conflict, name %q already reserved for pod %q", name, id)
|
2016-09-20 08:27:11 +00:00
|
|
|
}
|
2016-12-11 16:46:37 +00:00
|
|
|
return "", fmt.Errorf("error reserving pod name %q", name)
|
2016-09-20 08:27:11 +00:00
|
|
|
}
|
|
|
|
return name, nil
|
|
|
|
}
|
|
|
|
|
2016-09-26 22:35:34 +00:00
|
|
|
// releasePodName releases a pod name reservation previously made with
// reservePodName, making it available for reuse.
func (s *Server) releasePodName(name string) {
	s.podNameIndex.Release(name)
}
|
|
|
|
|
2016-10-04 23:00:04 +00:00
|
|
|
func (s *Server) reserveContainerName(id, name string) (string, error) {
|
|
|
|
if err := s.ctrNameIndex.Reserve(name, id); err != nil {
|
|
|
|
if err == registrar.ErrNameReserved {
|
|
|
|
id, err := s.ctrNameIndex.Get(name)
|
|
|
|
if err != nil {
|
2016-12-11 16:46:37 +00:00
|
|
|
logrus.Warnf("conflict, ctr name %q already reserved", name)
|
2016-10-04 23:00:04 +00:00
|
|
|
return "", err
|
|
|
|
}
|
2016-12-11 16:46:37 +00:00
|
|
|
return "", fmt.Errorf("conflict, name %q already reserved for ctr %q", name, id)
|
2016-10-04 23:00:04 +00:00
|
|
|
}
|
2016-12-11 16:46:37 +00:00
|
|
|
return "", fmt.Errorf("error reserving ctr name %s", name)
|
2016-10-04 23:00:04 +00:00
|
|
|
}
|
|
|
|
return name, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// releaseContainerName releases a container name reservation previously
// made with reserveContainerName, making it available for reuse.
func (s *Server) releaseContainerName(name string) {
	s.ctrNameIndex.Release(name)
}
|
|
|
|
|
2017-03-14 18:19:18 +00:00
|
|
|
// cleanupSandboxesOnShutdown Remove all running Sandboxes on system shutdown
|
|
|
|
func (s *Server) cleanupSandboxesOnShutdown() {
|
|
|
|
_, err := os.Stat(shutdownFile)
|
|
|
|
if err == nil || !os.IsNotExist(err) {
|
|
|
|
logrus.Debugf("shutting down all sandboxes, on shutdown")
|
2017-05-15 10:12:29 +00:00
|
|
|
s.StopAllPodSandboxes()
|
2017-03-14 18:19:18 +00:00
|
|
|
err = os.Remove(shutdownFile)
|
|
|
|
if err != nil {
|
|
|
|
logrus.Warnf("Failed to remove %q", shutdownFile)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-18 14:48:33 +00:00
|
|
|
// Shutdown attempts to shut down the server's storage cleanly
//
// NOTE(review): cleanupSandboxesOnShutdown is called on every clean server
// shutdown, not just on system halt — so this also fires on a normal
// crio.service restart, stopping containers that could have been left
// running. Confirm whether that is intended (the original comments raise
// the same concern).
func (s *Server) Shutdown() error {
	s.cleanupSandboxesOnShutdown()
	// The first return value of store.Shutdown is deliberately discarded.
	_, err := s.store.Shutdown(false)
	return err
}
|
|
|
|
|
2016-07-19 18:53:57 +00:00
|
|
|
// New creates a new Server with options provided
//
// Construction order: storage store -> image/runtime services -> OCI
// runtime -> CNI plugin -> Server struct -> seccomp/apparmor profiles ->
// name/ID indexes -> restore persisted state -> streaming server.
func New(config *Config) (*Server, error) {
	store, err := sstorage.GetStore(sstorage.StoreOptions{
		RunRoot:            config.RunRoot,
		GraphRoot:          config.Root,
		GraphDriverName:    config.Storage,
		GraphDriverOptions: config.StorageOptions,
	})
	if err != nil {
		return nil, err
	}

	imageService, err := storage.GetImageService(store, config.DefaultTransport, config.InsecureRegistries)
	if err != nil {
		return nil, err
	}

	storageRuntimeService := storage.GetRuntimeService(imageService, config.PauseImage)
	// NOTE(review): this check re-tests the err from GetImageService above,
	// which is already known to be nil — GetRuntimeService returns no error.
	// Dead code; kept byte-identical here.
	if err != nil {
		return nil, err
	}

	// Runtime state directory for crio.
	if err := os.MkdirAll("/var/run/crio", 0755); err != nil {
		return nil, err
	}

	r, err := oci.New(config.Runtime, config.RuntimeUntrustedWorkload, config.DefaultWorkloadTrust, config.Conmon, config.ConmonEnv, config.CgroupManager)
	if err != nil {
		return nil, err
	}
	sandboxes := make(map[string]*sandbox)
	containers := oci.NewMemoryStore()
	netPlugin, err := ocicni.InitCNI(config.NetworkDir, config.PluginDir)
	if err != nil {
		return nil, err
	}
	// Make sure the kubelet masquerade chain exists before hostport
	// management needs it.
	iptInterface := utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4)
	iptInterface.EnsureChain(utiliptables.TableNAT, iptablesproxy.KubeMarkMasqChain)
	hostportManager := hostport.NewHostportManager()
	s := &Server{
		runtime:              r,
		store:                store,
		storageImageServer:   imageService,
		storageRuntimeServer: storageRuntimeService,
		netPlugin:            netPlugin,
		hostportManager:      hostportManager,
		config:               *config,
		state: &serverState{
			sandboxes:  sandboxes,
			containers: containers,
		},
		seccompEnabled:  seccomp.IsEnabled(),
		appArmorEnabled: apparmor.IsEnabled(),
		appArmorProfile: config.ApparmorProfile,
	}
	// Load and decode the seccomp profile only when the kernel supports it.
	if s.seccompEnabled {
		seccompProfile, fileErr := ioutil.ReadFile(config.SeccompProfile)
		if fileErr != nil {
			return nil, fmt.Errorf("opening seccomp profile (%s) failed: %v", config.SeccompProfile, fileErr)
		}
		var seccompConfig seccomp.Seccomp
		if jsonErr := json.Unmarshal(seccompProfile, &seccompConfig); jsonErr != nil {
			return nil, fmt.Errorf("decoding seccomp profile failed: %v", jsonErr)
		}
		s.seccompProfile = seccompConfig
	}

	// Install the default AppArmor profile when it is the configured one.
	if s.appArmorEnabled && s.appArmorProfile == apparmor.DefaultApparmorProfile {
		if apparmorErr := apparmor.EnsureDefaultApparmorProfile(); apparmorErr != nil {
			return nil, fmt.Errorf("ensuring the default apparmor profile is installed failed: %v", apparmorErr)
		}
	}

	s.podIDIndex = truncindex.NewTruncIndex([]string{})
	s.podNameIndex = registrar.NewRegistrar()
	s.ctrIDIndex = truncindex.NewTruncIndex([]string{})
	s.ctrNameIndex = registrar.NewRegistrar()
	s.imageContext = &types.SystemContext{
		SignaturePolicyPath: config.ImageConfig.SignaturePolicyPath,
	}

	// Re-register anything that survived a previous run, then honor any
	// pending system-shutdown marker.
	s.restore()
	s.cleanupSandboxesOnShutdown()

	// Resolve the streaming bind address; fall back to an auto-chosen
	// address when the configured one does not parse.
	bindAddress := net.ParseIP(config.StreamAddress)
	if bindAddress == nil {
		bindAddress, err = knet.ChooseBindAddress(net.IP{0, 0, 0, 0})
		if err != nil {
			return nil, err
		}
	}

	// Validate the configured stream port (accepts numeric or service name).
	_, err = net.LookupPort("tcp", config.StreamPort)
	if err != nil {
		return nil, err
	}

	// Prepare streaming server
	streamServerConfig := streaming.DefaultConfig
	streamServerConfig.Addr = net.JoinHostPort(bindAddress.String(), config.StreamPort)
	s.stream.runtimeServer = s
	s.stream.streamServer, err = streaming.NewServer(streamServerConfig, s.stream)
	if err != nil {
		return nil, fmt.Errorf("unable to create streaming server")
	}

	// TODO: Should this be started somewhere else?
	go func() {
		s.stream.streamServer.Start(true)
	}()

	logrus.Debugf("sandboxes: %v", s.state.sandboxes)
	logrus.Debugf("containers: %v", s.state.containers)
	return s, nil
}
|
2016-07-20 01:30:05 +00:00
|
|
|
|
2016-08-01 23:05:37 +00:00
|
|
|
// serverState is the in-memory view of all known sandboxes and containers.
// Access is guarded by Server.stateLock.
type serverState struct {
	sandboxes  map[string]*sandbox
	containers oci.ContainerStorer
}
|
|
|
|
|
|
|
|
func (s *Server) addSandbox(sb *sandbox) {
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Lock()
|
2016-09-20 08:27:11 +00:00
|
|
|
s.state.sandboxes[sb.id] = sb
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Unlock()
|
|
|
|
}
|
|
|
|
|
2016-09-20 08:27:11 +00:00
|
|
|
func (s *Server) getSandbox(id string) *sandbox {
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Lock()
|
2016-09-20 08:27:11 +00:00
|
|
|
sb := s.state.sandboxes[id]
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Unlock()
|
|
|
|
return sb
|
2016-08-01 23:05:37 +00:00
|
|
|
}
|
2016-08-01 17:39:42 +00:00
|
|
|
|
2016-09-20 08:27:11 +00:00
|
|
|
func (s *Server) hasSandbox(id string) bool {
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Lock()
|
2016-09-20 08:27:11 +00:00
|
|
|
_, ok := s.state.sandboxes[id]
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Unlock()
|
2016-08-01 17:39:42 +00:00
|
|
|
return ok
|
|
|
|
}
|
|
|
|
|
2016-09-27 08:40:08 +00:00
|
|
|
func (s *Server) removeSandbox(id string) {
|
|
|
|
s.stateLock.Lock()
|
|
|
|
delete(s.state.sandboxes, id)
|
|
|
|
s.stateLock.Unlock()
|
|
|
|
}
|
|
|
|
|
2016-08-01 17:39:42 +00:00
|
|
|
func (s *Server) addContainer(c *oci.Container) {
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Lock()
|
2016-08-01 17:39:42 +00:00
|
|
|
sandbox := s.state.sandboxes[c.Sandbox()]
|
2016-09-20 08:27:11 +00:00
|
|
|
// TODO(runcom): handle !ok above!!! otherwise it panics!
|
2016-08-01 17:39:42 +00:00
|
|
|
sandbox.addContainer(c)
|
2016-10-04 23:50:29 +00:00
|
|
|
s.state.containers.Add(c.ID(), c)
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Unlock()
|
|
|
|
}
|
|
|
|
|
2016-10-04 23:50:29 +00:00
|
|
|
func (s *Server) getContainer(id string) *oci.Container {
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Lock()
|
2016-10-04 23:50:29 +00:00
|
|
|
c := s.state.containers.Get(id)
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Unlock()
|
|
|
|
return c
|
2016-08-01 17:39:42 +00:00
|
|
|
}
|
2016-08-25 19:14:59 +00:00
|
|
|
|
server: fix PortForward panic
During "Port forwarding" e2e tests, the following panic happened:
```
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0x64981d]
goroutine 52788 [running]:
panic(0x1830ee0, 0xc4200100c0)
/usr/lib/golang/src/runtime/panic.go:500 +0x1a1
github.com/kubernetes-incubator/cri-o/oci.(*Runtime).UpdateStatus(0xc4202afc00,
0x0, 0x0, 0x0)
/home/amurdaca/go/src/github.com/kubernetes-incubator/cri-o/oci/oci.go:549
+0x7d
github.com/kubernetes-incubator/cri-o/server.streamService.PortForward(0xc42026e000,
0x0, 0x0, 0x0, 0x0, 0xc420d9af40, 0x40, 0xc400000050, 0x7fe660659a28,
0xc4201cd0e0, ...)
```
The issue is `streamService.PortForward` assumed the first argument to
be the sandbox's infra container ID, thus trying to get it from memory
store using `.state.containers.Get`. Since that ID is of the sandbox
itself, it fails to get the container object from memory and panics in
`UpdateStatus`.
Fix it by looking for the sandbox's infra container ID starting from a
sandbox ID.
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-05-28 15:22:46 +00:00
|
|
|
// GetSandboxContainer returns the infra container for a given sandbox
|
|
|
|
func (s *Server) GetSandboxContainer(id string) *oci.Container {
|
|
|
|
sb := s.getSandbox(id)
|
|
|
|
return sb.infraContainer
|
|
|
|
}
|
|
|
|
|
2017-05-29 11:16:27 +00:00
|
|
|
// GetContainer returns a container by its ID
func (s *Server) GetContainer(id string) *oci.Container {
	// Exported wrapper around the internal, lock-guarded lookup.
	return s.getContainer(id)
}
|
|
|
|
|
2016-08-25 19:14:59 +00:00
|
|
|
func (s *Server) removeContainer(c *oci.Container) {
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Lock()
|
2016-08-25 19:14:59 +00:00
|
|
|
sandbox := s.state.sandboxes[c.Sandbox()]
|
|
|
|
sandbox.removeContainer(c)
|
2016-10-04 23:50:29 +00:00
|
|
|
s.state.containers.Delete(c.ID())
|
2016-09-17 14:10:35 +00:00
|
|
|
s.stateLock.Unlock()
|
2016-08-25 19:14:59 +00:00
|
|
|
}
|