Merge pull request #207 from runcom/refactor-actions
split files under server/
commit 2e3ad167bb
23 changed files with 1341 additions and 1179 deletions

@@ -1,39 +1,11 @@
 package server
 
 import (
-	"encoding/json"
-	"errors"
 	"fmt"
-	"os"
-	"path/filepath"
-	"syscall"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/stringid"
 	"github.com/kubernetes-incubator/cri-o/oci"
-	"github.com/kubernetes-incubator/cri-o/utils"
-	"github.com/opencontainers/runc/libcontainer/label"
-	"github.com/opencontainers/runtime-tools/generate"
-	"golang.org/x/net/context"
-	"k8s.io/kubernetes/pkg/fields"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
-func (s *Server) generateContainerIDandName(podName string, name string, attempt uint32) (string, string, error) {
-	var (
-		err error
-		id  = stringid.GenerateNonCryptoID()
-	)
-	nameStr := fmt.Sprintf("%s-%s-%v", podName, name, attempt)
-	if name == "infra" {
-		nameStr = fmt.Sprintf("%s-%s", podName, name)
-	}
-	if name, err = s.reserveContainerName(id, nameStr); err != nil {
-		return "", "", err
-	}
-	return id, name, err
-}
-
 type containerRequest interface {
 	GetContainerId() string
 }
@@ -55,605 +27,3 @@ func (s *Server) getContainerFromRequest(req containerRequest) (*oci.Container, error) {
 	}
 	return c, nil
 }
[… 602 removed lines: CreateContainer, createSandboxContainer, StartContainer, StopContainer,
RemoveContainer, filterContainer, ListContainers, ContainerStatus, UpdateRuntimeConfig, ExecSync,
Exec, Attach, PortForward and Status — moved verbatim into the new server/*.go files below.]

server/container_attach.go (new file, 11 lines):
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// Attach prepares a streaming endpoint to attach to a running container.
func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachResponse, error) {
	return nil, nil
}

server/container_create.go (new file, 327 lines):
package server

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/stringid"
	"github.com/kubernetes-incubator/cri-o/oci"
	"github.com/kubernetes-incubator/cri-o/utils"
	"github.com/opencontainers/runc/libcontainer/label"
	"github.com/opencontainers/runtime-tools/generate"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// CreateContainer creates a new container in specified PodSandbox
func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) {
	logrus.Debugf("CreateContainerRequest %+v", req)
	sbID := req.GetPodSandboxId()
	if sbID == "" {
		return nil, fmt.Errorf("PodSandboxId should not be empty")
	}

	sandboxID, err := s.podIDIndex.Get(sbID)
	if err != nil {
		return nil, fmt.Errorf("PodSandbox with ID starting with %s not found: %v", sbID, err)
	}

	sb := s.getSandbox(sandboxID)
	if sb == nil {
		return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID)
	}

	// The config of the container
	containerConfig := req.GetConfig()
	if containerConfig == nil {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig is nil")
	}

	name := containerConfig.GetMetadata().GetName()
	if name == "" {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Name is empty")
	}

	attempt := containerConfig.GetMetadata().GetAttempt()
	containerID, containerName, err := s.generateContainerIDandName(sb.name, name, attempt)
	if err != nil {
		return nil, err
	}

	// containerDir is the dir for the container bundle.
	containerDir := filepath.Join(s.runtime.ContainerDir(), containerID)
	defer func() {
		if err != nil {
			s.releaseContainerName(containerName)
			err1 := os.RemoveAll(containerDir)
			if err1 != nil {
				logrus.Warnf("Failed to cleanup container directory: %v")
			}
		}
	}()

	if _, err = os.Stat(containerDir); err == nil {
		return nil, fmt.Errorf("container (%s) already exists", containerDir)
	}

	if err = os.MkdirAll(containerDir, 0755); err != nil {
		return nil, err
	}

	container, err := s.createSandboxContainer(containerID, containerName, sb, req.GetSandboxConfig(), containerDir, containerConfig)
	if err != nil {
		return nil, err
	}

	if err = s.runtime.CreateContainer(container); err != nil {
		return nil, err
	}

	if err = s.runtime.UpdateStatus(container); err != nil {
		return nil, err
	}

	s.addContainer(container)

	if err = s.ctrIDIndex.Add(containerID); err != nil {
		s.removeContainer(container)
		return nil, err
	}

	resp := &pb.CreateContainerResponse{
		ContainerId: &containerID,
	}

	logrus.Debugf("CreateContainerResponse: %+v", resp)
	return resp, nil
}

func (s *Server) createSandboxContainer(containerID string, containerName string, sb *sandbox, SandboxConfig *pb.PodSandboxConfig, containerDir string, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
	if sb == nil {
		return nil, errors.New("createSandboxContainer needs a sandbox")
	}
	// creates a spec Generator with the default spec.
	specgen := generate.New()

	// by default, the root path is an empty string.
	// here set it to be "rootfs".
	specgen.SetRootPath("rootfs")

	args := containerConfig.GetArgs()
	if args == nil {
		args = []string{"/bin/sh"}
	}
	specgen.SetProcessArgs(args)

	cwd := containerConfig.GetWorkingDir()
	if cwd == "" {
		cwd = "/"
	}
	specgen.SetProcessCwd(cwd)

	envs := containerConfig.GetEnvs()
	if envs != nil {
		for _, item := range envs {
			key := item.GetKey()
			value := item.GetValue()
			if key == "" {
				continue
			}
			env := fmt.Sprintf("%s=%s", key, value)
			specgen.AddProcessEnv(env)
		}
	}

	mounts := containerConfig.GetMounts()
	for _, mount := range mounts {
		dest := mount.GetContainerPath()
		if dest == "" {
			return nil, fmt.Errorf("Mount.ContainerPath is empty")
		}

		src := mount.GetHostPath()
		if src == "" {
			return nil, fmt.Errorf("Mount.HostPath is empty")
		}

		options := "rw"
		if mount.GetReadonly() {
			options = "ro"
		}

		if mount.GetSelinuxRelabel() {
			// Need a way in kubernetes to determine if the volume is shared or private
			if err := label.Relabel(src, sb.mountLabel, true); err != nil && err != syscall.ENOTSUP {
				return nil, fmt.Errorf("relabel failed %s: %v", src, err)
			}
		}

		specgen.AddBindMount(src, dest, options)

	}

	labels := containerConfig.GetLabels()

	metadata := containerConfig.GetMetadata()

	annotations := containerConfig.GetAnnotations()
	if annotations != nil {
		for k, v := range annotations {
			specgen.AddAnnotation(k, v)
		}
	}
	if containerConfig.GetLinux().GetSecurityContext().GetPrivileged() {
		specgen.SetupPrivileged(true)
	}

	if containerConfig.GetLinux().GetSecurityContext().GetReadonlyRootfs() {
		specgen.SetRootReadonly(true)
	}

	logPath := containerConfig.GetLogPath()

	if containerConfig.GetTty() {
		specgen.SetProcessTerminal(true)
	}

	linux := containerConfig.GetLinux()
	if linux != nil {
		resources := linux.GetResources()
		if resources != nil {
			cpuPeriod := resources.GetCpuPeriod()
			if cpuPeriod != 0 {
				specgen.SetLinuxResourcesCPUPeriod(uint64(cpuPeriod))
			}

			cpuQuota := resources.GetCpuQuota()
			if cpuQuota != 0 {
				specgen.SetLinuxResourcesCPUQuota(uint64(cpuQuota))
			}

			cpuShares := resources.GetCpuShares()
			if cpuShares != 0 {
				specgen.SetLinuxResourcesCPUShares(uint64(cpuShares))
			}

			memoryLimit := resources.GetMemoryLimitInBytes()
			if memoryLimit != 0 {
				specgen.SetLinuxResourcesMemoryLimit(uint64(memoryLimit))
			}

			oomScoreAdj := resources.GetOomScoreAdj()
			specgen.SetLinuxResourcesOOMScoreAdj(int(oomScoreAdj))
		}

		capabilities := linux.GetSecurityContext().GetCapabilities()
		if capabilities != nil {
			addCaps := capabilities.GetAddCapabilities()
			if addCaps != nil {
				for _, cap := range addCaps {
					if err := specgen.AddProcessCapability(cap); err != nil {
						return nil, err
					}
				}
			}

			dropCaps := capabilities.GetDropCapabilities()
			if dropCaps != nil {
				for _, cap := range dropCaps {
					if err := specgen.DropProcessCapability(cap); err != nil {
						return nil, err
					}
				}
			}
		}

		specgen.SetProcessSelinuxLabel(sb.processLabel)
		specgen.SetLinuxMountLabel(sb.mountLabel)

		user := linux.GetSecurityContext().GetRunAsUser()
		specgen.SetProcessUID(uint32(user))

		specgen.SetProcessGID(uint32(user))

		groups := linux.GetSecurityContext().GetSupplementalGroups()
		for _, group := range groups {
			specgen.AddProcessAdditionalGid(uint32(group))
		}
	}
	// Join the namespace paths for the pod sandbox container.
	podInfraState := s.runtime.ContainerStatus(sb.infraContainer)

	logrus.Debugf("pod container state %+v", podInfraState)

	for nsType, nsFile := range map[string]string{
		"ipc":     "ipc",
		"network": "net",
	} {
		nsPath := fmt.Sprintf("/proc/%d/ns/%s", podInfraState.Pid, nsFile)
		if err := specgen.AddOrReplaceLinuxNamespace(nsType, nsPath); err != nil {
			return nil, err
		}
	}

	specgen.AddAnnotation("ocid/name", containerName)
	specgen.AddAnnotation("ocid/sandbox_id", sb.id)
	specgen.AddAnnotation("ocid/log_path", logPath)
	specgen.AddAnnotation("ocid/tty", fmt.Sprintf("%v", containerConfig.GetTty()))

	metadataJSON, err := json.Marshal(metadata)
	if err != nil {
		return nil, err
	}
	specgen.AddAnnotation("ocid/metadata", string(metadataJSON))

	labelsJSON, err := json.Marshal(labels)
	if err != nil {
		return nil, err
	}
	specgen.AddAnnotation("ocid/labels", string(labelsJSON))

	if err = specgen.SaveToFile(filepath.Join(containerDir, "config.json")); err != nil {
		return nil, err
	}

	imageSpec := containerConfig.GetImage()
	if imageSpec == nil {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image is nil")
	}

	image := imageSpec.GetImage()
	if image == "" {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image.Image is empty")
	}

	// TODO: copy the rootfs into the bundle.
	// Currently, utils.CreateFakeRootfs is used to populate the rootfs.
	if err = utils.CreateFakeRootfs(containerDir, image); err != nil {
		return nil, err
	}

	container, err := oci.NewContainer(containerID, containerName, containerDir, logPath, labels, metadata, sb.id, containerConfig.GetTty())
	if err != nil {
		return nil, err
	}

	return container, nil
}

func (s *Server) generateContainerIDandName(podName string, name string, attempt uint32) (string, string, error) {
	var (
		err error
		id  = stringid.GenerateNonCryptoID()
	)
	nameStr := fmt.Sprintf("%s-%s-%v", podName, name, attempt)
	if name == "infra" {
		nameStr = fmt.Sprintf("%s-%s", podName, name)
	}
	if name, err = s.reserveContainerName(id, nameStr); err != nil {
		return "", "", err
	}
	return id, name, err
}

server/container_exec.go (new file, 11 lines):
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// Exec prepares a streaming endpoint to execute a command in the container.
func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) {
	return nil, nil
}

server/container_execsync.go (new file, 43 lines):
package server

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// ExecSync runs a command in a container synchronously.
func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.ExecSyncResponse, error) {
	logrus.Debugf("ExecSyncRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.UpdateStatus(c); err != nil {
		return nil, err
	}

	cState := s.runtime.ContainerStatus(c)
	if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
		return nil, fmt.Errorf("container is not created or running")
	}

	cmd := req.GetCmd()
	if cmd == nil {
		return nil, fmt.Errorf("exec command cannot be empty")
	}

	execResp, err := s.runtime.ExecSync(c, cmd, req.GetTimeout())
	resp := &pb.ExecSyncResponse{
		Stdout:   execResp.Stdout,
		Stderr:   execResp.Stderr,
		ExitCode: &execResp.ExitCode,
	}

	logrus.Debugf("ExecSyncResponse: %+v", resp)
	return resp, err
}

server/container_list.go (new file, 104 lines):
package server

import (
	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	"k8s.io/kubernetes/pkg/fields"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// filterContainer returns whether passed container matches filtering criteria
func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool {
	if filter != nil {
		if filter.State != nil {
			if *c.State != *filter.State {
				return false
			}
		}
		if filter.LabelSelector != nil {
			sel := fields.SelectorFromSet(filter.LabelSelector)
			if !sel.Matches(fields.Set(c.Labels)) {
				return false
			}
		}
	}
	return true
}

// ListContainers lists all containers by filters.
func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
	logrus.Debugf("ListContainersRequest %+v", req)
	var ctrs []*pb.Container
	filter := req.Filter
	ctrList := s.state.containers.List()

	// Filter using container id and pod id first.
	if filter != nil {
		if filter.Id != nil {
			c := s.state.containers.Get(*filter.Id)
			if c != nil {
				if filter.PodSandboxId != nil {
					if c.Sandbox() == *filter.PodSandboxId {
						ctrList = []*oci.Container{c}
					} else {
						ctrList = []*oci.Container{}
					}

				} else {
					ctrList = []*oci.Container{c}
				}
			}
		} else {
			if filter.PodSandboxId != nil {
				pod := s.state.sandboxes[*filter.PodSandboxId]
				if pod == nil {
					ctrList = []*oci.Container{}
				} else {
					ctrList = pod.containers.List()
				}
			}
		}
	}

	for _, ctr := range ctrList {
		if err := s.runtime.UpdateStatus(ctr); err != nil {
			return nil, err
		}

		podSandboxID := ctr.Sandbox()
		cState := s.runtime.ContainerStatus(ctr)
		created := cState.Created.UnixNano()
		rState := pb.ContainerState_CONTAINER_UNKNOWN
		cID := ctr.ID()

		c := &pb.Container{
			Id:           &cID,
			PodSandboxId: &podSandboxID,
			CreatedAt:    int64Ptr(created),
			Labels:       ctr.Labels(),
			Metadata:     ctr.Metadata(),
		}

		switch cState.Status {
		case oci.ContainerStateCreated:
			rState = pb.ContainerState_CONTAINER_CREATED
		case oci.ContainerStateRunning:
			rState = pb.ContainerState_CONTAINER_RUNNING
		case oci.ContainerStateStopped:
			rState = pb.ContainerState_CONTAINER_EXITED
		}
		c.State = &rState

		// Filter by other criteria such as state and labels.
		if filterContainer(c, req.Filter) {
			ctrs = append(ctrs, c)
		}
	}

	resp := &pb.ListContainersResponse{
		Containers: ctrs,
	}
	logrus.Debugf("ListContainersResponse: %+v", resp)
	return resp, nil
}

server/container_portforward.go (new file, 11 lines):
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
	return nil, nil
}

server/container_remove.go (new file, 53 lines):
package server

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// RemoveContainer removes the container. If the container is running, the container
// should be force removed.
func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) {
	logrus.Debugf("RemoveContainerRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.UpdateStatus(c); err != nil {
		return nil, fmt.Errorf("failed to update container state: %v", err)
	}

	cState := s.runtime.ContainerStatus(c)
	if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
		if err := s.runtime.StopContainer(c); err != nil {
			return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
		}
	}

	if err := s.runtime.DeleteContainer(c); err != nil {
		return nil, fmt.Errorf("failed to delete container %s: %v", c.ID(), err)
	}

	containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID())
	if err := os.RemoveAll(containerDir); err != nil {
		return nil, fmt.Errorf("failed to remove container %s directory: %v", c.ID(), err)
	}

	s.releaseContainerName(c.Name())
	s.removeContainer(c)

	if err := s.ctrIDIndex.Delete(c.ID()); err != nil {
		return nil, err
	}

	resp := &pb.RemoveContainerResponse{}
	logrus.Debugf("RemoveContainerResponse: %+v", resp)
	return resp, nil
}

server/container_start.go (new file, 26 lines):
package server

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// StartContainer starts the container.
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
	logrus.Debugf("StartContainerRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.StartContainer(c); err != nil {
		return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
	}

	resp := &pb.StartContainerResponse{}
	logrus.Debugf("StartContainerResponse %+v", resp)
	return resp, nil
}

server/container_status.go (new file, 59 lines):
package server

import (
	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// ContainerStatus returns status of the container.
func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
	logrus.Debugf("ContainerStatusRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.UpdateStatus(c); err != nil {
		return nil, err
	}

	containerID := c.ID()
	resp := &pb.ContainerStatusResponse{
		Status: &pb.ContainerStatus{
			Id:       &containerID,
			Metadata: c.Metadata(),
		},
	}

	cState := s.runtime.ContainerStatus(c)
	rStatus := pb.ContainerState_CONTAINER_UNKNOWN

	switch cState.Status {
	case oci.ContainerStateCreated:
		rStatus = pb.ContainerState_CONTAINER_CREATED
		created := cState.Created.UnixNano()
		resp.Status.CreatedAt = int64Ptr(created)
	case oci.ContainerStateRunning:
		rStatus = pb.ContainerState_CONTAINER_RUNNING
		created := cState.Created.UnixNano()
		resp.Status.CreatedAt = int64Ptr(created)
		started := cState.Started.UnixNano()
		resp.Status.StartedAt = int64Ptr(started)
	case oci.ContainerStateStopped:
		rStatus = pb.ContainerState_CONTAINER_EXITED
		created := cState.Created.UnixNano()
		resp.Status.CreatedAt = int64Ptr(created)
		started := cState.Started.UnixNano()
		resp.Status.StartedAt = int64Ptr(started)
		finished := cState.Finished.UnixNano()
		resp.Status.FinishedAt = int64Ptr(finished)
		resp.Status.ExitCode = int32Ptr(cState.ExitCode)
	}

	resp.Status.State = &rStatus

	logrus.Debugf("ContainerStatusResponse: %+v", resp)
	return resp, nil
}

server/container_stop.go (new file, 26 lines):
package server

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// StopContainer stops a running container with a grace period (i.e., timeout).
func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
	logrus.Debugf("StopContainerRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.StopContainer(c); err != nil {
		return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
	}

	resp := &pb.StopContainerResponse{}
	logrus.Debugf("StopContainerResponse: %+v", resp)
	return resp, nil
}

server/container_updateruntimeconfig.go (new file, 11 lines):
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// UpdateRuntimeConfig updates the configuration of a running container.
func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (*pb.UpdateRuntimeConfigResponse, error) {
	return nil, nil
}

server/image_list.go (new file, 16 lines):
package server

import (
	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// ListImages lists existing images.
func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
	logrus.Debugf("ListImages: %+v", req)
	// TODO
	// containers/storage will take care of this by looking inside /var/lib/ocid/images
	// and listing images.
	return &pb.ListImagesResponse{}, nil
}

@@ -14,24 +14,6 @@ import (
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
-// ListImages lists existing images.
-func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
-	logrus.Debugf("ListImages: %+v", req)
-	// TODO
-	// containers/storage will take care of this by looking inside /var/lib/ocid/images
-	// and listing images.
-	return &pb.ListImagesResponse{}, nil
-}
-
-// ImageStatus returns the status of the image.
-func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
-	logrus.Debugf("ImageStatus: %+v", req)
-	// TODO
-	// containers/storage will take care of this by looking inside /var/lib/ocid/images
-	// and getting the image status
-	return &pb.ImageStatusResponse{}, nil
-}
-
 // PullImage pulls a image with authentication config.
 func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
 	logrus.Debugf("PullImage: %+v", req)
@@ -98,9 +80,3 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
 
 	return &pb.PullImageResponse{}, nil
 }
-
-// RemoveImage removes the image.
-func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
-	logrus.Debugf("RemoveImage: %+v", req)
-	return &pb.RemoveImageResponse{}, nil
-}

server/image_remove.go (new file, 13 lines):
package server

import (
	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// RemoveImage removes the image.
func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
	logrus.Debugf("RemoveImage: %+v", req)
	return &pb.RemoveImageResponse{}, nil
}

server/image_status.go (new file, 16 lines):
package server

import (
	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// ImageStatus returns the status of the image.
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
	logrus.Debugf("ImageStatus: %+v", req)
	// TODO
	// containers/storage will take care of this by looking inside /var/lib/ocid/images
	// and getting the image status
	return &pb.ImageStatusResponse{}, nil
}

server/runtime_status.go (new file, 41 lines):
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// Status returns the status of the runtime
func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) {

	// Deal with Runtime conditions
	runtimeReady, err := s.runtime.RuntimeReady()
	if err != nil {
		return nil, err
	}
	networkReady, err := s.runtime.NetworkReady()
	if err != nil {
		return nil, err
	}

	// Use vendored strings
	runtimeReadyConditionString := pb.RuntimeReady
	networkReadyConditionString := pb.NetworkReady

	resp := &pb.StatusResponse{
		Status: &pb.RuntimeStatus{
			Conditions: []*pb.RuntimeCondition{
				&pb.RuntimeCondition{
					Type:   &runtimeReadyConditionString,
					Status: &runtimeReady,
				},
				&pb.RuntimeCondition{
					Type:   &networkReadyConditionString,
					Status: &networkReady,
				},
			},
		},
	}

	return resp, nil
}
|
@ -1,18 +1,10 @@
|
||||||
package server
|
package server
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/docker/docker/pkg/stringid"
|
"github.com/docker/docker/pkg/stringid"
|
||||||
"github.com/kubernetes-incubator/cri-o/oci"
|
"github.com/kubernetes-incubator/cri-o/oci"
|
||||||
"github.com/kubernetes-incubator/cri-o/utils"
|
|
||||||
"github.com/opencontainers/runc/libcontainer/label"
|
|
||||||
"github.com/opencontainers/runtime-tools/generate"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"k8s.io/kubernetes/pkg/fields"
|
"k8s.io/kubernetes/pkg/fields"
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||||
)
|
)
|
||||||
|
@ -82,520 +74,3 @@ func (s *Server) getPodSandboxFromRequest(req podSandboxRequest) (*sandbox, erro
|
||||||
}
|
}
|
||||||
return sb, nil
|
return sb, nil
|
||||||
}
|
}
|
(The rest of this hunk removes the sandbox lifecycle code from sandbox.go: RunPodSandbox, StopPodSandbox, RemovePodSandbox, PodSandboxStatus, filterSandbox, ListPodSandbox, and getSELinuxLabels. Those lines reappear unchanged in the new server/sandbox_run.go, server/sandbox_stop.go, server/sandbox_remove.go, server/sandbox_status.go, and server/sandbox_list.go files below.)
88  server/sandbox_list.go  Normal file
@@ -0,0 +1,88 @@
package server

import (
    "github.com/Sirupsen/logrus"
    "github.com/kubernetes-incubator/cri-o/oci"
    "golang.org/x/net/context"
    "k8s.io/kubernetes/pkg/fields"
    pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// filterSandbox returns whether passed container matches filtering criteria
func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
    if filter != nil {
        if filter.State != nil {
            if *p.State != *filter.State {
                return false
            }
        }
        if filter.LabelSelector != nil {
            sel := fields.SelectorFromSet(filter.LabelSelector)
            if !sel.Matches(fields.Set(p.Labels)) {
                return false
            }
        }
    }
    return true
}

// ListPodSandbox returns a list of SandBoxes.
func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
    logrus.Debugf("ListPodSandboxRequest %+v", req)
    var pods []*pb.PodSandbox
    var podList []*sandbox
    for _, sb := range s.state.sandboxes {
        podList = append(podList, sb)
    }

    filter := req.Filter
    // Filter by pod id first.
    if filter != nil {
        if filter.Id != nil {
            sb := s.getSandbox(*filter.Id)
            if sb == nil {
                podList = []*sandbox{}
            } else {
                podList = []*sandbox{sb}
            }
        }
    }

    for _, sb := range podList {
        podInfraContainer := sb.infraContainer
        if podInfraContainer == nil {
            // this can't really happen, but if it does because of a bug
            // it's better not to panic
            continue
        }
        if err := s.runtime.UpdateStatus(podInfraContainer); err != nil {
            return nil, err
        }
        cState := s.runtime.ContainerStatus(podInfraContainer)
        created := cState.Created.UnixNano()
        rStatus := pb.PodSandboxState_SANDBOX_NOTREADY
        if cState.Status == oci.ContainerStateRunning {
            rStatus = pb.PodSandboxState_SANDBOX_READY
        }

        pod := &pb.PodSandbox{
            Id:          &sb.id,
            CreatedAt:   int64Ptr(created),
            State:       &rStatus,
            Labels:      sb.labels,
            Annotations: sb.annotations,
            Metadata:    sb.metadata,
        }

        // Filter by other criteria such as state and labels.
        if filterSandbox(pod, req.Filter) {
            pods = append(pods, pod)
        }
    }

    resp := &pb.ListPodSandboxResponse{
        Items: pods,
    }
    logrus.Debugf("ListPodSandboxResponse %+v", resp)
    return resp, nil
}
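filterSandbox applies the optional state and label-selector criteria, with a nil filter matching everything and the label selector interpreted as a subset match via fields.SelectorFromSet. A small check of those semantics could look like the sketch below; it assumes it compiles inside the server package next to sandbox_list.go and is not a test shipped by this PR.

package server

import (
    "testing"

    pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

func TestFilterSandbox(t *testing.T) {
    ready := pb.PodSandboxState_SANDBOX_READY
    notReady := pb.PodSandboxState_SANDBOX_NOTREADY
    pod := &pb.PodSandbox{
        State:  &ready,
        Labels: map[string]string{"app": "web", "tier": "frontend"},
    }

    // A nil filter matches everything.
    if !filterSandbox(pod, nil) {
        t.Fatal("nil filter should match")
    }
    // A state mismatch filters the sandbox out.
    if filterSandbox(pod, &pb.PodSandboxFilter{State: &notReady}) {
        t.Fatal("state mismatch should not match")
    }
    // The label selector must be a subset of the sandbox labels.
    if !filterSandbox(pod, &pb.PodSandboxFilter{LabelSelector: map[string]string{"app": "web"}}) {
        t.Fatal("matching label selector should match")
    }
    if filterSandbox(pod, &pb.PodSandboxFilter{LabelSelector: map[string]string{"app": "db"}}) {
        t.Fatal("non-matching label selector should not match")
    }
}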
77  server/sandbox_remove.go  Normal file
@@ -0,0 +1,77 @@
package server

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/Sirupsen/logrus"
    "github.com/kubernetes-incubator/cri-o/oci"
    "github.com/opencontainers/runc/libcontainer/label"
    "golang.org/x/net/context"
    pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// RemovePodSandbox deletes the sandbox. If there are any running containers in the
// sandbox, they should be force deleted.
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
    logrus.Debugf("RemovePodSandboxRequest %+v", req)
    sb, err := s.getPodSandboxFromRequest(req)
    if err != nil {
        return nil, err
    }

    podInfraContainer := sb.infraContainer
    containers := sb.containers.List()
    containers = append(containers, podInfraContainer)

    // Delete all the containers in the sandbox
    for _, c := range containers {
        if err := s.runtime.UpdateStatus(c); err != nil {
            return nil, fmt.Errorf("failed to update container state: %v", err)
        }

        cState := s.runtime.ContainerStatus(c)
        if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
            if err := s.runtime.StopContainer(c); err != nil {
                return nil, fmt.Errorf("failed to stop container %s: %v", c.Name(), err)
            }
        }

        if err := s.runtime.DeleteContainer(c); err != nil {
            return nil, fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), sb.id, err)
        }

        if c == podInfraContainer {
            continue
        }

        containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID())
        if err := os.RemoveAll(containerDir); err != nil {
            return nil, fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err)
        }

        s.releaseContainerName(c.Name())
        s.removeContainer(c)
    }

    if err := label.UnreserveLabel(sb.processLabel); err != nil {
        return nil, err
    }

    // Remove the files related to the sandbox
    podSandboxDir := filepath.Join(s.config.SandboxDir, sb.id)
    if err := os.RemoveAll(podSandboxDir); err != nil {
        return nil, fmt.Errorf("failed to remove sandbox %s directory: %v", sb.id, err)
    }
    s.releaseContainerName(podInfraContainer.Name())
    s.removeContainer(podInfraContainer)
    sb.infraContainer = nil

    s.releasePodName(sb.name)
    s.removeSandbox(sb.id)

    resp := &pb.RemovePodSandboxResponse{}
    logrus.Debugf("RemovePodSandboxResponse %+v", resp)
    return resp, nil
}
298  server/sandbox_run.go  Normal file
@@ -0,0 +1,298 @@
package server

import (
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"

    "github.com/Sirupsen/logrus"
    "github.com/kubernetes-incubator/cri-o/oci"
    "github.com/kubernetes-incubator/cri-o/utils"
    "github.com/opencontainers/runc/libcontainer/label"
    "github.com/opencontainers/runtime-tools/generate"
    "golang.org/x/net/context"
    pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// RunPodSandbox creates and runs a pod-level sandbox.
func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest) (*pb.RunPodSandboxResponse, error) {
    logrus.Debugf("RunPodSandboxRequest %+v", req)
    var processLabel, mountLabel string
    // process req.Name
    name := req.GetConfig().GetMetadata().GetName()
    if name == "" {
        return nil, fmt.Errorf("PodSandboxConfig.Name should not be empty")
    }

    namespace := req.GetConfig().GetMetadata().GetNamespace()
    attempt := req.GetConfig().GetMetadata().GetAttempt()

    var err error
    id, name, err := s.generatePodIDandName(name, namespace, attempt)
    if err != nil {
        return nil, err
    }

    defer func() {
        if err != nil {
            s.releasePodName(name)
        }
    }()

    if err = s.podIDIndex.Add(id); err != nil {
        return nil, err
    }

    defer func() {
        if err != nil {
            if err = s.podIDIndex.Delete(id); err != nil {
                logrus.Warnf("couldn't delete pod id %s from idIndex", id)
            }
        }
    }()

    podSandboxDir := filepath.Join(s.config.SandboxDir, id)
    if _, err = os.Stat(podSandboxDir); err == nil {
        return nil, fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir)
    }

    defer func() {
        if err != nil {
            if err2 := os.RemoveAll(podSandboxDir); err2 != nil {
                logrus.Warnf("couldn't cleanup podSandboxDir %s: %v", podSandboxDir, err2)
            }
        }
    }()

    if err = os.MkdirAll(podSandboxDir, 0755); err != nil {
        return nil, err
    }

    // creates a spec Generator with the default spec.
    g := generate.New()

    // TODO: Make the `graph/vfs` part of this configurable once the storage
    // integration has been merged.
    podInfraRootfs := filepath.Join(s.config.Root, "graph/vfs/pause")
    // setup defaults for the pod sandbox
    g.SetRootPath(filepath.Join(podInfraRootfs, "rootfs"))
    g.SetRootReadonly(true)
    g.SetProcessArgs([]string{"/pause"})

    // set hostname
    hostname := req.GetConfig().GetHostname()
    if hostname != "" {
        g.SetHostname(hostname)
    }

    // set log directory
    logDir := req.GetConfig().GetLogDirectory()
    if logDir == "" {
        logDir = filepath.Join(s.config.LogDir, id)
    }

    // set DNS options
    dnsServers := req.GetConfig().GetDnsConfig().GetServers()
    dnsSearches := req.GetConfig().GetDnsConfig().GetSearches()
    dnsOptions := req.GetConfig().GetDnsConfig().GetOptions()
    resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir)
    err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath)
    if err != nil {
        err1 := removeFile(resolvPath)
        if err1 != nil {
            err = err1
            return nil, fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
        }
        return nil, err
    }

    g.AddBindMount(resolvPath, "/etc/resolv.conf", "ro")

    // add metadata
    metadata := req.GetConfig().GetMetadata()
    metadataJSON, err := json.Marshal(metadata)
    if err != nil {
        return nil, err
    }

    // add labels
    labels := req.GetConfig().GetLabels()
    labelsJSON, err := json.Marshal(labels)
    if err != nil {
        return nil, err
    }

    // add annotations
    annotations := req.GetConfig().GetAnnotations()
    annotationsJSON, err := json.Marshal(annotations)
    if err != nil {
        return nil, err
    }

    // Don't use SELinux separation with Host Pid or IPC Namespace,
    if !req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() && !req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
        processLabel, mountLabel, err = getSELinuxLabels(nil)
        if err != nil {
            return nil, err
        }
        g.SetProcessSelinuxLabel(processLabel)
    }

    containerID, containerName, err := s.generateContainerIDandName(name, "infra", 0)
    if err != nil {
        return nil, err
    }

    defer func() {
        if err != nil {
            s.releaseContainerName(containerName)
        }
    }()

    if err = s.ctrIDIndex.Add(containerID); err != nil {
        return nil, err
    }

    defer func() {
        if err != nil {
            if err = s.ctrIDIndex.Delete(containerID); err != nil {
                logrus.Warnf("couldn't delete ctr id %s from idIndex", containerID)
            }
        }
    }()

    g.AddAnnotation("ocid/metadata", string(metadataJSON))
    g.AddAnnotation("ocid/labels", string(labelsJSON))
    g.AddAnnotation("ocid/annotations", string(annotationsJSON))
    g.AddAnnotation("ocid/log_path", logDir)
    g.AddAnnotation("ocid/name", name)
    g.AddAnnotation("ocid/container_name", containerName)
    g.AddAnnotation("ocid/container_id", containerID)

    sb := &sandbox{
        id:           id,
        name:         name,
        logDir:       logDir,
        labels:       labels,
        annotations:  annotations,
        containers:   oci.NewMemoryStore(),
        processLabel: processLabel,
        mountLabel:   mountLabel,
        metadata:     metadata,
    }

    s.addSandbox(sb)

    for k, v := range annotations {
        g.AddAnnotation(k, v)
    }

    // setup cgroup settings
    cgroupParent := req.GetConfig().GetLinux().GetCgroupParent()
    if cgroupParent != "" {
        g.SetLinuxCgroupsPath(cgroupParent)
    }

    // set up namespaces
    if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() {
        err = g.RemoveLinuxNamespace("network")
        if err != nil {
            return nil, err
        }
    }

    if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() {
        err = g.RemoveLinuxNamespace("pid")
        if err != nil {
            return nil, err
        }
    }

    if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
        err = g.RemoveLinuxNamespace("ipc")
        if err != nil {
            return nil, err
        }
    }

    err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"))
    if err != nil {
        return nil, err
    }

    if _, err = os.Stat(podInfraRootfs); err != nil {
        if os.IsNotExist(err) {
            // TODO: Replace by rootfs creation API when it is ready
            if err = utils.CreateInfraRootfs(podInfraRootfs, s.config.Pause); err != nil {
                return nil, err
            }
        } else {
            return nil, err
        }
    }

    container, err := oci.NewContainer(containerID, containerName, podSandboxDir, podSandboxDir, labels, nil, id, false)
    if err != nil {
        return nil, err
    }

    sb.infraContainer = container

    if err = s.runtime.CreateContainer(container); err != nil {
        return nil, err
    }

    if err = s.runtime.UpdateStatus(container); err != nil {
        return nil, err
    }

    // setup the network
    podNamespace := ""
    netnsPath, err := container.NetNsPath()
    if err != nil {
        return nil, err
    }
    if err = s.netPlugin.SetUpPod(netnsPath, podNamespace, id, containerName); err != nil {
        return nil, fmt.Errorf("failed to create network for container %s in sandbox %s: %v", containerName, id, err)
    }

    if err = s.runtime.StartContainer(container); err != nil {
        return nil, err
    }

    if err = s.runtime.UpdateStatus(container); err != nil {
        return nil, err
    }

    resp := &pb.RunPodSandboxResponse{PodSandboxId: &id}
    logrus.Debugf("RunPodSandboxResponse: %+v", resp)
    return resp, nil
}

func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) {
    processLabel = ""
    if selinuxOptions != nil {
        user := selinuxOptions.GetUser()
        if user == "" {
            return "", "", fmt.Errorf("SELinuxOption.User is empty")
        }

        role := selinuxOptions.GetRole()
        if role == "" {
            return "", "", fmt.Errorf("SELinuxOption.Role is empty")
        }

        t := selinuxOptions.GetType()
        if t == "" {
            return "", "", fmt.Errorf("SELinuxOption.Type is empty")
        }

        level := selinuxOptions.GetLevel()
        if level == "" {
            return "", "", fmt.Errorf("SELinuxOption.Level is empty")
        }
        processLabel = fmt.Sprintf("%s:%s:%s:%s", user, role, t, level)
    }
    return label.InitLabels(label.DupSecOpt(processLabel))
}
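RunPodSandbox serializes the CRI metadata, labels, and annotations to JSON and stores them as ocid/* annotations in the sandbox's config.json. The sketch below shows the round trip for the ocid/metadata payload; the sample values are placeholders, and reading the annotation back this way is an assumption about how a consumer of the bundle might use it, not something this PR ships.

package main

import (
    "encoding/json"
    "fmt"

    pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

func main() {
    // Placeholder values standing in for a real PodSandboxConfig's metadata.
    name := "nginx-sandbox"
    namespace := "default"
    uid := "11111111-2222-3333-4444-555555555555"
    attempt := uint32(1)

    // RunPodSandbox marshals the metadata and stores it under the
    // "ocid/metadata" annotation in the sandbox's config.json.
    meta := &pb.PodSandboxMetadata{Name: &name, Namespace: &namespace, Uid: &uid, Attempt: &attempt}
    raw, err := json.Marshal(meta)
    if err != nil {
        panic(err)
    }

    // A consumer of the bundle can recover the metadata from that annotation.
    var decoded pb.PodSandboxMetadata
    if err := json.Unmarshal(raw, &decoded); err != nil {
        panic(err)
    }
    fmt.Printf("%s/%s attempt %d\n", decoded.GetNamespace(), decoded.GetName(), decoded.GetAttempt())
}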
62  server/sandbox_status.go  Normal file
@@ -0,0 +1,62 @@
package server

import (
    "github.com/Sirupsen/logrus"
    "github.com/kubernetes-incubator/cri-o/oci"
    "golang.org/x/net/context"
    pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// PodSandboxStatus returns the Status of the PodSandbox.
func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
    logrus.Debugf("PodSandboxStatusRequest %+v", req)
    sb, err := s.getPodSandboxFromRequest(req)
    if err != nil {
        return nil, err
    }

    podInfraContainer := sb.infraContainer
    if err = s.runtime.UpdateStatus(podInfraContainer); err != nil {
        return nil, err
    }

    cState := s.runtime.ContainerStatus(podInfraContainer)
    created := cState.Created.UnixNano()

    netNsPath, err := podInfraContainer.NetNsPath()
    if err != nil {
        return nil, err
    }
    podNamespace := ""
    ip, err := s.netPlugin.GetContainerNetworkStatus(netNsPath, podNamespace, sb.id, podInfraContainer.Name())
    if err != nil {
        // ignore the error on network status
        ip = ""
    }

    rStatus := pb.PodSandboxState_SANDBOX_NOTREADY
    if cState.Status == oci.ContainerStateRunning {
        rStatus = pb.PodSandboxState_SANDBOX_READY
    }

    sandboxID := sb.id
    resp := &pb.PodSandboxStatusResponse{
        Status: &pb.PodSandboxStatus{
            Id:        &sandboxID,
            CreatedAt: int64Ptr(created),
            Linux: &pb.LinuxPodSandboxStatus{
                Namespaces: &pb.Namespace{
                    Network: sPtr(netNsPath),
                },
            },
            Network:     &pb.PodSandboxNetworkStatus{Ip: &ip},
            State:       &rStatus,
            Labels:      sb.labels,
            Annotations: sb.annotations,
            Metadata:    sb.metadata,
        },
    }

    logrus.Infof("PodSandboxStatusResponse: %+v", resp)
    return resp, nil
}
48  server/sandbox_stop.go  Normal file
@@ -0,0 +1,48 @@
package server

import (
    "fmt"

    "github.com/Sirupsen/logrus"
    "github.com/kubernetes-incubator/cri-o/oci"
    "golang.org/x/net/context"
    pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated.
func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
    logrus.Debugf("StopPodSandboxRequest %+v", req)
    sb, err := s.getPodSandboxFromRequest(req)
    if err != nil {
        return nil, err
    }

    podNamespace := ""
    podInfraContainer := sb.infraContainer
    netnsPath, err := podInfraContainer.NetNsPath()
    if err != nil {
        return nil, err
    }

    if err := s.netPlugin.TearDownPod(netnsPath, podNamespace, sb.id, podInfraContainer.Name()); err != nil {
        return nil, fmt.Errorf("failed to destroy network for container %s in sandbox %s: %v",
            podInfraContainer.Name(), sb.id, err)
    }

    containers := sb.containers.List()
    containers = append(containers, podInfraContainer)

    for _, c := range containers {
        cStatus := s.runtime.ContainerStatus(c)
        if cStatus.Status != oci.ContainerStateStopped {
            if err := s.runtime.StopContainer(c); err != nil {
                return nil, fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), sb.id, err)
            }
        }
    }

    resp := &pb.StopPodSandboxResponse{}
    logrus.Debugf("StopPodSandboxResponse: %+v", resp)
    return resp, nil
}
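Taken together, the new stop and remove handlers give the expected teardown order: StopPodSandbox tears down the pod network and stops any running containers, and RemovePodSandbox then deletes the containers plus the sandbox's on-disk state. A minimal caller-side sketch is below; it reuses a pb.RuntimeServiceClient built as in the Status example earlier, and the sandbox ID it receives is a placeholder.

package clientutil

import (
    "golang.org/x/net/context"
    pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// teardownSandbox drives the two RPCs in the order the CRI expects:
// StopPodSandbox first (network teardown, containers stopped), then
// RemovePodSandbox (containers deleted, sandbox directory removed).
func teardownSandbox(ctx context.Context, client pb.RuntimeServiceClient, id string) error {
    if _, err := client.StopPodSandbox(ctx, &pb.StopPodSandboxRequest{PodSandboxId: &id}); err != nil {
        return err
    }
    _, err := client.RemovePodSandbox(ctx, &pb.RemovePodSandboxRequest{PodSandboxId: &id})
    return err
}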