2016-11-22 16:49:54 +00:00
|
|
|
package server
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/json"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"path/filepath"
|
2016-11-23 09:41:48 +00:00
|
|
|
"strings"
|
2016-11-22 16:49:54 +00:00
|
|
|
"syscall"
|
|
|
|
|
|
|
|
"github.com/Sirupsen/logrus"
|
|
|
|
"github.com/docker/docker/pkg/stringid"
|
|
|
|
"github.com/kubernetes-incubator/cri-o/oci"
|
2016-11-29 12:34:15 +00:00
|
|
|
"github.com/kubernetes-incubator/cri-o/server/apparmor"
|
2016-11-23 09:41:48 +00:00
|
|
|
"github.com/kubernetes-incubator/cri-o/server/seccomp"
|
2017-03-16 09:59:41 +00:00
|
|
|
"github.com/opencontainers/image-spec/specs-go/v1"
|
2016-11-22 16:49:54 +00:00
|
|
|
"github.com/opencontainers/runtime-tools/generate"
|
2017-03-22 17:58:35 +00:00
|
|
|
"github.com/opencontainers/selinux/go-selinux/label"
|
2016-11-22 16:49:54 +00:00
|
|
|
"golang.org/x/net/context"
|
|
|
|
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
|
|
|
)
|
|
|
|
|
2016-11-23 09:41:48 +00:00
|
|
|
const (
|
|
|
|
seccompUnconfined = "unconfined"
|
|
|
|
seccompRuntimeDefault = "runtime/default"
|
|
|
|
seccompLocalhostPrefix = "localhost/"
|
|
|
|
)
|
|
|
|
|
2017-02-22 18:42:44 +00:00
|
|
|
func addOciBindMounts(sb *sandbox, containerConfig *pb.ContainerConfig, specgen *generate.Generator) error {
|
|
|
|
mounts := containerConfig.GetMounts()
|
|
|
|
for _, mount := range mounts {
|
|
|
|
dest := mount.ContainerPath
|
|
|
|
if dest == "" {
|
|
|
|
return fmt.Errorf("Mount.ContainerPath is empty")
|
|
|
|
}
|
|
|
|
|
|
|
|
src := mount.HostPath
|
|
|
|
if src == "" {
|
|
|
|
return fmt.Errorf("Mount.HostPath is empty")
|
|
|
|
}
|
|
|
|
|
|
|
|
options := []string{"rw"}
|
|
|
|
if mount.Readonly {
|
|
|
|
options = []string{"ro"}
|
|
|
|
}
|
|
|
|
|
|
|
|
if mount.SelinuxRelabel {
|
|
|
|
// Need a way in kubernetes to determine if the volume is shared or private
|
|
|
|
if err := label.Relabel(src, sb.mountLabel, true); err != nil && err != syscall.ENOTSUP {
|
|
|
|
return fmt.Errorf("relabel failed %s: %v", src, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
specgen.AddBindMount(src, dest, options)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-03-16 09:59:41 +00:00
|
|
|
// buildOCIProcessArgs build an OCI compatible process arguments slice.
//
// Precedence between the kubelet-provided config and the image OCI config:
//   - kubelet Command overrides the image ENTRYPOINT;
//   - kubelet Args override the image CMD;
//   - when Command is set but Args is not, the image CMD is deliberately
//     dropped (matching Docker/Kubernetes semantics).
//
// imageOCIConfig may currently be nil (see issue #395 below); in that case
// the function logs an error and falls back to defaults instead of failing.
// The final result is always entrypoint slice followed by cmd slice.
func buildOCIProcessArgs(containerKubeConfig *pb.ContainerConfig, imageOCIConfig *v1.Image) ([]string, error) {
	processArgs := []string{}
	var processEntryPoint, processCmd []string

	kubeCommands := containerKubeConfig.Command
	kubeArgs := containerKubeConfig.Args

	if imageOCIConfig == nil {
		// HACK We should error out here, not being able to get an Image config is fatal.
		// When https://github.com/kubernetes-incubator/cri-o/issues/395 is fixed
		// we'll remove that one and return an error here.
		if containerKubeConfig.Metadata != nil {
			logrus.Errorf("empty image config for %s", containerKubeConfig.Metadata.Name)

			// HACK until https://github.com/kubernetes-incubator/cri-o/issues/395 is fixed.
			// If the container is kubeadm's dummy, imageOCIConfig is nil, and both
			// kubeCommands and kubeArgs are empty. So we set processArgs to /pause as the
			// dummy container is just a pause one.
			// (See https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/master/templates.go)
			if containerKubeConfig.Metadata.Name == "dummy" {
				return []string{podInfraCommand}, nil
			}
		} else {
			// NOTE(review): containerKubeConfig.Image is dereferenced without a
			// nil check here — assumes the caller validated Image first; verify.
			logrus.Errorf("empty image config for %s", containerKubeConfig.Image.Image)
		}
	}

	// We got an OCI Image configuration.
	// We will only use it if the kubelet information is incomplete.

	// First we set the process entry point.
	if kubeCommands != nil {
		// The kubelet command slice is prioritized.
		processEntryPoint = kubeCommands
	} else {
		// Here the kubelet command slice is empty.
		if imageOCIConfig != nil {
			// If the OCI image config has an ENTRYPOINT we
			// use it as our process command.
			// Otherwise we use the CMD slice if it's not
			// empty.
			if imageOCIConfig.Config.Entrypoint != nil {
				processEntryPoint = imageOCIConfig.Config.Entrypoint
			} else if imageOCIConfig.Config.Cmd != nil {
				processEntryPoint = imageOCIConfig.Config.Cmd
			}
		} else {
			// We neither have a kubelet command nor an image OCI config.
			// Missing an image OCI config will no longer be supported after
			// https://github.com/kubernetes-incubator/cri-o/issues/395 is fixed.
			processEntryPoint = []string{"/bin/sh", "-c"}
		}
	}

	// Then we build the process command arguments
	if kubeArgs != nil {
		// The kubelet command arguments slice is prioritized.
		processCmd = kubeArgs
	} else {
		if kubeCommands != nil {
			// kubelet gave us a command slice but explicitly
			// left the arguments slice empty. We should keep
			// it that way.
			processCmd = []string{}
		} else {
			// Here kubelet kept both the command and arguments
			// slices empty. We should try building the process
			// arguments slice from the OCI image config.
			// If the OCI image config has an ENTRYPOINT slice,
			// we use the CMD slice as the process arguments.
			// Otherwise, we already picked CMD as our process
			// command and we must not add the CMD slice twice.
			if imageOCIConfig != nil {
				if imageOCIConfig.Config.Entrypoint != nil {
					processCmd = imageOCIConfig.Config.Cmd
				} else {
					processCmd = []string{}
				}
			} else {
				// Missing an image OCI config will no longer
				// be supported after https://github.com/kubernetes-incubator/cri-o/issues/395
				// is fixed.
				processCmd = []string{}
			}
		}
	}

	// Final args: entrypoint first, then the command arguments.
	processArgs = append(processArgs, processEntryPoint...)
	processArgs = append(processArgs, processCmd...)

	logrus.Debugf("OCI process args %v", processArgs)

	return processArgs, nil
}
|
|
|
|
|
2016-11-22 16:49:54 +00:00
|
|
|
// CreateContainer creates a new container in specified PodSandbox
//
// The named return value err is intentional: the deferred cleanup closures
// below inspect it, so that the reserved container name and the container's
// storage are released if any later step fails. Rollback of s.addContainer
// is handled explicitly when adding to the ID index fails.
func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) {
	logrus.Debugf("CreateContainerRequest %+v", req)
	s.Update()
	sbID := req.PodSandboxId
	if sbID == "" {
		return nil, fmt.Errorf("PodSandboxId should not be empty")
	}

	// Resolve a possibly truncated sandbox ID prefix to the full ID.
	sandboxID, err := s.podIDIndex.Get(sbID)
	if err != nil {
		return nil, fmt.Errorf("PodSandbox with ID starting with %s not found: %v", sbID, err)
	}

	sb := s.getSandbox(sandboxID)
	if sb == nil {
		return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID)
	}

	// The config of the container
	containerConfig := req.GetConfig()
	if containerConfig == nil {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig is nil")
	}

	name := containerConfig.GetMetadata().Name
	if name == "" {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Name is empty")
	}

	attempt := containerConfig.GetMetadata().Attempt
	containerID, containerName, err := s.generateContainerIDandName(sb.name, name, attempt)
	if err != nil {
		return nil, err
	}

	// Give the reserved name back if anything below fails.
	defer func() {
		if err != nil {
			s.releaseContainerName(containerName)
		}
	}()

	container, err := s.createSandboxContainer(ctx, containerID, containerName, sb, req.GetSandboxConfig(), containerConfig)
	if err != nil {
		return nil, err
	}
	// Remove the container's storage if a later step fails; the failure to
	// clean up is only logged because err already carries the real failure.
	defer func() {
		if err != nil {
			err2 := s.storage.DeleteContainer(containerID)
			if err2 != nil {
				logrus.Warnf("Failed to cleanup container directory: %v", err2)
			}
		}
	}()

	// Create (but do not start) the container in the OCI runtime.
	if err = s.runtime.CreateContainer(container, sb.cgroupParent); err != nil {
		return nil, err
	}

	// Refresh the container's state from the runtime before registering it.
	if err = s.runtime.UpdateStatus(container); err != nil {
		return nil, err
	}

	s.addContainer(container)

	if err = s.ctrIDIndex.Add(containerID); err != nil {
		// Undo the registration above; the deferred closures handle the rest.
		s.removeContainer(container)
		return nil, err
	}

	resp := &pb.CreateContainerResponse{
		ContainerId: containerID,
	}

	logrus.Debugf("CreateContainerResponse: %+v", resp)
	return resp, nil
}
|
|
|
|
|
2016-10-18 14:48:33 +00:00
|
|
|
// createSandboxContainer builds the OCI runtime spec for a new container in
// the given sandbox, creates and mounts its storage, writes the resulting
// config.json to the container's directories, and returns the assembled
// oci.Container. It does not create the container in the runtime; the caller
// does that.
func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerName string, sb *sandbox, SandboxConfig *pb.PodSandboxConfig, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
	if sb == nil {
		return nil, errors.New("createSandboxContainer needs a sandbox")
	}

	// TODO: simplify this function (cyclomatic complexity here is high)
	// TODO: factor generating/updating the spec into something other projects can vendor

	// creates a spec Generator with the default spec.
	specgen := generate.New()

	if err := addOciBindMounts(sb, containerConfig, &specgen); err != nil {
		return nil, err
	}

	labels := containerConfig.GetLabels()

	metadata := containerConfig.GetMetadata()

	// Copy the CRI annotations verbatim onto the OCI spec.
	annotations := containerConfig.GetAnnotations()
	if annotations != nil {
		for k, v := range annotations {
			specgen.AddAnnotation(k, v)
		}
	}

	// set this container's apparmor profile if it is set by sandbox
	if s.appArmorEnabled {
		appArmorProfileName := s.getAppArmorProfileName(sb.annotations, metadata.Name)
		if appArmorProfileName != "" {
			// reload default apparmor profile if it is unloaded.
			if s.appArmorProfile == apparmor.DefaultApparmorProfile {
				if err := apparmor.EnsureDefaultApparmorProfile(); err != nil {
					return nil, err
				}
			}

			specgen.SetProcessApparmorProfile(appArmorProfileName)
		}
	}
	// Privileged / read-only rootfs flags from the CRI security context.
	if containerConfig.GetLinux().GetSecurityContext() != nil {
		if containerConfig.GetLinux().GetSecurityContext().Privileged {
			specgen.SetupPrivileged(true)
		}

		if containerConfig.GetLinux().GetSecurityContext().ReadonlyRootfs {
			specgen.SetRootReadonly(true)
		}
	}

	logPath := containerConfig.LogPath
	specgen.SetProcessTerminal(containerConfig.Tty)

	linux := containerConfig.GetLinux()
	if linux != nil {
		// Translate CRI resource limits onto the spec. Zero values mean
		// "unset" and are skipped, except OomScoreAdj which is always applied.
		resources := linux.GetResources()
		if resources != nil {
			cpuPeriod := resources.CpuPeriod
			if cpuPeriod != 0 {
				specgen.SetLinuxResourcesCPUPeriod(uint64(cpuPeriod))
			}

			cpuQuota := resources.CpuQuota
			if cpuQuota != 0 {
				specgen.SetLinuxResourcesCPUQuota(uint64(cpuQuota))
			}

			cpuShares := resources.CpuShares
			if cpuShares != 0 {
				specgen.SetLinuxResourcesCPUShares(uint64(cpuShares))
			}

			memoryLimit := resources.MemoryLimitInBytes
			if memoryLimit != 0 {
				specgen.SetLinuxResourcesMemoryLimit(uint64(memoryLimit))
			}

			oomScoreAdj := resources.OomScoreAdj
			specgen.SetLinuxResourcesOOMScoreAdj(int(oomScoreAdj))
		}

		// systemd cgroup paths use the slice:prefix:name form; the cgroupfs
		// manager uses a plain parent/child path.
		if sb.cgroupParent != "" {
			if s.config.CgroupManager == "systemd" {
				cgPath := sb.cgroupParent + ":" + "ocid" + ":" + containerID
				specgen.SetLinuxCgroupsPath(cgPath)
			} else {
				specgen.SetLinuxCgroupsPath(sb.cgroupParent + "/" + containerID)
			}
		}

		// Apply capability additions/drops requested by the kubelet.
		capabilities := linux.GetSecurityContext().GetCapabilities()
		if capabilities != nil {
			addCaps := capabilities.AddCapabilities
			if addCaps != nil {
				for _, cap := range addCaps {
					if err := specgen.AddProcessCapability(cap); err != nil {
						return nil, err
					}
				}
			}

			dropCaps := capabilities.DropCapabilities
			if dropCaps != nil {
				for _, cap := range dropCaps {
					if err := specgen.DropProcessCapability(cap); err != nil {
						return nil, err
					}
				}
			}
		}

		// The container shares the sandbox's SELinux process and mount labels.
		specgen.SetProcessSelinuxLabel(sb.processLabel)
		specgen.SetLinuxMountLabel(sb.mountLabel)

		if linux.GetSecurityContext() != nil {
			// NOTE(review): user.Value is used for both UID and GID here —
			// there is no separate RunAsGroup handling; confirm intended.
			user := linux.GetSecurityContext().GetRunAsUser()
			specgen.SetProcessUID(uint32(user.Value))
			specgen.SetProcessGID(uint32(user.Value))
			groups := linux.GetSecurityContext().SupplementalGroups
			for _, group := range groups {
				specgen.AddProcessAdditionalGid(uint32(group))
			}
		}
	}
	// Join the namespace paths for the pod sandbox container.
	podInfraState := s.runtime.ContainerStatus(sb.infraContainer)

	logrus.Debugf("pod container state %+v", podInfraState)

	// Share the infra container's IPC namespace via its /proc path.
	ipcNsPath := fmt.Sprintf("/proc/%d/ns/ipc", podInfraState.Pid)
	if err := specgen.AddOrReplaceLinuxNamespace("ipc", ipcNsPath); err != nil {
		return nil, err
	}

	netNsPath := sb.netNsPath()
	if netNsPath == "" {
		// The sandbox does not have a permanent namespace,
		// it's on the host one.
		netNsPath = fmt.Sprintf("/proc/%d/ns/net", podInfraState.Pid)
	}

	if err := specgen.AddOrReplaceLinuxNamespace("network", netNsPath); err != nil {
		return nil, err
	}

	imageSpec := containerConfig.GetImage()
	if imageSpec == nil {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image is nil")
	}

	image := imageSpec.Image
	if image == "" {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image.Image is empty")
	}

	// bind mount the pod shm
	specgen.AddBindMount(sb.shmPath, "/dev/shm", []string{"rw"})

	if sb.resolvPath != "" {
		// bind mount the pod resolver file
		specgen.AddBindMount(sb.resolvPath, "/etc/resolv.conf", []string{"ro"})
	}

	// ocid/* bookkeeping annotations used to reconstruct container state.
	specgen.AddAnnotation("ocid/name", containerName)
	specgen.AddAnnotation("ocid/sandbox_id", sb.id)
	specgen.AddAnnotation("ocid/sandbox_name", sb.infraContainer.Name())
	specgen.AddAnnotation("ocid/container_type", containerTypeContainer)
	specgen.AddAnnotation("ocid/log_path", logPath)
	specgen.AddAnnotation("ocid/tty", fmt.Sprintf("%v", containerConfig.Tty))
	specgen.AddAnnotation("ocid/image", image)

	// Metadata, labels and annotations are stored JSON-encoded so they can be
	// round-tripped through the spec.
	metadataJSON, err := json.Marshal(metadata)
	if err != nil {
		return nil, err
	}
	specgen.AddAnnotation("ocid/metadata", string(metadataJSON))

	labelsJSON, err := json.Marshal(labels)
	if err != nil {
		return nil, err
	}
	specgen.AddAnnotation("ocid/labels", string(labelsJSON))

	annotationsJSON, err := json.Marshal(annotations)
	if err != nil {
		return nil, err
	}
	specgen.AddAnnotation("ocid/annotations", string(annotationsJSON))

	if err = s.setupSeccomp(&specgen, containerName, sb.annotations); err != nil {
		return nil, err
	}

	// Allocate the container's storage layer.
	metaname := metadata.Name
	attempt := metadata.Attempt
	containerInfo, err := s.storage.CreateContainer(s.imageContext,
		sb.name, sb.id,
		image, image,
		containerName, containerID,
		metaname,
		attempt,
		sb.mountLabel,
		nil)
	if err != nil {
		return nil, err
	}

	// Mount the container's root filesystem.
	mountPoint, err := s.storage.StartContainer(containerID)
	if err != nil {
		return nil, fmt.Errorf("failed to mount container %s(%s): %v", containerName, containerID, err)
	}

	// Resolve the final entrypoint/args from the CRI config and image config.
	processArgs, err := buildOCIProcessArgs(containerConfig, containerInfo.Config)
	if err != nil {
		return nil, err
	}
	specgen.SetProcessArgs(processArgs)

	// Add environment variables from CRI and image config
	envs := containerConfig.GetEnvs()
	if envs != nil {
		for _, item := range envs {
			key := item.Key
			value := item.Value
			if key == "" {
				continue
			}
			specgen.AddProcessEnv(key, value)
		}
	}
	for _, item := range containerInfo.Config.Config.Env {
		parts := strings.SplitN(item, "=", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid env from image: %s", item)
		}

		if parts[0] == "" {
			continue
		}
		specgen.AddProcessEnv(parts[0], parts[1])
	}

	// Set working directory
	// Pick it up from image config first and override if specified in CRI
	imageCwd := containerInfo.Config.Config.WorkingDir
	specgen.SetProcessCwd(imageCwd)

	cwd := containerConfig.WorkingDir
	if cwd != "" {
		specgen.SetProcessCwd(cwd)
	}

	// by default, the root path is an empty string. set it now.
	specgen.SetRootPath(mountPoint)

	// Persist the generated spec to both the permanent and runtime dirs.
	saveOptions := generate.ExportOptions{}
	if err = specgen.SaveToFile(filepath.Join(containerInfo.Dir, "config.json"), saveOptions); err != nil {
		return nil, err
	}
	if err = specgen.SaveToFile(filepath.Join(containerInfo.RunDir, "config.json"), saveOptions); err != nil {
		return nil, err
	}

	container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.Tty, sb.privileged)
	if err != nil {
		return nil, err
	}

	return container, nil
}
|
|
|
|
|
2016-11-23 09:41:48 +00:00
|
|
|
func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnotations map[string]string) error {
|
|
|
|
profile, ok := sbAnnotations["security.alpha.kubernetes.io/seccomp/container/"+cname]
|
|
|
|
if !ok {
|
|
|
|
profile, ok = sbAnnotations["security.alpha.kubernetes.io/seccomp/pod"]
|
|
|
|
if !ok {
|
|
|
|
// running w/o seccomp, aka unconfined
|
|
|
|
profile = seccompUnconfined
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !s.seccompEnabled {
|
|
|
|
if profile != seccompUnconfined {
|
|
|
|
return fmt.Errorf("seccomp is not enabled in your kernel, cannot run with a profile")
|
|
|
|
}
|
|
|
|
logrus.Warn("seccomp is not enabled in your kernel, running container without profile")
|
|
|
|
}
|
|
|
|
if profile == seccompUnconfined {
|
|
|
|
// running w/o seccomp, aka unconfined
|
|
|
|
specgen.Spec().Linux.Seccomp = nil
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if profile == seccompRuntimeDefault {
|
|
|
|
return seccomp.LoadProfileFromStruct(s.seccompProfile, specgen)
|
|
|
|
}
|
|
|
|
if !strings.HasPrefix(profile, seccompLocalhostPrefix) {
|
|
|
|
return fmt.Errorf("unknown seccomp profile option: %q", profile)
|
|
|
|
}
|
|
|
|
//file, err := ioutil.ReadFile(filepath.Join(s.seccompProfileRoot, strings.TrimPrefix(profile, seccompLocalhostPrefix)))
|
|
|
|
//if err != nil {
|
|
|
|
//return err
|
|
|
|
//}
|
|
|
|
// TODO(runcom): setup from provided node's seccomp profile
|
|
|
|
// can't do this yet, see https://issues.k8s.io/36997
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-11-22 16:49:54 +00:00
|
|
|
func (s *Server) generateContainerIDandName(podName string, name string, attempt uint32) (string, string, error) {
|
|
|
|
var (
|
|
|
|
err error
|
|
|
|
id = stringid.GenerateNonCryptoID()
|
|
|
|
)
|
|
|
|
nameStr := fmt.Sprintf("%s-%s-%v", podName, name, attempt)
|
|
|
|
if name == "infra" {
|
|
|
|
nameStr = fmt.Sprintf("%s-%s", podName, name)
|
|
|
|
}
|
|
|
|
if name, err = s.reserveContainerName(id, nameStr); err != nil {
|
|
|
|
return "", "", err
|
|
|
|
}
|
|
|
|
return id, name, err
|
|
|
|
}
|
2016-11-30 08:19:36 +00:00
|
|
|
|
|
|
|
// getAppArmorProfileName gets the profile name for the given container.
|
|
|
|
func (s *Server) getAppArmorProfileName(annotations map[string]string, ctrName string) string {
|
|
|
|
profile := apparmor.GetProfileNameFromPodAnnotations(annotations, ctrName)
|
|
|
|
|
|
|
|
if profile == "" {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
|
|
|
if profile == apparmor.ProfileRuntimeDefault {
|
|
|
|
// If the value is runtime/default, then return default profile.
|
|
|
|
return s.appArmorProfile
|
|
|
|
}
|
|
|
|
|
2016-12-02 07:13:41 +00:00
|
|
|
return strings.TrimPrefix(profile, apparmor.ProfileNamePrefix)
|
2016-11-30 08:19:36 +00:00
|
|
|
}
|