[refactor] move core implementation code from server to manager
Signed-off-by: Crazykev <crazykev@zju.edu.cn>
parent 322f7310e9
commit 24b8336a14
66 changed files with 683 additions and 3806 deletions
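The pattern repeated throughout the diff below is mechanical: CRI request handlers that lived on Server and spoke protobuf (*pb.XxxRequest in, *pb.XxxResponse out) become plain methods on Manager that take and return ordinary Go values. As a rough illustration only (not code from this commit), a gRPC-facing server can then stay a thin shim over the manager. The Server wrapper, its manager field, and this handler body are assumptions; Manager.StartContainer(cID string) error is the signature that actually appears in the diff.

package server

import (
	"golang.org/x/net/context"

	"github.com/kubernetes-incubator/cri-o/manager"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// Server is a hypothetical thin CRI shim around the manager; the real
// server-side wiring lives in files of this commit not shown in this excerpt.
type Server struct {
	manager *manager.Manager // assumed field name
}

// StartContainer unwraps the protobuf request, delegates the real work to
// the manager, and wraps the result back into a protobuf response.
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
	if err := s.manager.StartContainer(req.GetContainerId()); err != nil {
		return nil, err
	}
	return &pb.StartContainerResponse{}, nil
}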
@@ -5,7 +5,7 @@ import (
 	"path/filepath"
 	"text/template"
 
-	"github.com/kubernetes-incubator/cri-o/server"
+	"github.com/kubernetes-incubator/cri-o/manager"
 	"github.com/opencontainers/runc/libcontainer/selinux"
 	"github.com/urfave/cli"
 )
@@ -82,18 +82,18 @@ pause = "{{ .Pause }}"
 // template. Add it once the storage code has been merged.
 
 // DefaultConfig returns the default configuration for ocid.
-func DefaultConfig() *server.Config {
-	return &server.Config{
-		RootConfig: server.RootConfig{
+func DefaultConfig() *manager.Config {
+	return &manager.Config{
+		RootConfig: manager.RootConfig{
 			Root: ocidRoot,
 			SandboxDir: filepath.Join(ocidRoot, "sandboxes"),
 			ContainerDir: filepath.Join(ocidRoot, "containers"),
 			LogDir: "/var/log/ocid/pods",
 		},
-		APIConfig: server.APIConfig{
+		APIConfig: manager.APIConfig{
 			Listen: "/var/run/ocid.sock",
 		},
-		RuntimeConfig: server.RuntimeConfig{
+		RuntimeConfig: manager.RuntimeConfig{
 			Runtime: "/usr/bin/runc",
 			Conmon: conmonPath,
 			ConmonEnv: []string{
@@ -103,7 +103,7 @@ func DefaultConfig() *server.Config {
 			SeccompProfile: seccompProfilePath,
 			ApparmorProfile: apparmorProfileName,
 		},
-		ImageConfig: server.ImageConfig{
+		ImageConfig: manager.ImageConfig{
 			Pause: pausePath,
 			ImageDir: filepath.Join(ocidRoot, "store"),
 		},
@@ -122,7 +122,7 @@ var configCommand = cli.Command{
 	Action: func(c *cli.Context) error {
 		// At this point, app.Before has already parsed the user's chosen
 		// config file. So no need to handle that here.
-		config := c.App.Metadata["config"].(*server.Config)
+		config := c.App.Metadata["config"].(*manager.Config)
 		if c.Bool("default") {
 			config = DefaultConfig()
 		}

@@ -7,6 +7,7 @@ import (
 	"sort"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/kubernetes-incubator/cri-o/manager"
 	"github.com/kubernetes-incubator/cri-o/server"
 	"github.com/opencontainers/runc/libcontainer/selinux"
 	"github.com/urfave/cli"
@@ -16,7 +17,7 @@ import (
 
 const ociConfigPath = "/etc/ocid/ocid.conf"
 
-func mergeConfig(config *server.Config, ctx *cli.Context) error {
+func mergeConfig(config *manager.Config, ctx *cli.Context) error {
 	// Don't parse the config if the user explicitly set it to "".
 	if path := ctx.GlobalString("config"); path != "" {
 		if err := config.FromFile(path); err != nil {
@@ -158,7 +159,7 @@ func main() {
 
 	app.Before = func(c *cli.Context) error {
 		// Load the configuration file.
-		config := c.App.Metadata["config"].(*server.Config)
+		config := c.App.Metadata["config"].(*manager.Config)
 		if err := mergeConfig(config, c); err != nil {
 			return err
 		}
@@ -195,7 +196,7 @@ func main() {
 	}
 
 	app.Action = func(c *cli.Context) error {
-		config := c.App.Metadata["config"].(*server.Config)
+		config := c.App.Metadata["config"].(*manager.Config)
 
 		if !config.SELinux {
 			selinux.SetDisabled()

@@ -1,4 +1,4 @@
-package server
+package manager
 
 import (
 	"bytes"

@@ -1,4 +1,4 @@
-package server
+package manager
 
 import (
 	"fmt"
@@ -13,22 +13,17 @@ const (
 	containerTypeContainer = "container"
 )
 
-type containerRequest interface {
-	GetContainerId() string
-}
-
-func (s *Server) getContainerFromRequest(req containerRequest) (*oci.Container, error) {
-	ctrID := req.GetContainerId()
+func (m *Manager) getContainerWithPartialID(ctrID string) (*oci.Container, error) {
 	if ctrID == "" {
 		return nil, fmt.Errorf("container ID should not be empty")
 	}
 
-	containerID, err := s.ctrIDIndex.Get(ctrID)
+	containerID, err := m.ctrIDIndex.Get(ctrID)
 	if err != nil {
 		return nil, fmt.Errorf("container with ID starting with %s not found: %v", ctrID, err)
 	}
 
-	c := s.state.containers.Get(containerID)
+	c := m.state.containers.Get(containerID)
 	if c == nil {
 		return nil, fmt.Errorf("specified container not found: %s", containerID)
 	}

@@ -1,11 +0,0 @@
-package server
-
-import (
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
-
-// Attach prepares a streaming endpoint to attach to a running container.
-func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachResponse, error) {
-	return nil, nil
-}

@@ -1,4 +1,4 @@
-package server
+package manager
 
 import (
 	"encoding/json"
@@ -11,13 +11,12 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/kubernetes-incubator/cri-o/manager/apparmor"
+	"github.com/kubernetes-incubator/cri-o/manager/seccomp"
 	"github.com/kubernetes-incubator/cri-o/oci"
-	"github.com/kubernetes-incubator/cri-o/server/apparmor"
-	"github.com/kubernetes-incubator/cri-o/server/seccomp"
 	"github.com/kubernetes-incubator/cri-o/utils"
 	"github.com/opencontainers/runc/libcontainer/label"
 	"github.com/opencontainers/runtime-tools/generate"
-	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
@@ -28,45 +27,42 @@ const (
 )
 
 // CreateContainer creates a new container in specified PodSandbox
-func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) {
-	logrus.Debugf("CreateContainerRequest %+v", req)
-	sbID := req.GetPodSandboxId()
+func (m *Manager) CreateContainer(sbID string, containerConfig *pb.ContainerConfig, sandboxConfig *pb.PodSandboxConfig) (string, error) {
 	if sbID == "" {
-		return nil, fmt.Errorf("PodSandboxId should not be empty")
+		return "", fmt.Errorf("PodSandboxId should not be empty")
 	}
 
-	sandboxID, err := s.podIDIndex.Get(sbID)
+	sandboxID, err := m.podIDIndex.Get(sbID)
 	if err != nil {
-		return nil, fmt.Errorf("PodSandbox with ID starting with %s not found: %v", sbID, err)
+		return "", fmt.Errorf("PodSandbox with ID starting with %s not found: %v", sbID, err)
 	}
 
-	sb := s.getSandbox(sandboxID)
+	sb := m.getSandbox(sandboxID)
 	if sb == nil {
-		return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID)
+		return "", fmt.Errorf("specified sandbox not found: %s", sandboxID)
 	}
 
 	// The config of the container
-	containerConfig := req.GetConfig()
 	if containerConfig == nil {
-		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig is nil")
+		return "", fmt.Errorf("CreateContainerRequest.ContainerConfig is nil")
 	}
 
 	name := containerConfig.GetMetadata().GetName()
 	if name == "" {
-		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Name is empty")
+		return "", fmt.Errorf("CreateContainerRequest.ContainerConfig.Name is empty")
 	}
 
 	attempt := containerConfig.GetMetadata().GetAttempt()
-	containerID, containerName, err := s.generateContainerIDandName(sb.name, name, attempt)
+	containerID, containerName, err := m.generateContainerIDandName(sb.name, name, attempt)
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 
 	// containerDir is the dir for the container bundle.
-	containerDir := filepath.Join(s.runtime.ContainerDir(), containerID)
+	containerDir := filepath.Join(m.runtime.ContainerDir(), containerID)
 	defer func() {
 		if err != nil {
-			s.releaseContainerName(containerName)
+			m.releaseContainerName(containerName)
 			err1 := os.RemoveAll(containerDir)
 			if err1 != nil {
 				logrus.Warnf("Failed to cleanup container directory: %v", err1)
@@ -75,42 +71,37 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
 	}()
 
 	if _, err = os.Stat(containerDir); err == nil {
-		return nil, fmt.Errorf("container (%s) already exists", containerDir)
+		return "", fmt.Errorf("container (%s) already exists", containerDir)
 	}
 
 	if err = os.MkdirAll(containerDir, 0755); err != nil {
-		return nil, err
+		return "", err
 	}
 
-	container, err := s.createSandboxContainer(containerID, containerName, sb, containerDir, containerConfig)
+	container, err := m.createSandboxContainer(containerID, containerName, sb, containerDir, containerConfig)
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 
-	if err = s.runtime.CreateContainer(container); err != nil {
-		return nil, err
+	if err = m.runtime.CreateContainer(container); err != nil {
+		return "", err
 	}
 
-	if err = s.runtime.UpdateStatus(container); err != nil {
-		return nil, err
+	if err = m.runtime.UpdateStatus(container); err != nil {
+		return "", err
 	}
 
-	s.addContainer(container)
+	m.addContainer(container)
 
-	if err = s.ctrIDIndex.Add(containerID); err != nil {
-		s.removeContainer(container)
-		return nil, err
+	if err = m.ctrIDIndex.Add(containerID); err != nil {
+		m.removeContainer(container)
+		return "", err
 	}
 
-	resp := &pb.CreateContainerResponse{
-		ContainerId: &containerID,
-	}
-
-	logrus.Debugf("CreateContainerResponse: %+v", resp)
-	return resp, nil
+	return containerID, nil
 }
 
-func (s *Server) createSandboxContainer(containerID string, containerName string, sb *sandbox, containerDir string, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
+func (m *Manager) createSandboxContainer(containerID string, containerName string, sb *sandbox, containerDir string, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
 	if sb == nil {
 		return nil, errors.New("createSandboxContainer needs a sandbox")
 	}
@@ -121,11 +112,21 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
 	// here set it to be "rootfs".
 	specgen.SetRootPath("rootfs")
 
+	processArgs := []string{}
+	commands := containerConfig.GetCommand()
 	args := containerConfig.GetArgs()
-	if args == nil {
-		args = []string{"/bin/sh"}
+	if commands == nil && args == nil {
+		// TODO: override with image's config in #189
+		processArgs = []string{"/bin/sh"}
 	}
-	specgen.SetProcessArgs(args)
+	if commands != nil {
+		processArgs = append(processArgs, commands...)
+	}
+	if args != nil {
+		processArgs = append(processArgs, args...)
+	}
+
+	specgen.SetProcessArgs(processArgs)
 
 	cwd := containerConfig.GetWorkingDir()
 	if cwd == "" {
@@ -185,11 +186,11 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
 	}
 
 	// set this container's apparmor profile if it is set by sandbox
-	if s.appArmorEnabled {
-		appArmorProfileName := s.getAppArmorProfileName(sb.annotations, metadata.GetName())
+	if m.appArmorEnabled {
+		appArmorProfileName := m.getAppArmorProfileName(sb.annotations, metadata.GetName())
 		if appArmorProfileName != "" {
 			// reload default apparmor profile if it is unloaded.
-			if s.appArmorProfile == apparmor.DefaultApparmorProfile {
+			if m.appArmorProfile == apparmor.DefaultApparmorProfile {
 				if err := apparmor.EnsureDefaultApparmorProfile(); err != nil {
 					return nil, err
 				}
@@ -276,7 +277,7 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
 		}
 	}
 	// Join the namespace paths for the pod sandbox container.
-	podInfraState := s.runtime.ContainerStatus(sb.infraContainer)
+	podInfraState := m.runtime.ContainerStatus(sb.infraContainer)
 
 	logrus.Debugf("pod container state %+v", podInfraState)
 
@@ -335,7 +336,7 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
 	}
 	specgen.AddAnnotation("ocid/annotations", string(annotationsJSON))
 
-	if err = s.setupSeccomp(&specgen, containerName, sb.annotations); err != nil {
+	if err = m.setupSeccomp(&specgen, containerName, sb.annotations); err != nil {
 		return nil, err
 	}
 
@@ -357,7 +358,7 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
 	return container, nil
 }
 
-func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnotations map[string]string) error {
+func (m *Manager) setupSeccomp(specgen *generate.Generator, cname string, sbAnnotations map[string]string) error {
 	profile, ok := sbAnnotations["security.alpha.kubernetes.io/seccomp/container/"+cname]
 	if !ok {
 		profile, ok = sbAnnotations["security.alpha.kubernetes.io/seccomp/pod"]
@@ -366,7 +367,7 @@ func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnot
 			profile = seccompUnconfined
 		}
 	}
-	if !s.seccompEnabled {
+	if !m.seccompEnabled {
 		if profile != seccompUnconfined {
 			return fmt.Errorf("seccomp is not enabled in your kernel, cannot run with a profile")
 		}
@@ -378,7 +379,7 @@ func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnot
 		return nil
 	}
 	if profile == seccompRuntimeDefault {
-		return seccomp.LoadProfileFromStruct(s.seccompProfile, specgen)
+		return seccomp.LoadProfileFromStruct(m.seccompProfile, specgen)
 	}
 	if !strings.HasPrefix(profile, seccompLocalhostPrefix) {
 		return fmt.Errorf("unknown seccomp profile option: %q", profile)
@@ -392,7 +393,7 @@ func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnot
 	return nil
 }
 
-func (s *Server) generateContainerIDandName(podName string, name string, attempt uint32) (string, string, error) {
+func (m *Manager) generateContainerIDandName(podName string, name string, attempt uint32) (string, string, error) {
 	var (
 		err error
 		id = stringid.GenerateNonCryptoID()
@@ -401,14 +402,14 @@ func (s *Server) generateContainerIDandName(podName string, name string, attempt
 	if name == "infra" {
 		nameStr = fmt.Sprintf("%s-%s", podName, name)
 	}
-	if name, err = s.reserveContainerName(id, nameStr); err != nil {
+	if name, err = m.reserveContainerName(id, nameStr); err != nil {
 		return "", "", err
 	}
 	return id, name, err
 }
 
 // getAppArmorProfileName gets the profile name for the given container.
-func (s *Server) getAppArmorProfileName(annotations map[string]string, ctrName string) string {
+func (m *Manager) getAppArmorProfileName(annotations map[string]string, ctrName string) string {
 	profile := apparmor.GetProfileNameFromPodAnnotations(annotations, ctrName)
 
 	if profile == "" {
@@ -417,7 +418,7 @@ func (s *Server) getAppArmorProfileName(annotations map[string]string, ctrName s
 
 	if profile == apparmor.ProfileRuntimeDefault {
 		// If the value is runtime/default, then return default profile.
-		return s.appArmorProfile
+		return m.appArmorProfile
 	}
 
 	return strings.TrimPrefix(profile, apparmor.ProfileNamePrefix)

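Besides the Server-to-Manager renaming, the hunk at -121,11 +112,21 above also changes how the container process arguments are built: instead of honoring only Args (defaulting to /bin/sh), the new code concatenates Command and Args and falls back to /bin/sh only when both are unset. A standalone sketch of that merge rule follows; buildProcessArgs is a hypothetical helper name used only for illustration.

package main

import "fmt"

// buildProcessArgs mirrors the argument-merging logic added to
// createSandboxContainer: Command first, then Args, with /bin/sh used only
// when neither is given (image-config override is still a TODO upstream).
func buildProcessArgs(commands, args []string) []string {
	processArgs := []string{}
	if commands == nil && args == nil {
		processArgs = []string{"/bin/sh"}
	}
	if commands != nil {
		processArgs = append(processArgs, commands...)
	}
	if args != nil {
		processArgs = append(processArgs, args...)
	}
	return processArgs
}

func main() {
	fmt.Println(buildProcessArgs([]string{"nginx"}, []string{"-g", "daemon off;"})) // [nginx -g daemon off;]
	fmt.Println(buildProcessArgs(nil, nil))                                         // [/bin/sh]
}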
@@ -1,11 +0,0 @@
-package server
-
-import (
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
-
-// Exec prepares a streaming endpoint to execute a command in the container.
-func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) {
-	return nil, nil
-}

@@ -1,46 +1,35 @@
-package server
+package manager
 
 import (
 	"fmt"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/kubernetes-incubator/cri-o/oci"
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
 // ExecSync runs a command in a container synchronously.
-func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.ExecSyncResponse, error) {
-	logrus.Debugf("ExecSyncRequest %+v", req)
-	c, err := s.getContainerFromRequest(req)
+func (m *Manager) ExecSync(ctrID string, cmd []string, timeout int64) (*oci.ExecSyncResponse, error) {
+	c, err := m.getContainerWithPartialID(ctrID)
 	if err != nil {
 		return nil, err
 	}
 
-	if err = s.runtime.UpdateStatus(c); err != nil {
+	if err = m.runtime.UpdateStatus(c); err != nil {
 		return nil, err
 	}
 
-	cState := s.runtime.ContainerStatus(c)
+	cState := m.runtime.ContainerStatus(c)
 	if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
 		return nil, fmt.Errorf("container is not created or running")
 	}
 
-	cmd := req.GetCmd()
 	if cmd == nil {
 		return nil, fmt.Errorf("exec command cannot be empty")
 	}
 
-	execResp, err := s.runtime.ExecSync(c, cmd, req.GetTimeout())
+	execResp, err := m.runtime.ExecSync(c, cmd, timeout)
 	if err != nil {
 		return nil, err
 	}
-	resp := &pb.ExecSyncResponse{
-		Stdout:   execResp.Stdout,
-		Stderr:   execResp.Stderr,
-		ExitCode: &execResp.ExitCode,
-	}
 
-	logrus.Debugf("ExecSyncResponse: %+v", resp)
-	return resp, nil
+	return execResp, nil
 }

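ExecSync now hands back the runtime-level *oci.ExecSyncResponse instead of building the protobuf response itself. The deleted lines above show exactly which fields a CRI-facing caller would copy back; a hedged sketch of such an adapter follows (the helper name and its placement in a server package are assumptions, not part of this commit).

package server

import (
	"github.com/kubernetes-incubator/cri-o/oci"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// execSyncResponseFromOCI is a hypothetical helper that rebuilds the protobuf
// response from the result Manager.ExecSync now returns, mirroring the
// Stdout/Stderr/ExitCode copying removed in the hunk above.
func execSyncResponseFromOCI(execResp *oci.ExecSyncResponse) *pb.ExecSyncResponse {
	return &pb.ExecSyncResponse{
		Stdout:   execResp.Stdout,
		Stderr:   execResp.Stderr,
		ExitCode: &execResp.ExitCode,
	}
}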
@@ -1,9 +1,7 @@
-package server
+package manager
 
 import (
-	"github.com/Sirupsen/logrus"
 	"github.com/kubernetes-incubator/cri-o/oci"
-	"golang.org/x/net/context"
 	"k8s.io/kubernetes/pkg/fields"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
@@ -27,20 +25,18 @@ func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool {
 }
 
 // ListContainers lists all containers by filters.
-func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
-	logrus.Debugf("ListContainersRequest %+v", req)
+func (m *Manager) ListContainers(filter *pb.ContainerFilter) ([]*pb.Container, error) {
 	var ctrs []*pb.Container
-	filter := req.Filter
-	ctrList := s.state.containers.List()
+	ctrList := m.state.containers.List()
 
 	// Filter using container id and pod id first.
 	if filter != nil {
 		if filter.Id != nil {
-			id, err := s.ctrIDIndex.Get(*filter.Id)
+			id, err := m.ctrIDIndex.Get(*filter.Id)
 			if err != nil {
 				return nil, err
 			}
-			c := s.state.containers.Get(id)
+			c := m.state.containers.Get(id)
 			if c != nil {
 				if filter.PodSandboxId != nil {
 					if c.Sandbox() == *filter.PodSandboxId {
@@ -55,7 +51,7 @@ func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersReque
 				}
 			} else {
 				if filter.PodSandboxId != nil {
-					pod := s.state.sandboxes[*filter.PodSandboxId]
+					pod := m.state.sandboxes[*filter.PodSandboxId]
 					if pod == nil {
 						ctrList = []*oci.Container{}
 					} else {
@@ -66,12 +62,12 @@ func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersReque
 	}
 
 	for _, ctr := range ctrList {
-		if err := s.runtime.UpdateStatus(ctr); err != nil {
+		if err := m.runtime.UpdateStatus(ctr); err != nil {
 			return nil, err
 		}
 
 		podSandboxID := ctr.Sandbox()
-		cState := s.runtime.ContainerStatus(ctr)
+		cState := m.runtime.ContainerStatus(ctr)
 		created := cState.Created.UnixNano()
 		rState := pb.ContainerState_CONTAINER_UNKNOWN
 		cID := ctr.ID()
@@ -97,14 +93,10 @@ func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersReque
 		c.State = &rState
 
 		// Filter by other criteria such as state and labels.
-		if filterContainer(c, req.Filter) {
+		if filterContainer(c, filter) {
 			ctrs = append(ctrs, c)
 		}
 	}
 
-	resp := &pb.ListContainersResponse{
-		Containers: ctrs,
-	}
-	logrus.Debugf("ListContainersResponse: %+v", resp)
-	return resp, nil
+	return ctrs, nil
 }

@@ -1,11 +0,0 @@
-package server
-
-import (
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
-
-// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
-func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
-	return nil, nil
-}

@@ -1,53 +1,47 @@
-package server
+package manager
 
 import (
 	"fmt"
 	"os"
 	"path/filepath"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/kubernetes-incubator/cri-o/oci"
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
 // RemoveContainer removes the container. If the container is running, the container
 // should be force removed.
-func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) {
-	logrus.Debugf("RemoveContainerRequest %+v", req)
-	c, err := s.getContainerFromRequest(req)
+func (m *Manager) RemoveContainer(ctrID string) error {
+	c, err := m.getContainerWithPartialID(ctrID)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	if err := s.runtime.UpdateStatus(c); err != nil {
-		return nil, fmt.Errorf("failed to update container state: %v", err)
+	if err := m.runtime.UpdateStatus(c); err != nil {
+		return fmt.Errorf("failed to update container state: %v", err)
 	}
 
-	cState := s.runtime.ContainerStatus(c)
+	cState := m.runtime.ContainerStatus(c)
 	if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
-		if err := s.runtime.StopContainer(c); err != nil {
-			return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
+		if err := m.runtime.StopContainer(c); err != nil {
+			return fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
 		}
 	}
 
-	if err := s.runtime.DeleteContainer(c); err != nil {
-		return nil, fmt.Errorf("failed to delete container %s: %v", c.ID(), err)
+	if err := m.runtime.DeleteContainer(c); err != nil {
+		return fmt.Errorf("failed to delete container %s: %v", c.ID(), err)
 	}
 
-	containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID())
+	containerDir := filepath.Join(m.runtime.ContainerDir(), c.ID())
 	if err := os.RemoveAll(containerDir); err != nil {
-		return nil, fmt.Errorf("failed to remove container %s directory: %v", c.ID(), err)
+		return fmt.Errorf("failed to remove container %s directory: %v", c.ID(), err)
 	}
 
-	s.releaseContainerName(c.Name())
-	s.removeContainer(c)
+	m.releaseContainerName(c.Name())
+	m.removeContainer(c)
 
-	if err := s.ctrIDIndex.Delete(c.ID()); err != nil {
-		return nil, err
+	if err := m.ctrIDIndex.Delete(c.ID()); err != nil {
+		return err
 	}
 
-	resp := &pb.RemoveContainerResponse{}
-	logrus.Debugf("RemoveContainerResponse: %+v", resp)
-	return resp, nil
+	return nil
 }

@@ -1,26 +1,17 @@
-package server
+package manager
 
-import (
-	"fmt"
-
-	"github.com/Sirupsen/logrus"
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
+import "fmt"
 
 // StartContainer starts the container.
-func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
-	logrus.Debugf("StartContainerRequest %+v", req)
-	c, err := s.getContainerFromRequest(req)
+func (m *Manager) StartContainer(cID string) error {
+	c, err := m.getContainerWithPartialID(cID)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	if err := s.runtime.StartContainer(c); err != nil {
-		return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
+	if err := m.runtime.StartContainer(c); err != nil {
+		return fmt.Errorf("failed to start container %s: %v", c.ID(), err)
 	}
 
-	resp := &pb.StartContainerResponse{}
-	logrus.Debugf("StartContainerResponse %+v", resp)
-	return resp, nil
+	return nil
 }

@@ -1,59 +1,53 @@
-package server
+package manager
 
 import (
-	"github.com/Sirupsen/logrus"
 	"github.com/kubernetes-incubator/cri-o/oci"
-	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
 // ContainerStatus returns status of the container.
-func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
-	logrus.Debugf("ContainerStatusRequest %+v", req)
-	c, err := s.getContainerFromRequest(req)
+func (m *Manager) ContainerStatus(ctrID string) (*pb.ContainerStatus, error) {
+	c, err := m.getContainerWithPartialID(ctrID)
 	if err != nil {
 		return nil, err
 	}
 
-	if err := s.runtime.UpdateStatus(c); err != nil {
+	if err := m.runtime.UpdateStatus(c); err != nil {
 		return nil, err
 	}
 
 	containerID := c.ID()
-	resp := &pb.ContainerStatusResponse{
-		Status: &pb.ContainerStatus{
-			Id:       &containerID,
-			Metadata: c.Metadata(),
-		},
+	status := &pb.ContainerStatus{
+		Id:       &containerID,
+		Metadata: c.Metadata(),
 	}
 
-	cState := s.runtime.ContainerStatus(c)
+	cState := m.runtime.ContainerStatus(c)
 	rStatus := pb.ContainerState_CONTAINER_UNKNOWN
 
 	switch cState.Status {
 	case oci.ContainerStateCreated:
 		rStatus = pb.ContainerState_CONTAINER_CREATED
 		created := cState.Created.UnixNano()
-		resp.Status.CreatedAt = int64Ptr(created)
+		status.CreatedAt = int64Ptr(created)
 	case oci.ContainerStateRunning:
 		rStatus = pb.ContainerState_CONTAINER_RUNNING
 		created := cState.Created.UnixNano()
-		resp.Status.CreatedAt = int64Ptr(created)
+		status.CreatedAt = int64Ptr(created)
 		started := cState.Started.UnixNano()
-		resp.Status.StartedAt = int64Ptr(started)
+		status.StartedAt = int64Ptr(started)
 	case oci.ContainerStateStopped:
 		rStatus = pb.ContainerState_CONTAINER_EXITED
 		created := cState.Created.UnixNano()
-		resp.Status.CreatedAt = int64Ptr(created)
+		status.CreatedAt = int64Ptr(created)
 		started := cState.Started.UnixNano()
-		resp.Status.StartedAt = int64Ptr(started)
+		status.StartedAt = int64Ptr(started)
 		finished := cState.Finished.UnixNano()
-		resp.Status.FinishedAt = int64Ptr(finished)
-		resp.Status.ExitCode = int32Ptr(cState.ExitCode)
+		status.FinishedAt = int64Ptr(finished)
+		status.ExitCode = int32Ptr(cState.ExitCode)
 	}
 
-	resp.Status.State = &rStatus
+	status.State = &rStatus
 
-	logrus.Debugf("ContainerStatusResponse: %+v", resp)
-	return resp, nil
+	return status, nil
 }

@@ -1,33 +1,27 @@
-package server
+package manager
 
 import (
 	"fmt"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/kubernetes-incubator/cri-o/oci"
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
 // StopContainer stops a running container with a grace period (i.e., timeout).
-func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
-	logrus.Debugf("StopContainerRequest %+v", req)
-	c, err := s.getContainerFromRequest(req)
+func (m *Manager) StopContainer(ctrID string, timeout int64) error {
+	c, err := m.getContainerWithPartialID(ctrID)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	if err := s.runtime.UpdateStatus(c); err != nil {
-		return nil, err
+	if err := m.runtime.UpdateStatus(c); err != nil {
+		return err
 	}
-	cStatus := s.runtime.ContainerStatus(c)
+	cStatus := m.runtime.ContainerStatus(c)
 	if cStatus.Status != oci.ContainerStateStopped {
-		if err := s.runtime.StopContainer(c); err != nil {
-			return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
+		if err := m.runtime.StopContainer(c); err != nil {
+			return fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
 		}
 	}
 
-	resp := &pb.StopContainerResponse{}
-	logrus.Debugf("StopContainerResponse: %+v", resp)
-	return resp, nil
+	return nil
 }

@@ -1,11 +0,0 @@
-package server
-
-import (
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
-
-// UpdateRuntimeConfig updates the configuration of a running container.
-func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (*pb.UpdateRuntimeConfigResponse, error) {
-	return nil, nil
-}

@@ -1,16 +0,0 @@
-package server
-
-import (
-	"github.com/Sirupsen/logrus"
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
-
-// ListImages lists existing images.
-func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
-	logrus.Debugf("ListImages: %+v", req)
-	// TODO
-	// containers/storage will take care of this by looking inside /var/lib/ocid/images
-	// and listing images.
-	return &pb.ListImagesResponse{}, nil
-}

@@ -1,4 +1,4 @@
-package server
+package manager
 
 import (
 	"errors"
@@ -6,20 +6,17 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/directory"
 	"github.com/containers/image/image"
 	"github.com/containers/image/transports"
-	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
 // PullImage pulls a image with authentication config.
-func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
-	logrus.Debugf("PullImage: %+v", req)
-	img := req.GetImage().GetImage()
+func (m *Manager) PullImage(imageSpec *pb.ImageSpec, auth *pb.AuthConfig, sandboxConfig *pb.PodSandboxConfig) error {
+	img := imageSpec.GetImage()
 	if img == "" {
-		return nil, errors.New("got empty imagespec name")
+		return errors.New("got empty imagespec name")
 	}
 
 	// TODO(runcom): deal with AuthConfig in req.GetAuth()
@@ -28,30 +25,30 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
 	// how do we pull in a specified sandbox?
 	tr, err := transports.ParseImageName(img)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
 	src, err := tr.NewImageSource(nil, nil)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	i := image.FromSource(src)
 	blobs, err := i.BlobDigests()
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	if err = os.Mkdir(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()), 0755); err != nil {
-		return nil, err
+	if err = os.Mkdir(filepath.Join(m.config.ImageDir, tr.StringWithinTransport()), 0755); err != nil {
+		return err
 	}
-	dir, err := directory.NewReference(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()))
+	dir, err := directory.NewReference(filepath.Join(m.config.ImageDir, tr.StringWithinTransport()))
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
 	dest, err := dir.NewImageDestination(nil)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// save blobs (layer + config for docker v2s2, layers only for docker v2s1 [the config is in the manifest])
 	for _, b := range blobs {
@@ -59,24 +56,24 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
 		var r io.ReadCloser
 		r, _, err = src.GetBlob(b)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if _, _, err = dest.PutBlob(r, b, -1); err != nil {
 			r.Close()
-			return nil, err
+			return err
 		}
 		r.Close()
 	}
 	// save manifest
-	m, _, err := i.Manifest()
+	mf, _, err := i.Manifest()
 	if err != nil {
-		return nil, err
+		return err
 	}
-	if err := dest.PutManifest(m); err != nil {
-		return nil, err
+	if err := dest.PutManifest(mf); err != nil {
+		return err
 	}
 
 	// TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://)
 
-	return &pb.PullImageResponse{}, nil
+	return nil
 }

@@ -1,13 +0,0 @@
-package server
-
-import (
-	"github.com/Sirupsen/logrus"
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
-
-// RemoveImage removes the image.
-func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
-	logrus.Debugf("RemoveImage: %+v", req)
-	return &pb.RemoveImageResponse{}, nil
-}

@@ -1,16 +0,0 @@
-package server
-
-import (
-	"github.com/Sirupsen/logrus"
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
-
-// ImageStatus returns the status of the image.
-func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
-	logrus.Debugf("ImageStatus: %+v", req)
-	// TODO
-	// containers/storage will take care of this by looking inside /var/lib/ocid/images
-	// and getting the image status
-	return &pb.ImageStatusResponse{}, nil
-}

@@ -1,4 +1,4 @@
-package server
+package manager
 
 import (
 	"encoding/json"
@@ -12,26 +12,21 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/registrar"
 	"github.com/docker/docker/pkg/truncindex"
+	"github.com/kubernetes-incubator/cri-o/manager/apparmor"
+	"github.com/kubernetes-incubator/cri-o/manager/seccomp"
 	"github.com/kubernetes-incubator/cri-o/oci"
-	"github.com/kubernetes-incubator/cri-o/server/apparmor"
-	"github.com/kubernetes-incubator/cri-o/server/seccomp"
-	"github.com/kubernetes-incubator/cri-o/utils"
 	"github.com/opencontainers/runc/libcontainer/label"
 	rspec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/rajatchopra/ocicni"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
-const (
-	runtimeAPIVersion = "v1alpha1"
-)
-
-// Server implements the RuntimeService and ImageService
-type Server struct {
+// Manager implements the RuntimeService and ImageService
+type Manager struct {
 	config Config
 	runtime *oci.Runtime
 	stateLock sync.Mutex
-	state *serverState
+	state *managerState
 	netPlugin ocicni.CNIPlugin
 	podNameIndex *registrar.Registrar
 	podIDIndex *truncindex.TruncIndex
@@ -45,42 +40,42 @@ type Server struct {
 	appArmorProfile string
 }
 
-func (s *Server) loadContainer(id string) error {
-	config, err := ioutil.ReadFile(filepath.Join(s.runtime.ContainerDir(), id, "config.json"))
+func (m *Manager) loadContainer(id string) error {
+	config, err := ioutil.ReadFile(filepath.Join(m.runtime.ContainerDir(), id, "config.json"))
 	if err != nil {
 		return err
 	}
-	var m rspec.Spec
-	if err = json.Unmarshal(config, &m); err != nil {
+	var s rspec.Spec
+	if err = json.Unmarshal(config, &s); err != nil {
 		return err
 	}
 	labels := make(map[string]string)
-	if err = json.Unmarshal([]byte(m.Annotations["ocid/labels"]), &labels); err != nil {
+	if err = json.Unmarshal([]byte(s.Annotations["ocid/labels"]), &labels); err != nil {
 		return err
 	}
-	name := m.Annotations["ocid/name"]
-	name, err = s.reserveContainerName(id, name)
+	name := s.Annotations["ocid/name"]
+	name, err = m.reserveContainerName(id, name)
 	if err != nil {
 		return err
 	}
 	var metadata pb.ContainerMetadata
-	if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil {
+	if err = json.Unmarshal([]byte(s.Annotations["ocid/metadata"]), &metadata); err != nil {
 		return err
 	}
-	sb := s.getSandbox(m.Annotations["ocid/sandbox_id"])
+	sb := m.getSandbox(s.Annotations["ocid/sandbox_id"])
 	if sb == nil {
-		logrus.Warnf("could not get sandbox with id %s, skipping", m.Annotations["ocid/sandbox_id"])
+		logrus.Warnf("could not get sandbox with id %s, skipping", s.Annotations["ocid/sandbox_id"])
 		return nil
 	}
 
 	var tty bool
-	if v := m.Annotations["ocid/tty"]; v == "true" {
+	if v := s.Annotations["ocid/tty"]; v == "true" {
 		tty = true
 	}
-	containerPath := filepath.Join(s.runtime.ContainerDir(), id)
+	containerPath := filepath.Join(m.runtime.ContainerDir(), id)
 
 	var img *pb.ImageSpec
-	image, ok := m.Annotations["ocid/image"]
+	image, ok := s.Annotations["ocid/image"]
 	if ok {
 		img = &pb.ImageSpec{
 			Image: &image,
@@ -88,19 +83,19 @@ func (s *Server) loadContainer(id string) error {
 	}
 
 	annotations := make(map[string]string)
-	if err = json.Unmarshal([]byte(m.Annotations["ocid/annotations"]), &annotations); err != nil {
+	if err = json.Unmarshal([]byte(s.Annotations["ocid/annotations"]), &annotations); err != nil {
 		return err
 	}
 
-	ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations["ocid/log_path"], sb.netNs(), labels, annotations, img, &metadata, sb.id, tty)
+	ctr, err := oci.NewContainer(id, name, containerPath, s.Annotations["ocid/log_path"], sb.netNs(), labels, annotations, img, &metadata, sb.id, tty)
 	if err != nil {
 		return err
 	}
-	s.addContainer(ctr)
-	if err = s.runtime.UpdateStatus(ctr); err != nil {
+	m.addContainer(ctr)
+	if err = m.runtime.UpdateStatus(ctr); err != nil {
 		logrus.Warnf("error updating status for container %s: %v", ctr.ID(), err)
 	}
-	if err = s.ctrIDIndex.Add(id); err != nil {
+	if err = m.ctrIDIndex.Add(id); err != nil {
 		return err
 	}
 	return nil
@@ -122,55 +117,55 @@ func configNetNsPath(spec rspec.Spec) (string, error) {
 	return "", fmt.Errorf("missing networking namespace")
 }
 
-func (s *Server) loadSandbox(id string) error {
-	config, err := ioutil.ReadFile(filepath.Join(s.config.SandboxDir, id, "config.json"))
+func (m *Manager) loadSandbox(id string) error {
+	config, err := ioutil.ReadFile(filepath.Join(m.config.SandboxDir, id, "config.json"))
 	if err != nil {
 		return err
 	}
-	var m rspec.Spec
-	if err = json.Unmarshal(config, &m); err != nil {
+	var s rspec.Spec
+	if err = json.Unmarshal(config, &s); err != nil {
 		return err
 	}
 	labels := make(map[string]string)
-	if err = json.Unmarshal([]byte(m.Annotations["ocid/labels"]), &labels); err != nil {
+	if err = json.Unmarshal([]byte(s.Annotations["ocid/labels"]), &labels); err != nil {
 		return err
 	}
-	name := m.Annotations["ocid/name"]
-	name, err = s.reservePodName(id, name)
+	name := s.Annotations["ocid/name"]
+	name, err = m.reservePodName(id, name)
 	if err != nil {
 		return err
 	}
 	var metadata pb.PodSandboxMetadata
-	if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil {
+	if err = json.Unmarshal([]byte(s.Annotations["ocid/metadata"]), &metadata); err != nil {
 		return err
 	}
 
-	processLabel, mountLabel, err := label.InitLabels(label.DupSecOpt(m.Process.SelinuxLabel))
+	processLabel, mountLabel, err := label.InitLabels(label.DupSecOpt(s.Process.SelinuxLabel))
 	if err != nil {
 		return err
 	}
 
 	annotations := make(map[string]string)
-	if err = json.Unmarshal([]byte(m.Annotations["ocid/annotations"]), &annotations); err != nil {
+	if err = json.Unmarshal([]byte(s.Annotations["ocid/annotations"]), &annotations); err != nil {
 		return err
 	}
 
 	sb := &sandbox{
 		id: id,
 		name: name,
-		logDir: m.Annotations["ocid/log_path"],
+		logDir: s.Annotations["ocid/log_path"],
 		labels: labels,
 		containers: oci.NewMemoryStore(),
 		processLabel: processLabel,
 		mountLabel: mountLabel,
 		annotations: annotations,
 		metadata: &metadata,
-		shmPath: m.Annotations["ocid/shm_path"],
+		shmPath: s.Annotations["ocid/shm_path"],
 	}
 
 	// We add a netNS only if we can load a permanent one.
 	// Otherwise, the sandbox will live in the host namespace.
-	netNsPath, err := configNetNsPath(m)
+	netNsPath, err := configNetNsPath(s)
 	if err == nil {
 		netNS, nsErr := netNsGet(netNsPath, sb.name)
 		// If we can't load the networking namespace
@@ -183,37 +178,37 @@ func (s *Server) loadSandbox(id string) error {
 		sb.netns = netNS
 	}
 
-	s.addSandbox(sb)
+	m.addSandbox(sb)
 
-	sandboxPath := filepath.Join(s.config.SandboxDir, id)
+	sandboxPath := filepath.Join(m.config.SandboxDir, id)
 
 	if err = label.ReserveLabel(processLabel); err != nil {
 		return err
 	}
 
-	cname, err := s.reserveContainerName(m.Annotations["ocid/container_id"], m.Annotations["ocid/container_name"])
+	cname, err := m.reserveContainerName(s.Annotations["ocid/container_id"], s.Annotations["ocid/container_name"])
 	if err != nil {
 		return err
 	}
-	scontainer, err := oci.NewContainer(m.Annotations["ocid/container_id"], cname, sandboxPath, sandboxPath, sb.netNs(), labels, annotations, nil, nil, id, false)
+	scontainer, err := oci.NewContainer(s.Annotations["ocid/container_id"], cname, sandboxPath, sandboxPath, sb.netNs(), labels, annotations, nil, nil, id, false)
 	if err != nil {
 		return err
 	}
 	sb.infraContainer = scontainer
-	if err = s.runtime.UpdateStatus(scontainer); err != nil {
+	if err = m.runtime.UpdateStatus(scontainer); err != nil {
 		logrus.Warnf("error updating status for container %s: %v", scontainer.ID(), err)
 	}
-	if err = s.ctrIDIndex.Add(scontainer.ID()); err != nil {
+	if err = m.ctrIDIndex.Add(scontainer.ID()); err != nil {
 		return err
 	}
-	if err = s.podIDIndex.Add(id); err != nil {
+	if err = m.podIDIndex.Add(id); err != nil {
 		return err
 	}
 	return nil
 }
 
-func (s *Server) restore() {
-	sandboxDir, err := ioutil.ReadDir(s.config.SandboxDir)
+func (m *Manager) restore() {
+	sandboxDir, err := ioutil.ReadDir(m.config.SandboxDir)
 	if err != nil && !os.IsNotExist(err) {
 		logrus.Warnf("could not read sandbox directory %s: %v", sandboxDir, err)
 	}
@@ -221,29 +216,29 @@ func (s *Server) restore() {
 		if !v.IsDir() {
 			continue
 		}
-		if err = s.loadSandbox(v.Name()); err != nil {
+		if err = m.loadSandbox(v.Name()); err != nil {
 			logrus.Warnf("could not restore sandbox %s: %v", v.Name(), err)
 		}
 	}
-	containerDir, err := ioutil.ReadDir(s.runtime.ContainerDir())
+	containerDir, err := ioutil.ReadDir(m.runtime.ContainerDir())
 	if err != nil && !os.IsNotExist(err) {
-		logrus.Warnf("could not read container directory %s: %v", s.runtime.ContainerDir(), err)
+		logrus.Warnf("could not read container directory %s: %v", m.runtime.ContainerDir(), err)
 	}
 	for _, v := range containerDir {
 		if !v.IsDir() {
 			continue
 		}
-		if err := s.loadContainer(v.Name()); err != nil {
+		if err := m.loadContainer(v.Name()); err != nil {
 			logrus.Warnf("could not restore container %s: %v", v.Name(), err)
 
 		}
 	}
 }
 
-func (s *Server) reservePodName(id, name string) (string, error) {
-	if err := s.podNameIndex.Reserve(name, id); err != nil {
+func (m *Manager) reservePodName(id, name string) (string, error) {
+	if err := m.podNameIndex.Reserve(name, id); err != nil {
 		if err == registrar.ErrNameReserved {
-			id, err := s.podNameIndex.Get(name)
+			id, err := m.podNameIndex.Get(name)
 			if err != nil {
 				logrus.Warnf("conflict, pod name %q already reserved", name)
|
logrus.Warnf("conflict, pod name %q already reserved", name)
|
||||||
return "", err
|
return "", err
|
||||||
|
@ -255,14 +250,14 @@ func (s *Server) reservePodName(id, name string) (string, error) {
|
||||||
return name, nil
|
return name, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) releasePodName(name string) {
|
func (m *Manager) releasePodName(name string) {
|
||||||
s.podNameIndex.Release(name)
|
m.podNameIndex.Release(name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) reserveContainerName(id, name string) (string, error) {
|
func (m *Manager) reserveContainerName(id, name string) (string, error) {
|
||||||
if err := s.ctrNameIndex.Reserve(name, id); err != nil {
|
if err := m.ctrNameIndex.Reserve(name, id); err != nil {
|
||||||
if err == registrar.ErrNameReserved {
|
if err == registrar.ErrNameReserved {
|
||||||
id, err := s.ctrNameIndex.Get(name)
|
id, err := m.ctrNameIndex.Get(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Warnf("conflict, ctr name %q already reserved", name)
|
logrus.Warnf("conflict, ctr name %q already reserved", name)
|
||||||
return "", err
|
return "", err
|
||||||
|
@ -274,8 +269,8 @@ func (s *Server) reserveContainerName(id, name string) (string, error) {
|
||||||
return name, nil
|
return name, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) releaseContainerName(name string) {
|
func (m *Manager) releaseContainerName(name string) {
|
||||||
s.ctrNameIndex.Release(name)
|
m.ctrNameIndex.Release(name)
|
||||||
}
|
}
|
||||||
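reservePodName and reserveContainerName wrap the docker registrar: Reserve either succeeds or fails with registrar.ErrNameReserved, and the helpers above only tolerate the conflict when the same ID already owns the name. A hedged sketch of the calling pattern a creation path would pair with these helpers (the function name is made up; the rollback defer mirrors what RunPodSandbox does further down in this commit):

// Hypothetical illustration inside the manager package; not part of this commit.
func (m *Manager) examplePodSetup(id, name string) (err error) {
	name, err = m.reservePodName(id, name)
	if err != nil {
		return err
	}
	defer func() {
		// Roll the reservation back if any later step fails.
		if err != nil {
			m.releasePodName(name)
		}
	}()

	// ... allocate the sandbox directory, infra container, etc.
	return nil
}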
|
|
||||||
const (
|
const (
|
||||||
|
@@ -295,16 +290,8 @@ func seccompEnabled() bool {
 	return enabled
 }
 
-// New creates a new Server with options provided
-func New(config *Config) (*Server, error) {
-	// TODO: This will go away later when we have wrapper process or systemd acting as
-	// subreaper.
-	if err := utils.SetSubreaper(1); err != nil {
-		return nil, fmt.Errorf("failed to set server as subreaper: %v", err)
-	}
-
-	utils.StartReaper()
-
+// New creates a new Manager with options provided
+func New(config *Config) (*Manager, error) {
 	if err := os.MkdirAll(config.ImageDir, 0755); err != nil {
 		return nil, err
 	}
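New no longer turns the process into a subreaper or starts the reaper loop, so that responsibility presumably moves to whatever binary embeds the manager (the ocid daemon). A hedged sketch of what such an entry point would still need to do, reusing the utils calls deleted above:

// Sketch of a daemon entry point; error handling is illustrative only.
if err := utils.SetSubreaper(1); err != nil {
	logrus.Fatalf("failed to set process as subreaper: %v", err)
}
utils.StartReaper()

m, err := manager.New(config)
if err != nil {
	logrus.Fatalf("failed to create manager: %v", err)
}
_ = m // wire m into the gRPC server here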
@ -323,11 +310,11 @@ func New(config *Config) (*Server, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
s := &Server{
|
m := &Manager{
|
||||||
runtime: r,
|
runtime: r,
|
||||||
netPlugin: netPlugin,
|
netPlugin: netPlugin,
|
||||||
config: *config,
|
config: *config,
|
||||||
state: &serverState{
|
state: &managerState{
|
||||||
sandboxes: sandboxes,
|
sandboxes: sandboxes,
|
||||||
containers: containers,
|
containers: containers,
|
||||||
},
|
},
|
||||||
|
@ -343,77 +330,77 @@ func New(config *Config) (*Server, error) {
|
||||||
if err := json.Unmarshal(seccompProfile, &seccompConfig); err != nil {
|
if err := json.Unmarshal(seccompProfile, &seccompConfig); err != nil {
|
||||||
return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
|
return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
|
||||||
}
|
}
|
||||||
s.seccompProfile = seccompConfig
|
m.seccompProfile = seccompConfig
|
||||||
|
|
||||||
if s.appArmorEnabled && s.appArmorProfile == apparmor.DefaultApparmorProfile {
|
if m.appArmorEnabled && m.appArmorProfile == apparmor.DefaultApparmorProfile {
|
||||||
if err := apparmor.EnsureDefaultApparmorProfile(); err != nil {
|
if err := apparmor.EnsureDefaultApparmorProfile(); err != nil {
|
||||||
return nil, fmt.Errorf("ensuring the default apparmor profile is installed failed: %v", err)
|
return nil, fmt.Errorf("ensuring the default apparmor profile is installed failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
s.podIDIndex = truncindex.NewTruncIndex([]string{})
|
m.podIDIndex = truncindex.NewTruncIndex([]string{})
|
||||||
s.podNameIndex = registrar.NewRegistrar()
|
m.podNameIndex = registrar.NewRegistrar()
|
||||||
s.ctrIDIndex = truncindex.NewTruncIndex([]string{})
|
m.ctrIDIndex = truncindex.NewTruncIndex([]string{})
|
||||||
s.ctrNameIndex = registrar.NewRegistrar()
|
m.ctrNameIndex = registrar.NewRegistrar()
|
||||||
|
|
||||||
s.restore()
|
m.restore()
|
||||||
|
|
||||||
logrus.Debugf("sandboxes: %v", s.state.sandboxes)
|
logrus.Debugf("sandboxes: %v", m.state.sandboxes)
|
||||||
logrus.Debugf("containers: %v", s.state.containers)
|
logrus.Debugf("containers: %v", m.state.containers)
|
||||||
return s, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type serverState struct {
|
type managerState struct {
|
||||||
sandboxes map[string]*sandbox
|
sandboxes map[string]*sandbox
|
||||||
containers oci.Store
|
containers oci.Store
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) addSandbox(sb *sandbox) {
|
func (m *Manager) addSandbox(sb *sandbox) {
|
||||||
s.stateLock.Lock()
|
m.stateLock.Lock()
|
||||||
s.state.sandboxes[sb.id] = sb
|
m.state.sandboxes[sb.id] = sb
|
||||||
s.stateLock.Unlock()
|
m.stateLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) getSandbox(id string) *sandbox {
|
func (m *Manager) getSandbox(id string) *sandbox {
|
||||||
s.stateLock.Lock()
|
m.stateLock.Lock()
|
||||||
sb := s.state.sandboxes[id]
|
sb := m.state.sandboxes[id]
|
||||||
s.stateLock.Unlock()
|
m.stateLock.Unlock()
|
||||||
return sb
|
return sb
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) hasSandbox(id string) bool {
|
func (m *Manager) hasSandbox(id string) bool {
|
||||||
s.stateLock.Lock()
|
m.stateLock.Lock()
|
||||||
_, ok := s.state.sandboxes[id]
|
_, ok := m.state.sandboxes[id]
|
||||||
s.stateLock.Unlock()
|
m.stateLock.Unlock()
|
||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) removeSandbox(id string) {
|
func (m *Manager) removeSandbox(id string) {
|
||||||
s.stateLock.Lock()
|
m.stateLock.Lock()
|
||||||
delete(s.state.sandboxes, id)
|
delete(m.state.sandboxes, id)
|
||||||
s.stateLock.Unlock()
|
m.stateLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) addContainer(c *oci.Container) {
|
func (m *Manager) addContainer(c *oci.Container) {
|
||||||
s.stateLock.Lock()
|
m.stateLock.Lock()
|
||||||
sandbox := s.state.sandboxes[c.Sandbox()]
|
sandbox := m.state.sandboxes[c.Sandbox()]
|
||||||
// TODO(runcom): handle !ok above!!! otherwise it panics!
|
// TODO(runcom): handle !ok above!!! otherwise it panics!
|
||||||
sandbox.addContainer(c)
|
sandbox.addContainer(c)
|
||||||
s.state.containers.Add(c.ID(), c)
|
m.state.containers.Add(c.ID(), c)
|
||||||
s.stateLock.Unlock()
|
m.stateLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) getContainer(id string) *oci.Container {
|
func (m *Manager) getContainer(id string) *oci.Container {
|
||||||
s.stateLock.Lock()
|
m.stateLock.Lock()
|
||||||
c := s.state.containers.Get(id)
|
c := m.state.containers.Get(id)
|
||||||
s.stateLock.Unlock()
|
m.stateLock.Unlock()
|
||||||
return c
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) removeContainer(c *oci.Container) {
|
func (m *Manager) removeContainer(c *oci.Container) {
|
||||||
s.stateLock.Lock()
|
m.stateLock.Lock()
|
||||||
sandbox := s.state.sandboxes[c.Sandbox()]
|
sandbox := m.state.sandboxes[c.Sandbox()]
|
||||||
sandbox.removeContainer(c)
|
sandbox.removeContainer(c)
|
||||||
s.state.containers.Delete(c.ID())
|
m.state.containers.Delete(c.ID())
|
||||||
s.stateLock.Unlock()
|
m.stateLock.Unlock()
|
||||||
}
|
}
|
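The TODO(runcom) above notes that addContainer indexes state.sandboxes without checking the ok value, so a container whose sandbox is unknown would cause a nil-pointer panic (removeContainer has the same exposure). One possible shape of the guard, which would also require callers to start handling an error; this is a sketch, not part of this commit:

func (m *Manager) addContainer(c *oci.Container) error {
	m.stateLock.Lock()
	defer m.stateLock.Unlock()

	sandbox, ok := m.state.sandboxes[c.Sandbox()]
	if !ok {
		return fmt.Errorf("sandbox %s not found for container %s", c.Sandbox(), c.ID())
	}
	sandbox.addContainer(c)
	m.state.containers.Add(c.ID(), c)
	return nil
}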
@@ -1,19 +1,16 @@
-package server
+package manager
 
-import (
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
+import pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 
 // Status returns the status of the runtime
-func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) {
+func (m *Manager) Status() (*pb.RuntimeStatus, error) {
 
 	// Deal with Runtime conditions
-	runtimeReady, err := s.runtime.RuntimeReady()
+	runtimeReady, err := m.runtime.RuntimeReady()
 	if err != nil {
 		return nil, err
 	}
-	networkReady, err := s.runtime.NetworkReady()
+	networkReady, err := m.runtime.NetworkReady()
 	if err != nil {
 		return nil, err
 	}
@@ -22,20 +19,18 @@ func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) {
 	runtimeReadyConditionString := pb.RuntimeReady
 	networkReadyConditionString := pb.NetworkReady
 
-	resp := &pb.StatusResponse{
-		Status: &pb.RuntimeStatus{
-			Conditions: []*pb.RuntimeCondition{
-				&pb.RuntimeCondition{
-					Type:   &runtimeReadyConditionString,
-					Status: &runtimeReady,
-				},
-				&pb.RuntimeCondition{
-					Type:   &networkReadyConditionString,
-					Status: &networkReady,
-				},
-			},
-		},
-	}
+	status := &pb.RuntimeStatus{
+		Conditions: []*pb.RuntimeCondition{
+			&pb.RuntimeCondition{
+				Type:   &runtimeReadyConditionString,
+				Status: &runtimeReady,
+			},
+			&pb.RuntimeCondition{
+				Type:   &networkReadyConditionString,
+				Status: &networkReady,
+			},
+		},
+	}
 
-	return resp, nil
+	return status, nil
 }
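Status now returns the bare *pb.RuntimeStatus, so the CRI-facing server presumably shrinks to a thin adapter around the manager. A minimal sketch of such a wrapper; the s.manager field is an assumption, not something this commit introduces:

// Hypothetical gRPC-facing wrapper in the server package.
func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) {
	status, err := s.manager.Status()
	if err != nil {
		return nil, err
	}
	// Re-wrap the runtime status into the CRI response the kubelet expects.
	return &pb.StatusResponse{Status: status}, nil
}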
@ -1,4 +1,4 @@
|
||||||
package server
|
package manager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
|
@ -9,9 +9,9 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/containernetworking/cni/pkg/ns"
|
||||||
"github.com/docker/docker/pkg/stringid"
|
"github.com/docker/docker/pkg/stringid"
|
||||||
"github.com/kubernetes-incubator/cri-o/oci"
|
"github.com/kubernetes-incubator/cri-o/oci"
|
||||||
"github.com/containernetworking/cni/pkg/ns"
|
|
||||||
"k8s.io/kubernetes/pkg/fields"
|
"k8s.io/kubernetes/pkg/fields"
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||||
)
|
)
|
||||||
|
@ -94,7 +94,7 @@ func netNsGet(nspath, name string) (*sandboxNetNs, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
netNs := &sandboxNetNs{ns: netNS, closed: false,}
|
netNs := &sandboxNetNs{ns: netNS, closed: false}
|
||||||
|
|
||||||
if symlink {
|
if symlink {
|
||||||
fd, err := os.Open(nspath)
|
fd, err := os.Open(nspath)
|
||||||
|
@ -188,7 +188,7 @@ func (s *sandbox) netNsCreate() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
s.netns = &sandboxNetNs{
|
s.netns = &sandboxNetNs{
|
||||||
ns: netNS,
|
ns: netNS,
|
||||||
closed: false,
|
closed: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -232,7 +232,7 @@ func (s *sandbox) netNsRemove() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) generatePodIDandName(name string, namespace string, attempt uint32) (string, string, error) {
|
func (m *Manager) generatePodIDandName(name string, namespace string, attempt uint32) (string, string, error) {
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
id = stringid.GenerateNonCryptoID()
|
id = stringid.GenerateNonCryptoID()
|
||||||
|
@@ -241,28 +241,23 @@ func (s *Server) generatePodIDandName(name string, namespace string, attempt uint32) (string, string, error) {
 		namespace = podDefaultNamespace
 	}
 
-	if name, err = s.reservePodName(id, fmt.Sprintf("%s-%s-%v", namespace, name, attempt)); err != nil {
+	if name, err = m.reservePodName(id, fmt.Sprintf("%s-%s-%v", namespace, name, attempt)); err != nil {
 		return "", "", err
 	}
 	return id, name, err
 }
 
-type podSandboxRequest interface {
-	GetPodSandboxId() string
-}
-
-func (s *Server) getPodSandboxFromRequest(req podSandboxRequest) (*sandbox, error) {
-	sbID := req.GetPodSandboxId()
+func (m *Manager) getPodSandboxWithPartialID(sbID string) (*sandbox, error) {
 	if sbID == "" {
 		return nil, errSandboxIDEmpty
 	}
 
-	sandboxID, err := s.podIDIndex.Get(sbID)
+	sandboxID, err := m.podIDIndex.Get(sbID)
 	if err != nil {
 		return nil, fmt.Errorf("PodSandbox with ID starting with %s not found: %v", sbID, err)
 	}
 
-	sb := s.getSandbox(sandboxID)
+	sb := m.getSandbox(sandboxID)
 	if sb == nil {
 		return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID)
 	}
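getPodSandboxWithPartialID accepts a truncated sandbox ID because podIDIndex is a truncindex: any unambiguous prefix resolves to the full ID, the same behaviour docker exposes for container IDs. An illustrative, self-contained sketch; the ID value is made up and the import path assumes docker's vendored truncindex package:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{})
	_ = idx.Add("8dfb2f5e9c0a41f2b6c7d8e9f0a1b2c3") // made-up sandbox ID

	full, err := idx.Get("8dfb") // any unambiguous prefix resolves to the full ID
	fmt.Println(full, err)
}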
@ -1,9 +1,7 @@
|
||||||
package server
|
package manager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/kubernetes-incubator/cri-o/oci"
|
"github.com/kubernetes-incubator/cri-o/oci"
|
||||||
"golang.org/x/net/context"
|
|
||||||
"k8s.io/kubernetes/pkg/fields"
|
"k8s.io/kubernetes/pkg/fields"
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||||
)
|
)
|
||||||
|
@ -27,23 +25,21 @@ func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListPodSandbox returns a list of SandBoxes.
|
// ListPodSandbox returns a list of SandBoxes.
|
||||||
func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
|
func (m *Manager) ListPodSandbox(filter *pb.PodSandboxFilter) ([]*pb.PodSandbox, error) {
|
||||||
logrus.Debugf("ListPodSandboxRequest %+v", req)
|
|
||||||
var pods []*pb.PodSandbox
|
var pods []*pb.PodSandbox
|
||||||
var podList []*sandbox
|
var podList []*sandbox
|
||||||
for _, sb := range s.state.sandboxes {
|
for _, sb := range m.state.sandboxes {
|
||||||
podList = append(podList, sb)
|
podList = append(podList, sb)
|
||||||
}
|
}
|
||||||
|
|
||||||
filter := req.Filter
|
|
||||||
// Filter by pod id first.
|
// Filter by pod id first.
|
||||||
if filter != nil {
|
if filter != nil {
|
||||||
if filter.Id != nil {
|
if filter.Id != nil {
|
||||||
id, err := s.podIDIndex.Get(*filter.Id)
|
id, err := m.podIDIndex.Get(*filter.Id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
sb := s.getSandbox(id)
|
sb := m.getSandbox(id)
|
||||||
if sb == nil {
|
if sb == nil {
|
||||||
podList = []*sandbox{}
|
podList = []*sandbox{}
|
||||||
} else {
|
} else {
|
||||||
|
@ -59,10 +55,10 @@ func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxReque
|
||||||
// it's better not to panic
|
// it's better not to panic
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := s.runtime.UpdateStatus(podInfraContainer); err != nil {
|
if err := m.runtime.UpdateStatus(podInfraContainer); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
cState := s.runtime.ContainerStatus(podInfraContainer)
|
cState := m.runtime.ContainerStatus(podInfraContainer)
|
||||||
created := cState.Created.UnixNano()
|
created := cState.Created.UnixNano()
|
||||||
rStatus := pb.PodSandboxState_SANDBOX_NOTREADY
|
rStatus := pb.PodSandboxState_SANDBOX_NOTREADY
|
||||||
if cState.Status == oci.ContainerStateRunning {
|
if cState.Status == oci.ContainerStateRunning {
|
||||||
|
@ -79,14 +75,10 @@ func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxReque
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter by other criteria such as state and labels.
|
// Filter by other criteria such as state and labels.
|
||||||
if filterSandbox(pod, req.Filter) {
|
if filterSandbox(pod, filter) {
|
||||||
pods = append(pods, pod)
|
pods = append(pods, pod)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
-	resp := &pb.ListPodSandboxResponse{
-		Items: pods,
-	}
-	logrus.Debugf("ListPodSandboxResponse %+v", resp)
-	return resp, nil
+	return pods, nil
 }
|
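ListPodSandbox now takes the *pb.PodSandboxFilter straight from the caller instead of the whole request. A usage sketch with made-up values, assuming the v1alpha1 filter fields (pointer Id and State, plus a LabelSelector map):

// Illustrative only; m is an already constructed *Manager.
id := "8dfb"
state := pb.PodSandboxState_SANDBOX_READY
filter := &pb.PodSandboxFilter{
	Id:            &id,
	State:         &state,
	LabelSelector: map[string]string{"app": "nginx"},
}

pods, err := m.ListPodSandbox(filter)
if err != nil {
	logrus.Warnf("listing sandboxes: %v", err)
}
for _, p := range pods {
	logrus.Debugf("sandbox %s state %v", p.GetId(), p.GetState())
}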
@ -1,4 +1,4 @@
|
||||||
package server
|
package manager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
@ -9,23 +9,19 @@ import (
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
"github.com/kubernetes-incubator/cri-o/oci"
|
"github.com/kubernetes-incubator/cri-o/oci"
|
||||||
"github.com/opencontainers/runc/libcontainer/label"
|
"github.com/opencontainers/runc/libcontainer/label"
|
||||||
"golang.org/x/net/context"
|
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// RemovePodSandbox deletes the sandbox. If there are any running containers in the
|
// RemovePodSandbox deletes the sandbox. If there are any running containers in the
|
||||||
// sandbox, they should be force deleted.
|
// sandbox, they should be force deleted.
|
||||||
-func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
-	logrus.Debugf("RemovePodSandboxRequest %+v", req)
-	sb, err := s.getPodSandboxFromRequest(req)
+func (m *Manager) RemovePodSandbox(sbID string) error {
+	sb, err := m.getPodSandboxWithPartialID(sbID)
 	if err != nil {
 		if err == errSandboxIDEmpty {
-			return nil, err
+			return err
 		}
 
-		resp := &pb.RemovePodSandboxResponse{}
-		logrus.Warnf("could not get sandbox %s, it's probably been removed already: %v", req.GetPodSandboxId(), err)
-		return resp, nil
+		logrus.Warnf("could not get sandbox %s, it's probably been removed already: %v", sbID, err)
+		return nil
 	}
||||||
|
|
||||||
podInfraContainer := sb.infraContainer
|
podInfraContainer := sb.infraContainer
|
||||||
|
@ -34,62 +30,60 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
|
||||||
|
|
||||||
// Delete all the containers in the sandbox
|
// Delete all the containers in the sandbox
|
||||||
for _, c := range containers {
|
for _, c := range containers {
|
||||||
if err := s.runtime.UpdateStatus(c); err != nil {
|
if err := m.runtime.UpdateStatus(c); err != nil {
|
||||||
return nil, fmt.Errorf("failed to update container state: %v", err)
|
return fmt.Errorf("failed to update container state: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cState := s.runtime.ContainerStatus(c)
|
cState := m.runtime.ContainerStatus(c)
|
||||||
if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
|
if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
|
||||||
if err := s.runtime.StopContainer(c); err != nil {
|
if err := m.runtime.StopContainer(c); err != nil {
|
||||||
return nil, fmt.Errorf("failed to stop container %s: %v", c.Name(), err)
|
return fmt.Errorf("failed to stop container %s: %v", c.Name(), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.runtime.DeleteContainer(c); err != nil {
|
if err := m.runtime.DeleteContainer(c); err != nil {
|
||||||
return nil, fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), sb.id, err)
|
return fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), sb.id, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if c == podInfraContainer {
|
if c == podInfraContainer {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID())
|
containerDir := filepath.Join(m.runtime.ContainerDir(), c.ID())
|
||||||
if err := os.RemoveAll(containerDir); err != nil {
|
if err := os.RemoveAll(containerDir); err != nil {
|
||||||
return nil, fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err)
|
return fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.releaseContainerName(c.Name())
|
m.releaseContainerName(c.Name())
|
||||||
s.removeContainer(c)
|
m.removeContainer(c)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := label.UnreserveLabel(sb.processLabel); err != nil {
|
if err := label.UnreserveLabel(sb.processLabel); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// unmount the shm for the pod
|
// unmount the shm for the pod
|
||||||
if sb.shmPath != "/dev/shm" {
|
if sb.shmPath != "/dev/shm" {
|
||||||
if err := syscall.Unmount(sb.shmPath, syscall.MNT_DETACH); err != nil {
|
if err := syscall.Unmount(sb.shmPath, syscall.MNT_DETACH); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := sb.netNsRemove(); err != nil {
|
if err := sb.netNsRemove(); err != nil {
|
||||||
return nil, fmt.Errorf("failed to remove networking namespace for sandbox %s: %v", sb.id, err)
|
return fmt.Errorf("failed to remove networking namespace for sandbox %s: %v", sb.id, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove the files related to the sandbox
|
// Remove the files related to the sandbox
|
||||||
podSandboxDir := filepath.Join(s.config.SandboxDir, sb.id)
|
podSandboxDir := filepath.Join(m.config.SandboxDir, sb.id)
|
||||||
if err := os.RemoveAll(podSandboxDir); err != nil {
|
if err := os.RemoveAll(podSandboxDir); err != nil {
|
||||||
return nil, fmt.Errorf("failed to remove sandbox %s directory: %v", sb.id, err)
|
return fmt.Errorf("failed to remove sandbox %s directory: %v", sb.id, err)
|
||||||
}
|
}
|
||||||
s.releaseContainerName(podInfraContainer.Name())
|
m.releaseContainerName(podInfraContainer.Name())
|
||||||
s.removeContainer(podInfraContainer)
|
m.removeContainer(podInfraContainer)
|
||||||
sb.infraContainer = nil
|
sb.infraContainer = nil
|
||||||
|
|
||||||
s.releasePodName(sb.name)
|
m.releasePodName(sb.name)
|
||||||
s.removeSandbox(sb.id)
|
m.removeSandbox(sb.id)
|
||||||
|
|
||||||
resp := &pb.RemovePodSandboxResponse{}
|
return nil
|
||||||
logrus.Debugf("RemovePodSandboxResponse %+v", resp)
|
|
||||||
return resp, nil
|
|
||||||
}
|
}
|
||||||
|
|
|
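Worth noting that RemovePodSandbox keeps its idempotent behaviour: an already-removed sandbox is logged and reported as success, and only an empty sbID is an error. The server-side adapter can therefore stay as small as the Status wrapper sketched earlier; again, the s.manager field is an assumption:

func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
	if err := s.manager.RemovePodSandbox(req.GetPodSandboxId()); err != nil {
		return nil, err
	}
	return &pb.RemovePodSandboxResponse{}, nil
}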
@ -1,4 +1,4 @@
|
||||||
package server
|
package manager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
@ -13,24 +13,23 @@ import (
|
||||||
"github.com/kubernetes-incubator/cri-o/utils"
|
"github.com/kubernetes-incubator/cri-o/utils"
|
||||||
"github.com/opencontainers/runc/libcontainer/label"
|
"github.com/opencontainers/runc/libcontainer/label"
|
||||||
"github.com/opencontainers/runtime-tools/generate"
|
"github.com/opencontainers/runtime-tools/generate"
|
||||||
"golang.org/x/net/context"
|
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (s *Server) runContainer(container *oci.Container) error {
|
func (m *Manager) runContainer(container *oci.Container) error {
|
||||||
if err := s.runtime.CreateContainer(container); err != nil {
|
if err := m.runtime.CreateContainer(container); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.runtime.UpdateStatus(container); err != nil {
|
if err := m.runtime.UpdateStatus(container); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.runtime.StartContainer(container); err != nil {
|
if err := m.runtime.StartContainer(container); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.runtime.UpdateStatus(container); err != nil {
|
if err := m.runtime.UpdateStatus(container); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -38,44 +37,46 @@ func (s *Server) runContainer(container *oci.Container) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// RunPodSandbox creates and runs a pod-level sandbox.
|
// RunPodSandbox creates and runs a pod-level sandbox.
|
||||||
-func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest) (resp *pb.RunPodSandboxResponse, err error) {
-	logrus.Debugf("RunPodSandboxRequest %+v", req)
+func (m *Manager) RunPodSandbox(config *pb.PodSandboxConfig) (string, error) {
 	var processLabel, mountLabel, netNsPath string
+	if config == nil {
+		return "", fmt.Errorf("PodSandboxConfig should not be nil")
+	}
||||||
// process req.Name
|
// process req.Name
|
||||||
name := req.GetConfig().GetMetadata().GetName()
|
name := config.GetMetadata().GetName()
|
||||||
if name == "" {
|
if name == "" {
|
||||||
return nil, fmt.Errorf("PodSandboxConfig.Name should not be empty")
|
return "", fmt.Errorf("PodSandboxConfig.Name should not be empty")
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace := req.GetConfig().GetMetadata().GetNamespace()
|
namespace := config.GetMetadata().GetNamespace()
|
||||||
attempt := req.GetConfig().GetMetadata().GetAttempt()
|
attempt := config.GetMetadata().GetAttempt()
|
||||||
|
|
||||||
id, name, err := s.generatePodIDandName(name, namespace, attempt)
|
id, name, err := m.generatePodIDandName(name, namespace, attempt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.releasePodName(name)
|
m.releasePodName(name)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err = s.podIDIndex.Add(id); err != nil {
|
if err = m.podIDIndex.Add(id); err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err = s.podIDIndex.Delete(id); err != nil {
|
if err = m.podIDIndex.Delete(id); err != nil {
|
||||||
logrus.Warnf("couldn't delete pod id %s from idIndex", id)
|
logrus.Warnf("couldn't delete pod id %s from idIndex", id)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
podSandboxDir := filepath.Join(s.config.SandboxDir, id)
|
podSandboxDir := filepath.Join(m.config.SandboxDir, id)
|
||||||
if _, err = os.Stat(podSandboxDir); err == nil {
|
if _, err = os.Stat(podSandboxDir); err == nil {
|
||||||
return nil, fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir)
|
return "", fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
|
@ -87,7 +88,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err = os.MkdirAll(podSandboxDir, 0755); err != nil {
|
if err = os.MkdirAll(podSandboxDir, 0755); err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// creates a spec Generator with the default spec.
|
// creates a spec Generator with the default spec.
|
||||||
|
@ -95,79 +96,79 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
|
||||||
|
|
||||||
// TODO: Make the `graph/vfs` part of this configurable once the storage
|
// TODO: Make the `graph/vfs` part of this configurable once the storage
|
||||||
// integration has been merged.
|
// integration has been merged.
|
||||||
podInfraRootfs := filepath.Join(s.config.Root, "graph/vfs/pause")
|
podInfraRootfs := filepath.Join(m.config.Root, "graph/vfs/pause")
|
||||||
// setup defaults for the pod sandbox
|
// setup defaults for the pod sandbox
|
||||||
g.SetRootPath(filepath.Join(podInfraRootfs, "rootfs"))
|
g.SetRootPath(filepath.Join(podInfraRootfs, "rootfs"))
|
||||||
g.SetRootReadonly(true)
|
g.SetRootReadonly(true)
|
||||||
g.SetProcessArgs([]string{"/pause"})
|
g.SetProcessArgs([]string{"/pause"})
|
||||||
|
|
||||||
// set hostname
|
// set hostname
|
||||||
hostname := req.GetConfig().GetHostname()
|
hostname := config.GetHostname()
|
||||||
if hostname != "" {
|
if hostname != "" {
|
||||||
g.SetHostname(hostname)
|
g.SetHostname(hostname)
|
||||||
}
|
}
|
||||||
|
|
||||||
// set log directory
|
// set log directory
|
||||||
logDir := req.GetConfig().GetLogDirectory()
|
logDir := config.GetLogDirectory()
|
||||||
if logDir == "" {
|
if logDir == "" {
|
||||||
logDir = filepath.Join(s.config.LogDir, id)
|
logDir = filepath.Join(m.config.LogDir, id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// set DNS options
|
// set DNS options
|
||||||
dnsServers := req.GetConfig().GetDnsConfig().GetServers()
|
dnsServers := config.GetDnsConfig().GetServers()
|
||||||
dnsSearches := req.GetConfig().GetDnsConfig().GetSearches()
|
dnsSearches := config.GetDnsConfig().GetSearches()
|
||||||
dnsOptions := req.GetConfig().GetDnsConfig().GetOptions()
|
dnsOptions := config.GetDnsConfig().GetOptions()
|
||||||
resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir)
|
resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir)
|
||||||
err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath)
|
err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err1 := removeFile(resolvPath)
|
err1 := removeFile(resolvPath)
|
||||||
if err1 != nil {
|
if err1 != nil {
|
||||||
err = err1
|
err = err1
|
||||||
return nil, fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
|
return "", fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
|
||||||
}
|
}
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
g.AddBindMount(resolvPath, "/etc/resolv.conf", []string{"ro"})
|
g.AddBindMount(resolvPath, "/etc/resolv.conf", []string{"ro"})
|
||||||
|
|
||||||
// add metadata
|
// add metadata
|
||||||
metadata := req.GetConfig().GetMetadata()
|
metadata := config.GetMetadata()
|
||||||
metadataJSON, err := json.Marshal(metadata)
|
metadataJSON, err := json.Marshal(metadata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// add labels
|
// add labels
|
||||||
labels := req.GetConfig().GetLabels()
|
labels := config.GetLabels()
|
||||||
labelsJSON, err := json.Marshal(labels)
|
labelsJSON, err := json.Marshal(labels)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// add annotations
|
// add annotations
|
||||||
annotations := req.GetConfig().GetAnnotations()
|
annotations := config.GetAnnotations()
|
||||||
annotationsJSON, err := json.Marshal(annotations)
|
annotationsJSON, err := json.Marshal(annotations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Don't use SELinux separation with Host Pid or IPC Namespace,
|
// Don't use SELinux separation with Host Pid or IPC Namespace,
|
||||||
if !req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() && !req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
|
if !config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() && !config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
|
||||||
processLabel, mountLabel, err = getSELinuxLabels(nil)
|
processLabel, mountLabel, err = getSELinuxLabels(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
g.SetProcessSelinuxLabel(processLabel)
|
g.SetProcessSelinuxLabel(processLabel)
|
||||||
}
|
}
|
||||||
|
|
||||||
// create shm mount for the pod containers.
|
// create shm mount for the pod containers.
|
||||||
var shmPath string
|
var shmPath string
|
||||||
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
|
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
|
||||||
shmPath = "/dev/shm"
|
shmPath = "/dev/shm"
|
||||||
} else {
|
} else {
|
||||||
shmPath, err = setupShm(podSandboxDir, mountLabel)
|
shmPath, err = setupShm(podSandboxDir, mountLabel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -178,24 +179,24 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
containerID, containerName, err := s.generateContainerIDandName(name, "infra", 0)
|
containerID, containerName, err := m.generateContainerIDandName(name, "infra", 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.releaseContainerName(containerName)
|
m.releaseContainerName(containerName)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err = s.ctrIDIndex.Add(containerID); err != nil {
|
if err = m.ctrIDIndex.Add(containerID); err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err = s.ctrIDIndex.Delete(containerID); err != nil {
|
if err = m.ctrIDIndex.Delete(containerID); err != nil {
|
||||||
logrus.Warnf("couldn't delete ctr id %s from idIndex", containerID)
|
logrus.Warnf("couldn't delete ctr id %s from idIndex", containerID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -224,7 +225,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
|
||||||
shmPath: shmPath,
|
shmPath: shmPath,
|
||||||
}
|
}
|
||||||
|
|
||||||
s.addSandbox(sb)
|
m.addSandbox(sb)
|
||||||
|
|
||||||
for k, v := range annotations {
|
for k, v := range annotations {
|
||||||
g.AddAnnotation(k, v)
|
g.AddAnnotation(k, v)
|
||||||
|
@ -233,7 +234,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
|
||||||
// extract linux sysctls from annotations and pass down to oci runtime
|
// extract linux sysctls from annotations and pass down to oci runtime
|
||||||
safe, unsafe, err := SysctlsFromPodAnnotations(annotations)
|
safe, unsafe, err := SysctlsFromPodAnnotations(annotations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
for _, sysctl := range safe {
|
for _, sysctl := range safe {
|
||||||
g.AddLinuxSysctl(sysctl.Name, sysctl.Value)
|
g.AddLinuxSysctl(sysctl.Name, sysctl.Value)
|
||||||
|
@ -243,26 +244,26 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
// setup cgroup settings
|
// setup cgroup settings
|
||||||
cgroupParent := req.GetConfig().GetLinux().GetCgroupParent()
|
cgroupParent := config.GetLinux().GetCgroupParent()
|
||||||
if cgroupParent != "" {
|
if cgroupParent != "" {
|
||||||
g.SetLinuxCgroupsPath(cgroupParent)
|
g.SetLinuxCgroupsPath(cgroupParent)
|
||||||
}
|
}
|
||||||
|
|
||||||
// set up namespaces
|
// set up namespaces
|
||||||
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() {
|
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() {
|
||||||
err = g.RemoveLinuxNamespace("network")
|
err = g.RemoveLinuxNamespace("network")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
netNsPath, err = hostNetNsPath()
|
netNsPath, err = hostNetNsPath()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Create the sandbox network namespace
|
// Create the sandbox network namespace
|
||||||
if err = sb.netNsCreate(); err != nil {
|
if err = sb.netNsCreate(); err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
|
@ -273,67 +274,65 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
|
||||||
if netnsErr := sb.netNsRemove(); netnsErr != nil {
|
if netnsErr := sb.netNsRemove(); netnsErr != nil {
|
||||||
logrus.Warnf("Failed to remove networking namespace: %v", netnsErr)
|
logrus.Warnf("Failed to remove networking namespace: %v", netnsErr)
|
||||||
}
|
}
|
||||||
} ()
|
}()
|
||||||
|
|
||||||
// Pass the created namespace path to the runtime
|
// Pass the created namespace path to the runtime
|
||||||
err = g.AddOrReplaceLinuxNamespace("network", sb.netNsPath())
|
err = g.AddOrReplaceLinuxNamespace("network", sb.netNsPath())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
netNsPath = sb.netNsPath()
|
netNsPath = sb.netNsPath()
|
||||||
}
|
}
|
||||||
|
|
||||||
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() {
|
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() {
|
||||||
err = g.RemoveLinuxNamespace("pid")
|
err = g.RemoveLinuxNamespace("pid")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
|
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
|
||||||
err = g.RemoveLinuxNamespace("ipc")
|
err = g.RemoveLinuxNamespace("ipc")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"), generate.ExportOptions{})
|
err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"), generate.ExportOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = os.Stat(podInfraRootfs); err != nil {
|
if _, err = os.Stat(podInfraRootfs); err != nil {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
// TODO: Replace by rootfs creation API when it is ready
|
// TODO: Replace by rootfs creation API when it is ready
|
||||||
if err = utils.CreateInfraRootfs(podInfraRootfs, s.config.Pause); err != nil {
|
if err = utils.CreateInfraRootfs(podInfraRootfs, m.config.Pause); err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
container, err := oci.NewContainer(containerID, containerName, podSandboxDir, podSandboxDir, sb.netNs(), labels, annotations, nil, nil, id, false)
|
container, err := oci.NewContainer(containerID, containerName, podSandboxDir, podSandboxDir, sb.netNs(), labels, annotations, nil, nil, id, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
sb.infraContainer = container
|
sb.infraContainer = container
|
||||||
|
|
||||||
// setup the network
|
// setup the network
|
||||||
podNamespace := ""
|
podNamespace := ""
|
||||||
if err = s.netPlugin.SetUpPod(netNsPath, podNamespace, id, containerName); err != nil {
|
if err = m.netPlugin.SetUpPod(netNsPath, podNamespace, id, containerName); err != nil {
|
||||||
return nil, fmt.Errorf("failed to create network for container %s in sandbox %s: %v", containerName, id, err)
|
return "", fmt.Errorf("failed to create network for container %s in sandbox %s: %v", containerName, id, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = s.runContainer(container); err != nil {
|
if err = m.runContainer(container); err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
-	resp = &pb.RunPodSandboxResponse{PodSandboxId: &id}
-	logrus.Debugf("RunPodSandboxResponse: %+v", resp)
-	return resp, nil
+	return id, nil
 }
||||||
|
|
||||||
func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) {
|
func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) {
|
||||||
|
|
|
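RunPodSandbox acquires resources in sequence (pod name, pod ID, sandbox directory, shm mount, infra container name and ID, network namespace) and registers a deferred rollback for each one that fires only when a later step leaves err non-nil. A stripped-down sketch of that pattern with placeholder steps, not cri-o code:

func stepA() (string, error) { return "id", nil }
func undoA(id string)        {}
func stepB(id string) error  { return nil }

func runWithRollback() (string, error) {
	id, err := stepA()
	if err != nil {
		return "", err
	}
	defer func() {
		// The closure sees the latest value of err, so this runs
		// only when a later step has failed.
		if err != nil {
			undoA(id)
		}
	}()

	if err = stepB(id); err != nil {
		return "", err
	}
	return id, nil
}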
@ -1,26 +1,23 @@
|
||||||
package server
|
package manager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/kubernetes-incubator/cri-o/oci"
|
"github.com/kubernetes-incubator/cri-o/oci"
|
||||||
"golang.org/x/net/context"
|
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PodSandboxStatus returns the Status of the PodSandbox.
|
// PodSandboxStatus returns the Status of the PodSandbox.
|
||||||
-func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
-	logrus.Debugf("PodSandboxStatusRequest %+v", req)
-	sb, err := s.getPodSandboxFromRequest(req)
+func (m *Manager) PodSandboxStatus(sbID string) (*pb.PodSandboxStatus, error) {
+	sb, err := m.getPodSandboxWithPartialID(sbID)
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
podInfraContainer := sb.infraContainer
|
podInfraContainer := sb.infraContainer
|
||||||
if err = s.runtime.UpdateStatus(podInfraContainer); err != nil {
|
if err = m.runtime.UpdateStatus(podInfraContainer); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
cState := s.runtime.ContainerStatus(podInfraContainer)
|
cState := m.runtime.ContainerStatus(podInfraContainer)
|
||||||
created := cState.Created.UnixNano()
|
created := cState.Created.UnixNano()
|
||||||
|
|
||||||
netNsPath, err := podInfraContainer.NetNsPath()
|
netNsPath, err := podInfraContainer.NetNsPath()
|
||||||
|
@ -28,7 +25,7 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
podNamespace := ""
|
podNamespace := ""
|
||||||
ip, err := s.netPlugin.GetContainerNetworkStatus(netNsPath, podNamespace, sb.id, podInfraContainer.Name())
|
ip, err := m.netPlugin.GetContainerNetworkStatus(netNsPath, podNamespace, sb.id, podInfraContainer.Name())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// ignore the error on network status
|
// ignore the error on network status
|
||||||
ip = ""
|
ip = ""
|
||||||
|
@ -40,23 +37,20 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
|
||||||
}
|
}
|
||||||
|
|
||||||
sandboxID := sb.id
|
sandboxID := sb.id
|
||||||
resp := &pb.PodSandboxStatusResponse{
|
status := &pb.PodSandboxStatus{
|
||||||
Status: &pb.PodSandboxStatus{
|
Id: &sandboxID,
|
||||||
Id: &sandboxID,
|
CreatedAt: int64Ptr(created),
|
||||||
CreatedAt: int64Ptr(created),
|
Linux: &pb.LinuxPodSandboxStatus{
|
||||||
Linux: &pb.LinuxPodSandboxStatus{
|
Namespaces: &pb.Namespace{
|
||||||
Namespaces: &pb.Namespace{
|
Network: sPtr(netNsPath),
|
||||||
Network: sPtr(netNsPath),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
Network: &pb.PodSandboxNetworkStatus{Ip: &ip},
|
|
||||||
State: &rStatus,
|
|
||||||
Labels: sb.labels,
|
|
||||||
Annotations: sb.annotations,
|
|
||||||
Metadata: sb.metadata,
|
|
||||||
},
|
},
|
||||||
|
Network: &pb.PodSandboxNetworkStatus{Ip: &ip},
|
||||||
|
State: &rStatus,
|
||||||
|
Labels: sb.labels,
|
||||||
|
Annotations: sb.annotations,
|
||||||
|
Metadata: sb.metadata,
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Infof("PodSandboxStatusResponse: %+v", resp)
|
return status, nil
|
||||||
return resp, nil
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,61 +1,55 @@
|
||||||
package server
|
package manager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/kubernetes-incubator/cri-o/oci"
|
"github.com/kubernetes-incubator/cri-o/oci"
|
||||||
"golang.org/x/net/context"
|
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// StopPodSandbox stops the sandbox. If there are any running containers in the
|
// StopPodSandbox stops the sandbox. If there are any running containers in the
|
||||||
// sandbox, they should be force terminated.
|
// sandbox, they should be force terminated.
|
||||||
-func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
-	logrus.Debugf("StopPodSandboxRequest %+v", req)
-	sb, err := s.getPodSandboxFromRequest(req)
+func (m *Manager) StopPodSandbox(sbID string) error {
+	sb, err := m.getPodSandboxWithPartialID(sbID)
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
podNamespace := ""
|
podNamespace := ""
|
||||||
podInfraContainer := sb.infraContainer
|
podInfraContainer := sb.infraContainer
|
||||||
netnsPath, err := podInfraContainer.NetNsPath()
|
netnsPath, err := podInfraContainer.NetNsPath()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := os.Stat(netnsPath); err == nil {
|
if _, err := os.Stat(netnsPath); err == nil {
|
||||||
if err2 := s.netPlugin.TearDownPod(netnsPath, podNamespace, sb.id, podInfraContainer.Name()); err2 != nil {
|
if err2 := m.netPlugin.TearDownPod(netnsPath, podNamespace, sb.id, podInfraContainer.Name()); err2 != nil {
|
||||||
return nil, fmt.Errorf("failed to destroy network for container %s in sandbox %s: %v",
|
return fmt.Errorf("failed to destroy network for container %s in sandbox %s: %v",
|
||||||
podInfraContainer.Name(), sb.id, err2)
|
podInfraContainer.Name(), sb.id, err2)
|
||||||
}
|
}
|
||||||
} else if !os.IsNotExist(err) { // it's ok for netnsPath to *not* exist
|
} else if !os.IsNotExist(err) { // it's ok for netnsPath to *not* exist
|
||||||
return nil, fmt.Errorf("failed to stat netns path for container %s in sandbox %s before tearing down the network: %v",
|
return fmt.Errorf("failed to stat netns path for container %s in sandbox %s before tearing down the network: %v",
|
||||||
podInfraContainer.Name(), sb.id, err)
|
podInfraContainer.Name(), sb.id, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close the sandbox networking namespace.
|
// Close the sandbox networking namespace.
|
||||||
if err := sb.netNsRemove(); err != nil {
|
if err := sb.netNsRemove(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
containers := sb.containers.List()
|
containers := sb.containers.List()
|
||||||
containers = append(containers, podInfraContainer)
|
containers = append(containers, podInfraContainer)
|
||||||
|
|
||||||
for _, c := range containers {
|
for _, c := range containers {
|
||||||
if err := s.runtime.UpdateStatus(c); err != nil {
|
if err := m.runtime.UpdateStatus(c); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
cStatus := s.runtime.ContainerStatus(c)
|
cStatus := m.runtime.ContainerStatus(c)
|
||||||
if cStatus.Status != oci.ContainerStateStopped {
|
if cStatus.Status != oci.ContainerStateStopped {
|
||||||
if err := s.runtime.StopContainer(c); err != nil {
|
if err := m.runtime.StopContainer(c); err != nil {
|
||||||
return nil, fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), sb.id, err)
|
return fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), sb.id, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resp := &pb.StopPodSandboxResponse{}
|
return nil
|
||||||
logrus.Debugf("StopPodSandboxResponse: %+v", resp)
|
|
||||||
return resp, nil
|
|
||||||
}
|
}
|
||||||
|
|
|
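Taken together, the sandbox entry points are now a plain Go API: RunPodSandbox hands back the new pod ID, and the status/stop/remove calls accept a full or truncated ID. An end-to-end usage sketch, assuming an already constructed *Manager m and a valid *pb.PodSandboxConfig cfg:

id, err := m.RunPodSandbox(cfg)
if err != nil {
	logrus.Fatalf("run sandbox: %v", err)
}

if status, err := m.PodSandboxStatus(id); err == nil {
	logrus.Infof("sandbox %s state: %v", id, status.GetState())
}

if err := m.StopPodSandbox(id); err != nil {
	logrus.Errorf("stop sandbox: %v", err)
}
if err := m.RemovePodSandbox(id); err != nil {
	logrus.Errorf("remove sandbox: %v", err)
}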
@ -1,4 +1,4 @@
|
||||||
package server
|
package manager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
|
@@ -1,29 +1,23 @@
-package server
+package manager
 
-import (
-	"golang.org/x/net/context"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-)
-
-// Version returns the runtime name, runtime version and runtime API version
-func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) {
+// Version returns the runtime name and runtime version
+func (m *Manager) Version() (*VersionResponse, error) {
 
-	runtimeVersion, err := s.runtime.Version()
+	runtimeVersion, err := m.runtime.Version()
 	if err != nil {
 		return nil, err
 	}
 
-	// TODO: Track upstream code. For now it expects 0.1.0
-	version := "0.1.0"
-
-	// taking const address
-	rav := runtimeAPIVersion
-	runtimeName := s.runtime.Name()
+	runtimeName := m.runtime.Name()
 
-	return &pb.VersionResponse{
-		Version:           &version,
-		RuntimeName:       &runtimeName,
-		RuntimeVersion:    &runtimeVersion,
-		RuntimeApiVersion: &rav,
+	return &VersionResponse{
+		RuntimeName:    runtimeName,
+		RuntimeVersion: runtimeVersion,
 	}, nil
 }
+
+// VersionResponse is returned from Version.
+type VersionResponse struct {
+	RuntimeVersion string
+	RuntimeName    string
+}
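Version now returns a plain VersionResponse and drops the CRI-only fields (Version, RuntimeApiVersion), so the server presumably re-adds them when answering the kubelet. A sketch of that adapter, reusing the values the removed code referenced; the s.manager field and keeping runtimeAPIVersion in the server package are assumptions:

func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) {
	v, err := s.manager.Version()
	if err != nil {
		return nil, err
	}
	version := "0.1.0"       // kubelet currently expects 0.1.0
	rav := runtimeAPIVersion // taking the const address still needs a variable
	return &pb.VersionResponse{
		Version:           &version,
		RuntimeName:       &v.RuntimeName,
		RuntimeVersion:    &v.RuntimeVersion,
		RuntimeApiVersion: &rav,
	}, nil
}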
|
@ -1,89 +0,0 @@
|
||||||
// +build apparmor
|
|
||||||
|
|
||||||
package apparmor
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
binary = "apparmor_parser"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetVersion returns the major and minor version of apparmor_parser.
|
|
||||||
func GetVersion() (int, error) {
|
|
||||||
output, err := cmd("", "--version")
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return parseVersion(output)
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadProfile runs `apparmor_parser -r` on a specified apparmor profile to
|
|
||||||
// replace the profile.
|
|
||||||
func LoadProfile(profilePath string) error {
|
|
||||||
_, err := cmd("", "-r", profilePath)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// cmd runs `apparmor_parser` with the passed arguments.
|
|
||||||
func cmd(dir string, arg ...string) (string, error) {
|
|
||||||
c := exec.Command(binary, arg...)
|
|
||||||
c.Dir = dir
|
|
||||||
|
|
||||||
output, err := c.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(output), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseVersion takes the output from `apparmor_parser --version` and returns
|
|
||||||
// a representation of the {major, minor, patch} version as a single number of
|
|
||||||
// the form MMmmPPP {major, minor, patch}.
|
|
||||||
func parseVersion(output string) (int, error) {
|
|
||||||
// output is in the form of the following:
|
|
||||||
// AppArmor parser version 2.9.1
|
|
||||||
// Copyright (C) 1999-2008 Novell Inc.
|
|
||||||
// Copyright 2009-2012 Canonical Ltd.
|
|
||||||
|
|
||||||
lines := strings.SplitN(output, "\n", 2)
|
|
||||||
words := strings.Split(lines[0], " ")
|
|
||||||
version := words[len(words)-1]
|
|
||||||
|
|
||||||
// split by major minor version
|
|
||||||
v := strings.Split(version, ".")
|
|
||||||
if len(v) == 0 || len(v) > 3 {
|
|
||||||
return -1, fmt.Errorf("parsing version failed for output: `%s`", output)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Default the versions to 0.
|
|
||||||
var majorVersion, minorVersion, patchLevel int
|
|
||||||
|
|
||||||
majorVersion, err := strconv.Atoi(v[0])
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(v) > 1 {
|
|
||||||
minorVersion, err = strconv.Atoi(v[1])
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(v) > 2 {
|
|
||||||
patchLevel, err = strconv.Atoi(v[2])
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// major*10^5 + minor*10^3 + patch*10^0
|
|
||||||
numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel
|
|
||||||
return numericVersion, nil
|
|
||||||
}
|
|
|
@@ -1,14 +0,0 @@
-package apparmor
-
-const (
-	// DefaultApparmorProfile is the name of default apparmor profile name.
-	DefaultApparmorProfile = "ocid-default"
-
-	// ContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container profile.
-	ContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"
-
-	// ProfileRuntimeDefault is he profile specifying the runtime default.
-	ProfileRuntimeDefault = "runtime/default"
-	// ProfileNamePrefix is the prefix for specifying profiles loaded on the node.
-	ProfileNamePrefix = "localhost/"
-)
@ -1,145 +0,0 @@
// +build apparmor

package apparmor

import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"strings"

	"github.com/docker/docker/utils/templates"
	"github.com/opencontainers/runc/libcontainer/apparmor"
)

const (
	// profileDirectory is the file store for apparmor profiles and macros.
	profileDirectory = "/etc/apparmor.d"
)

// profileData holds information about the given profile for generation.
type profileData struct {
	// Name is the profile name.
	Name string
	// Imports defines the apparmor functions to import, before defining the profile.
	Imports []string
	// InnerImports defines the apparmor functions to import in the profile.
	InnerImports []string
	// Version is the {major, minor, patch} version of apparmor_parser as a single number.
	Version int
}

// EnsureDefaultApparmorProfile loads the default apparmor profile, if it is not already loaded.
func EnsureDefaultApparmorProfile() error {
	if apparmor.IsEnabled() {
		loaded, err := IsLoaded(DefaultApparmorProfile)
		if err != nil {
			return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", DefaultApparmorProfile, err)
		}

		// Nothing to do.
		if loaded {
			return nil
		}

		// Load the profile.
		if err := InstallDefault(DefaultApparmorProfile); err != nil {
			return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", DefaultApparmorProfile)
		}
	}

	return nil
}

// IsEnabled returns true if apparmor is enabled for the host.
func IsEnabled() bool {
	return apparmor.IsEnabled()
}

// GetProfileNameFromPodAnnotations gets the name of the profile to use with the container from
// the pod annotations.
func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string {
	return annotations[ContainerAnnotationKeyPrefix+containerName]
}

// InstallDefault generates a default profile in a temp directory determined by
// os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'.
func InstallDefault(name string) error {
	p := profileData{
		Name: name,
	}

	// Install to a temporary directory.
	f, err := ioutil.TempFile("", name)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := p.generateDefault(f); err != nil {
		return err
	}

	return LoadProfile(f.Name())
}

// IsLoaded checks if a profile with the given name has been loaded into the
// kernel.
func IsLoaded(name string) (bool, error) {
	file, err := os.Open("/sys/kernel/security/apparmor/profiles")
	if err != nil {
		return false, err
	}
	defer file.Close()

	r := bufio.NewReader(file)
	for {
		p, err := r.ReadString('\n')
		if err == io.EOF {
			break
		}
		if err != nil {
			return false, err
		}
		if strings.HasPrefix(p, name+" ") {
			return true, nil
		}
	}

	return false, nil
}

// generateDefault creates an apparmor profile from profileData.
func (p *profileData) generateDefault(out io.Writer) error {
	compiled, err := templates.NewParse("apparmor_profile", baseTemplate)
	if err != nil {
		return err
	}

	if macroExists("tunables/global") {
		p.Imports = append(p.Imports, "#include <tunables/global>")
	} else {
		p.Imports = append(p.Imports, "@{PROC}=/proc/")
	}

	if macroExists("abstractions/base") {
		p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
	}

	ver, err := GetVersion()
	if err != nil {
		return err
	}
	p.Version = ver

	return compiled.Execute(out, p)
}

// macroExists checks if the passed macro exists.
func macroExists(m string) bool {
	_, err := os.Stat(path.Join(profileDirectory, m))
	return err == nil
}

@ -1,18 +0,0 @@
// +build !apparmor

package apparmor

// IsEnabled returns false when built without the apparmor build tag.
func IsEnabled() bool {
	return false
}

// EnsureDefaultApparmorProfile does nothing when built without the apparmor build tag.
func EnsureDefaultApparmorProfile() error {
	return nil
}

// GetProfileNameFromPodAnnotations does nothing when built without the apparmor build tag.
func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string {
	return ""
}

@ -1,45 +0,0 @@
// +build apparmor

package apparmor

// baseTemplate defines the default apparmor profile for containers.
const baseTemplate = `
{{range $value := .Imports}}
{{$value}}
{{end}}

profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
{{range $value := .InnerImports}}
  {{$value}}
{{end}}

  network,
  capability,
  file,
  umount,

  deny @{PROC}/* w,   # deny write for all files directly in /proc (not in a subdir)
  # deny write to files not in /proc/<number>/** or /proc/sys/**
  deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
  deny @{PROC}/sys/[^k]** w,  # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
  deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w,  # deny everything except shm* in /proc/sys/kernel/
  deny @{PROC}/sysrq-trigger rwklx,
  deny @{PROC}/mem rwklx,
  deny @{PROC}/kmem rwklx,
  deny @{PROC}/kcore rwklx,

  deny mount,

  deny /sys/[^f]*/** wklx,
  deny /sys/f[^s]*/** wklx,
  deny /sys/fs/[^c]*/** wklx,
  deny /sys/fs/c[^g]*/** wklx,
  deny /sys/fs/cg[^r]*/** wklx,
  deny /sys/firmware/** rwklx,
  deny /sys/kernel/security/** rwklx,

{{if ge .Version 208095}}
  ptrace (trace,read) peer={{.Name}},
{{end}}
}
`

server/config.go (154 lines removed)

@ -1,154 +0,0 @@
package server

import (
	"bytes"
	"io/ioutil"

	"github.com/BurntSushi/toml"
)

// Config represents the entire set of configuration values that can be set for
// the server. This is intended to be loaded from a toml-encoded config file.
type Config struct {
	RootConfig
	APIConfig
	RuntimeConfig
	ImageConfig
}

// This structure is necessary to fake the TOML tables when parsing,
// while also not requiring a bunch of layered structs for no good
// reason.

// RootConfig represents the root of the "ocid" TOML config table.
type RootConfig struct {
	// Root is a path to the "root directory" where all information not
	// explicitly handled by other options will be stored.
	Root string `toml:"root"`

	// SandboxDir is the directory where ocid will store all of its sandbox
	// state and other information.
	SandboxDir string `toml:"sandbox_dir"`

	// ContainerDir is the directory where ocid will store all of its container
	// state and other information.
	ContainerDir string `toml:"container_dir"`

	// LogDir is the default log directory where all logs will go unless kubelet
	// tells us to put them somewhere else.
	//
	// TODO: This is currently unused until the conmon logging rewrite is done.
	LogDir string `toml:"log_dir"`
}

// APIConfig represents the "ocid.api" TOML config table.
type APIConfig struct {
	// Listen is the path to the AF_LOCAL socket on which cri-o will listen.
	// This may support proto://addr formats later, but currently this is just
	// a path.
	Listen string `toml:"listen"`
}

// RuntimeConfig represents the "ocid.runtime" TOML config table.
type RuntimeConfig struct {
	// Runtime is a path to the OCI runtime which ocid will be using. Currently
	// the only known working choice is runC, simply because the OCI has not
	// yet merged a CLI API (so we assume runC's API here).
	Runtime string `toml:"runtime"`

	// Conmon is the path to the conmon binary, used for managing the runtime.
	Conmon string `toml:"conmon"`

	// ConmonEnv is the environment variable list for the conmon process.
	ConmonEnv []string `toml:"conmon_env"`

	// SELinux determines whether or not SELinux is used for pod separation.
	SELinux bool `toml:"selinux"`

	// SeccompProfile is the seccomp json profile path which is used as the
	// default for the runtime.
	SeccompProfile string `toml:"seccomp_profile"`

	// ApparmorProfile is the apparmor profile name which is used as the
	// default for the runtime.
	ApparmorProfile string `toml:"apparmor_profile"`
}

// ImageConfig represents the "ocid.image" TOML config table.
type ImageConfig struct {
	// Pause is the path to the statically linked pause container binary, used
	// as the entrypoint for infra containers.
	//
	// TODO(cyphar): This should be replaced with a path to an OCI image
	// bundle, once the OCI image/storage code has been implemented.
	Pause string `toml:"pause"`

	// ImageDir is the directory where the ocid image store will be stored.
	// TODO: This is currently not really used because we don't have
	// containers/storage integrated.
	ImageDir string `toml:"image_dir"`
}

// tomlConfig is another way of looking at a Config, which is
// TOML-friendly (it has all of the explicit tables). It's just used for
// conversions.
type tomlConfig struct {
	Ocid struct {
		RootConfig
		API     struct{ APIConfig }     `toml:"api"`
		Runtime struct{ RuntimeConfig } `toml:"runtime"`
		Image   struct{ ImageConfig }   `toml:"image"`
	} `toml:"ocid"`
}

func (t *tomlConfig) toConfig(c *Config) {
	c.RootConfig = t.Ocid.RootConfig
	c.APIConfig = t.Ocid.API.APIConfig
	c.RuntimeConfig = t.Ocid.Runtime.RuntimeConfig
	c.ImageConfig = t.Ocid.Image.ImageConfig
}

func (t *tomlConfig) fromConfig(c *Config) {
	t.Ocid.RootConfig = c.RootConfig
	t.Ocid.API.APIConfig = c.APIConfig
	t.Ocid.Runtime.RuntimeConfig = c.RuntimeConfig
	t.Ocid.Image.ImageConfig = c.ImageConfig
}

// FromFile populates the Config from the TOML-encoded file at the given path.
// Returns errors encountered when reading or parsing the files, or nil
// otherwise.
func (c *Config) FromFile(path string) error {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}

	t := new(tomlConfig)
	t.fromConfig(c)

	_, err = toml.Decode(string(data), t)
	if err != nil {
		return err
	}

	t.toConfig(c)
	return nil
}

// ToFile outputs the given Config as a TOML-encoded file at the given path.
// Returns errors encountered when generating or writing the file, or nil
// otherwise.
func (c *Config) ToFile(path string) error {
	var w bytes.Buffer
	e := toml.NewEncoder(&w)

	t := new(tomlConfig)
	t.fromConfig(c)

	if err := e.Encode(*t); err != nil {
		return err
	}

	return ioutil.WriteFile(path, w.Bytes(), 0644)
}

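A sketch of how the TOML round-trip above is typically exercised. It assumes the relocated manager.Config keeps the same FromFile/ToFile methods and promoted fields as the removed server version; the config file path used here is hypothetical.

package main

import (
	"log"

	"github.com/kubernetes-incubator/cri-o/manager"
)

func main() {
	// Start from a zero Config, overlay values from an on-disk TOML file,
	// then write the merged result back out (0644, as in ToFile above).
	cfg := new(manager.Config) // assumes the struct moved over unchanged
	if err := cfg.FromFile("/etc/ocid/ocid.conf"); err != nil { // hypothetical path
		log.Fatal(err)
	}
	cfg.Runtime = "/usr/local/bin/runc" // override a single promoted field in code
	if err := cfg.ToFile("/tmp/ocid.conf"); err != nil {
		log.Fatal(err)
	}
}
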
@ -1,36 +1,106 @@
--- before
package server

import (
	"fmt"

	"github.com/kubernetes-incubator/cri-o/oci"
)

const (
	// containerTypeSandbox represents a pod sandbox container
	containerTypeSandbox = "sandbox"
	// containerTypeContainer represents a container running within a pod
	containerTypeContainer = "container"
)

type containerRequest interface {
	GetContainerId() string
}

func (s *Server) getContainerFromRequest(req containerRequest) (*oci.Container, error) {
	ctrID := req.GetContainerId()
	if ctrID == "" {
		return nil, fmt.Errorf("container ID should not be empty")
	}

	containerID, err := s.ctrIDIndex.Get(ctrID)
	if err != nil {
		return nil, fmt.Errorf("container with ID starting with %s not found: %v", ctrID, err)
	}

	c := s.state.containers.Get(containerID)
	if c == nil {
		return nil, fmt.Errorf("specified container not found: %s", containerID)
	}
	return c, nil
}

+++ after
package server

import (
	"golang.org/x/net/context"

	"github.com/Sirupsen/logrus"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// CreateContainer creates a new container in specified PodSandbox
func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) {
	logrus.Debugf("CreateContainerRequest %+v", req)

	containerID, err := s.manager.CreateContainer(req.GetPodSandboxId(), req.GetConfig(), req.GetSandboxConfig())
	if err != nil {
		return nil, err
	}

	resp := &pb.CreateContainerResponse{
		ContainerId: &containerID,
	}
	logrus.Debugf("CreateContainerResponse: %+v", resp)

	return resp, nil
}

// ListContainers lists all containers by filters.
func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
	logrus.Debugf("ListContainersRequest %+v", req)

	ctrs, err := s.manager.ListContainers(req.GetFilter())
	if err != nil {
		return nil, err
	}

	resp := &pb.ListContainersResponse{
		Containers: ctrs,
	}
	logrus.Debugf("ListContainersResponse: %+v", resp)

	return resp, nil
}

// RemoveContainer removes the container. If the container is running, the container
// should be force removed.
func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) {
	logrus.Debugf("RemoveContainerRequest %+v", req)

	if err := s.manager.RemoveContainer(req.GetContainerId()); err != nil {
		return nil, err
	}

	resp := &pb.RemoveContainerResponse{}
	logrus.Debugf("RemoveContainerResponse: %+v", resp)

	return resp, nil
}

// StartContainer starts the container.
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
	logrus.Debugf("StartContainerRequest %+v", req)

	if err := s.manager.StartContainer(req.GetContainerId()); err != nil {
		return nil, err
	}

	resp := &pb.StartContainerResponse{}
	logrus.Debugf("StartContainerResponse %+v", resp)

	return resp, nil
}

// ContainerStatus returns status of the container.
func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
	logrus.Debugf("ContainerStatusRequest %+v", req)

	status, err := s.manager.ContainerStatus(req.GetContainerId())
	if err != nil {
		return nil, err
	}

	resp := &pb.ContainerStatusResponse{
		Status: status,
	}
	logrus.Debugf("ContainerStatusResponse: %+v", resp)

	return resp, nil
}

// StopContainer stops a running container with a grace period (i.e., timeout).
func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
	logrus.Debugf("StopContainerRequest %+v", req)

	if err := s.manager.StopContainer(req.GetContainerId(), req.GetTimeout()); err != nil {
		return nil, err
	}

	resp := &pb.StopContainerResponse{}
	logrus.Debugf("StopContainerResponse: %+v", resp)
	return resp, nil
}

// UpdateRuntimeConfig updates the configuration of a running container.
func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (*pb.UpdateRuntimeConfigResponse, error) {
	return nil, nil
}

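The rewritten handlers above only forward to s.manager. From these call sites alone, the container-facing surface the manager must provide looks roughly like the interface below; this is a sketch inferred from the hunk, not the actual manager type in this commit.

package server

import pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"

// containerManager is inferred from the call sites in the new server/container.go
// above; the real manager implementation may differ in naming and signatures.
type containerManager interface {
	CreateContainer(podSandboxID string, config *pb.ContainerConfig, sandboxConfig *pb.PodSandboxConfig) (string, error)
	ListContainers(filter *pb.ContainerFilter) ([]*pb.Container, error)
	RemoveContainer(containerID string) error
	StartContainer(containerID string) error
	StopContainer(containerID string, timeout int64) error
	ContainerStatus(containerID string) (*pb.ContainerStatus, error)
}
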
@ -1,11 +0,0 @@
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// Attach prepares a streaming endpoint to attach to a running container.
func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachResponse, error) {
	return nil, nil
}

@ -1,434 +0,0 @@
package server

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/stringid"
	"github.com/kubernetes-incubator/cri-o/oci"
	"github.com/kubernetes-incubator/cri-o/server/apparmor"
	"github.com/kubernetes-incubator/cri-o/server/seccomp"
	"github.com/kubernetes-incubator/cri-o/utils"
	"github.com/opencontainers/runc/libcontainer/label"
	"github.com/opencontainers/runtime-tools/generate"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

const (
	seccompUnconfined      = "unconfined"
	seccompRuntimeDefault  = "runtime/default"
	seccompLocalhostPrefix = "localhost/"
)

// CreateContainer creates a new container in specified PodSandbox
func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) {
	logrus.Debugf("CreateContainerRequest %+v", req)
	sbID := req.GetPodSandboxId()
	if sbID == "" {
		return nil, fmt.Errorf("PodSandboxId should not be empty")
	}

	sandboxID, err := s.podIDIndex.Get(sbID)
	if err != nil {
		return nil, fmt.Errorf("PodSandbox with ID starting with %s not found: %v", sbID, err)
	}

	sb := s.getSandbox(sandboxID)
	if sb == nil {
		return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID)
	}

	// The config of the container
	containerConfig := req.GetConfig()
	if containerConfig == nil {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig is nil")
	}

	name := containerConfig.GetMetadata().GetName()
	if name == "" {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Name is empty")
	}

	attempt := containerConfig.GetMetadata().GetAttempt()
	containerID, containerName, err := s.generateContainerIDandName(sb.name, name, attempt)
	if err != nil {
		return nil, err
	}

	// containerDir is the dir for the container bundle.
	containerDir := filepath.Join(s.runtime.ContainerDir(), containerID)
	defer func() {
		if err != nil {
			s.releaseContainerName(containerName)
			err1 := os.RemoveAll(containerDir)
			if err1 != nil {
				logrus.Warnf("Failed to cleanup container directory: %v", err1)
			}
		}
	}()

	if _, err = os.Stat(containerDir); err == nil {
		return nil, fmt.Errorf("container (%s) already exists", containerDir)
	}

	if err = os.MkdirAll(containerDir, 0755); err != nil {
		return nil, err
	}

	container, err := s.createSandboxContainer(containerID, containerName, sb, containerDir, containerConfig)
	if err != nil {
		return nil, err
	}

	if err = s.runtime.CreateContainer(container); err != nil {
		return nil, err
	}

	if err = s.runtime.UpdateStatus(container); err != nil {
		return nil, err
	}

	s.addContainer(container)

	if err = s.ctrIDIndex.Add(containerID); err != nil {
		s.removeContainer(container)
		return nil, err
	}

	resp := &pb.CreateContainerResponse{
		ContainerId: &containerID,
	}

	logrus.Debugf("CreateContainerResponse: %+v", resp)
	return resp, nil
}

func (s *Server) createSandboxContainer(containerID string, containerName string, sb *sandbox, containerDir string, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
	if sb == nil {
		return nil, errors.New("createSandboxContainer needs a sandbox")
	}
	// creates a spec Generator with the default spec.
	specgen := generate.New()

	// by default, the root path is an empty string.
	// here set it to be "rootfs".
	specgen.SetRootPath("rootfs")

	processArgs := []string{}
	commands := containerConfig.GetCommand()
	args := containerConfig.GetArgs()
	if commands == nil && args == nil {
		// TODO: override with image's config in #189
		processArgs = []string{"/bin/sh"}
	}
	if commands != nil {
		processArgs = append(processArgs, commands...)
	}
	if args != nil {
		processArgs = append(processArgs, args...)
	}

	specgen.SetProcessArgs(processArgs)

	cwd := containerConfig.GetWorkingDir()
	if cwd == "" {
		cwd = "/"
	}
	specgen.SetProcessCwd(cwd)

	envs := containerConfig.GetEnvs()
	if envs != nil {
		for _, item := range envs {
			key := item.GetKey()
			value := item.GetValue()
			if key == "" {
				continue
			}
			env := fmt.Sprintf("%s=%s", key, value)
			specgen.AddProcessEnv(env)
		}
	}

	mounts := containerConfig.GetMounts()
	for _, mount := range mounts {
		dest := mount.GetContainerPath()
		if dest == "" {
			return nil, fmt.Errorf("Mount.ContainerPath is empty")
		}

		src := mount.GetHostPath()
		if src == "" {
			return nil, fmt.Errorf("Mount.HostPath is empty")
		}

		options := []string{"rw"}
		if mount.GetReadonly() {
			options = []string{"ro"}
		}

		if mount.GetSelinuxRelabel() {
			// Need a way in kubernetes to determine if the volume is shared or private
			if err := label.Relabel(src, sb.mountLabel, true); err != nil && err != syscall.ENOTSUP {
				return nil, fmt.Errorf("relabel failed %s: %v", src, err)
			}
		}

		specgen.AddBindMount(src, dest, options)
	}

	labels := containerConfig.GetLabels()

	metadata := containerConfig.GetMetadata()

	annotations := containerConfig.GetAnnotations()
	if annotations != nil {
		for k, v := range annotations {
			specgen.AddAnnotation(k, v)
		}
	}

	// set this container's apparmor profile if it is set by sandbox
	if s.appArmorEnabled {
		appArmorProfileName := s.getAppArmorProfileName(sb.annotations, metadata.GetName())
		if appArmorProfileName != "" {
			// reload default apparmor profile if it is unloaded.
			if s.appArmorProfile == apparmor.DefaultApparmorProfile {
				if err := apparmor.EnsureDefaultApparmorProfile(); err != nil {
					return nil, err
				}
			}

			specgen.SetProcessApparmorProfile(appArmorProfileName)
		}
	}

	if containerConfig.GetLinux().GetSecurityContext().GetPrivileged() {
		specgen.SetupPrivileged(true)
	}

	if containerConfig.GetLinux().GetSecurityContext().GetReadonlyRootfs() {
		specgen.SetRootReadonly(true)
	}

	logPath := containerConfig.GetLogPath()

	if containerConfig.GetTty() {
		specgen.SetProcessTerminal(true)
	}

	linux := containerConfig.GetLinux()
	if linux != nil {
		resources := linux.GetResources()
		if resources != nil {
			cpuPeriod := resources.GetCpuPeriod()
			if cpuPeriod != 0 {
				specgen.SetLinuxResourcesCPUPeriod(uint64(cpuPeriod))
			}

			cpuQuota := resources.GetCpuQuota()
			if cpuQuota != 0 {
				specgen.SetLinuxResourcesCPUQuota(uint64(cpuQuota))
			}

			cpuShares := resources.GetCpuShares()
			if cpuShares != 0 {
				specgen.SetLinuxResourcesCPUShares(uint64(cpuShares))
			}

			memoryLimit := resources.GetMemoryLimitInBytes()
			if memoryLimit != 0 {
				specgen.SetLinuxResourcesMemoryLimit(uint64(memoryLimit))
			}

			oomScoreAdj := resources.GetOomScoreAdj()
			specgen.SetLinuxResourcesOOMScoreAdj(int(oomScoreAdj))
		}

		capabilities := linux.GetSecurityContext().GetCapabilities()
		if capabilities != nil {
			addCaps := capabilities.GetAddCapabilities()
			if addCaps != nil {
				for _, cap := range addCaps {
					if err := specgen.AddProcessCapability(cap); err != nil {
						return nil, err
					}
				}
			}

			dropCaps := capabilities.GetDropCapabilities()
			if dropCaps != nil {
				for _, cap := range dropCaps {
					if err := specgen.DropProcessCapability(cap); err != nil {
						return nil, err
					}
				}
			}
		}

		specgen.SetProcessSelinuxLabel(sb.processLabel)
		specgen.SetLinuxMountLabel(sb.mountLabel)

		user := linux.GetSecurityContext().GetRunAsUser()
		specgen.SetProcessUID(uint32(user))

		specgen.SetProcessGID(uint32(user))

		groups := linux.GetSecurityContext().GetSupplementalGroups()
		for _, group := range groups {
			specgen.AddProcessAdditionalGid(uint32(group))
		}
	}
	// Join the namespace paths for the pod sandbox container.
	podInfraState := s.runtime.ContainerStatus(sb.infraContainer)

	logrus.Debugf("pod container state %+v", podInfraState)

	ipcNsPath := fmt.Sprintf("/proc/%d/ns/ipc", podInfraState.Pid)
	if err := specgen.AddOrReplaceLinuxNamespace("ipc", ipcNsPath); err != nil {
		return nil, err
	}

	netNsPath := sb.netNsPath()
	if netNsPath == "" {
		// The sandbox does not have a permanent namespace,
		// it's on the host one.
		netNsPath = fmt.Sprintf("/proc/%d/ns/net", podInfraState.Pid)
	}

	if err := specgen.AddOrReplaceLinuxNamespace("network", netNsPath); err != nil {
		return nil, err
	}

	imageSpec := containerConfig.GetImage()
	if imageSpec == nil {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image is nil")
	}

	image := imageSpec.GetImage()
	if image == "" {
		return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image.Image is empty")
	}

	// bind mount the pod shm
	specgen.AddBindMount(sb.shmPath, "/dev/shm", []string{"rw"})

	specgen.AddAnnotation("ocid/name", containerName)
	specgen.AddAnnotation("ocid/sandbox_id", sb.id)
	specgen.AddAnnotation("ocid/sandbox_name", sb.infraContainer.Name())
	specgen.AddAnnotation("ocid/container_type", containerTypeContainer)
	specgen.AddAnnotation("ocid/log_path", logPath)
	specgen.AddAnnotation("ocid/tty", fmt.Sprintf("%v", containerConfig.GetTty()))
	specgen.AddAnnotation("ocid/image", image)

	metadataJSON, err := json.Marshal(metadata)
	if err != nil {
		return nil, err
	}
	specgen.AddAnnotation("ocid/metadata", string(metadataJSON))

	labelsJSON, err := json.Marshal(labels)
	if err != nil {
		return nil, err
	}
	specgen.AddAnnotation("ocid/labels", string(labelsJSON))

	annotationsJSON, err := json.Marshal(annotations)
	if err != nil {
		return nil, err
	}
	specgen.AddAnnotation("ocid/annotations", string(annotationsJSON))

	if err = s.setupSeccomp(&specgen, containerName, sb.annotations); err != nil {
		return nil, err
	}

	if err = specgen.SaveToFile(filepath.Join(containerDir, "config.json"), generate.ExportOptions{}); err != nil {
		return nil, err
	}

	// TODO: copy the rootfs into the bundle.
	// Currently, utils.CreateFakeRootfs is used to populate the rootfs.
	if err = utils.CreateFakeRootfs(containerDir, image); err != nil {
		return nil, err
	}

	container, err := oci.NewContainer(containerID, containerName, containerDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.GetTty())
	if err != nil {
		return nil, err
	}

	return container, nil
}

func (s *Server) setupSeccomp(specgen *generate.Generator, cname string, sbAnnotations map[string]string) error {
	profile, ok := sbAnnotations["security.alpha.kubernetes.io/seccomp/container/"+cname]
	if !ok {
		profile, ok = sbAnnotations["security.alpha.kubernetes.io/seccomp/pod"]
		if !ok {
			// running w/o seccomp, aka unconfined
			profile = seccompUnconfined
		}
	}
	if !s.seccompEnabled {
		if profile != seccompUnconfined {
			return fmt.Errorf("seccomp is not enabled in your kernel, cannot run with a profile")
		}
		logrus.Warn("seccomp is not enabled in your kernel, running container without profile")
	}
	if profile == seccompUnconfined {
		// running w/o seccomp, aka unconfined
		specgen.Spec().Linux.Seccomp = nil
		return nil
	}
	if profile == seccompRuntimeDefault {
		return seccomp.LoadProfileFromStruct(s.seccompProfile, specgen)
	}
	if !strings.HasPrefix(profile, seccompLocalhostPrefix) {
		return fmt.Errorf("unknown seccomp profile option: %q", profile)
	}
	//file, err := ioutil.ReadFile(filepath.Join(s.seccompProfileRoot, strings.TrimPrefix(profile, seccompLocalhostPrefix)))
	//if err != nil {
	//	return err
	//}
	// TODO(runcom): setup from provided node's seccomp profile
	// can't do this yet, see https://issues.k8s.io/36997
	return nil
}

func (s *Server) generateContainerIDandName(podName string, name string, attempt uint32) (string, string, error) {
	var (
		err error
		id  = stringid.GenerateNonCryptoID()
	)
	nameStr := fmt.Sprintf("%s-%s-%v", podName, name, attempt)
	if name == "infra" {
		nameStr = fmt.Sprintf("%s-%s", podName, name)
	}
	if name, err = s.reserveContainerName(id, nameStr); err != nil {
		return "", "", err
	}
	return id, name, err
}

// getAppArmorProfileName gets the profile name for the given container.
func (s *Server) getAppArmorProfileName(annotations map[string]string, ctrName string) string {
	profile := apparmor.GetProfileNameFromPodAnnotations(annotations, ctrName)

	if profile == "" {
		return ""
	}

	if profile == apparmor.ProfileRuntimeDefault {
		// If the value is runtime/default, then return default profile.
		return s.appArmorProfile
	}

	return strings.TrimPrefix(profile, apparmor.ProfileNamePrefix)
}

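setupSeccomp above resolves the profile from pod annotations, falling back from the per-container key to the pod-wide key and finally to unconfined. A standalone sketch of just that lookup order, using the annotation keys shown in the removed code:

package main

import "fmt"

// resolveSeccompProfile mirrors the lookup order in setupSeccomp above:
// per-container annotation, then pod-wide annotation, then "unconfined".
func resolveSeccompProfile(annotations map[string]string, containerName string) string {
	if p, ok := annotations["security.alpha.kubernetes.io/seccomp/container/"+containerName]; ok {
		return p
	}
	if p, ok := annotations["security.alpha.kubernetes.io/seccomp/pod"]; ok {
		return p
	}
	return "unconfined"
}

func main() {
	ann := map[string]string{"security.alpha.kubernetes.io/seccomp/pod": "runtime/default"}
	fmt.Println(resolveSeccompProfile(ann, "nginx")) // prints: runtime/default
}
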
@ -1,11 +0,0 @@
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// Exec prepares a streaming endpoint to execute a command in the container.
func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) {
	return nil, nil
}

@ -1,46 +0,0 @@
package server

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// ExecSync runs a command in a container synchronously.
func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.ExecSyncResponse, error) {
	logrus.Debugf("ExecSyncRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err = s.runtime.UpdateStatus(c); err != nil {
		return nil, err
	}

	cState := s.runtime.ContainerStatus(c)
	if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
		return nil, fmt.Errorf("container is not created or running")
	}

	cmd := req.GetCmd()
	if cmd == nil {
		return nil, fmt.Errorf("exec command cannot be empty")
	}

	execResp, err := s.runtime.ExecSync(c, cmd, req.GetTimeout())
	if err != nil {
		return nil, err
	}
	resp := &pb.ExecSyncResponse{
		Stdout:   execResp.Stdout,
		Stderr:   execResp.Stderr,
		ExitCode: &execResp.ExitCode,
	}

	logrus.Debugf("ExecSyncResponse: %+v", resp)
	return resp, nil
}

@ -1,110 +0,0 @@
package server

import (
	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	"k8s.io/kubernetes/pkg/fields"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// filterContainer returns whether passed container matches filtering criteria
func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool {
	if filter != nil {
		if filter.State != nil {
			if *c.State != *filter.State {
				return false
			}
		}
		if filter.LabelSelector != nil {
			sel := fields.SelectorFromSet(filter.LabelSelector)
			if !sel.Matches(fields.Set(c.Labels)) {
				return false
			}
		}
	}
	return true
}

// ListContainers lists all containers by filters.
func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
	logrus.Debugf("ListContainersRequest %+v", req)
	var ctrs []*pb.Container
	filter := req.Filter
	ctrList := s.state.containers.List()

	// Filter using container id and pod id first.
	if filter != nil {
		if filter.Id != nil {
			id, err := s.ctrIDIndex.Get(*filter.Id)
			if err != nil {
				return nil, err
			}
			c := s.state.containers.Get(id)
			if c != nil {
				if filter.PodSandboxId != nil {
					if c.Sandbox() == *filter.PodSandboxId {
						ctrList = []*oci.Container{c}
					} else {
						ctrList = []*oci.Container{}
					}

				} else {
					ctrList = []*oci.Container{c}
				}
			}
		} else {
			if filter.PodSandboxId != nil {
				pod := s.state.sandboxes[*filter.PodSandboxId]
				if pod == nil {
					ctrList = []*oci.Container{}
				} else {
					ctrList = pod.containers.List()
				}
			}
		}
	}

	for _, ctr := range ctrList {
		if err := s.runtime.UpdateStatus(ctr); err != nil {
			return nil, err
		}

		podSandboxID := ctr.Sandbox()
		cState := s.runtime.ContainerStatus(ctr)
		created := cState.Created.UnixNano()
		rState := pb.ContainerState_CONTAINER_UNKNOWN
		cID := ctr.ID()

		c := &pb.Container{
			Id:           &cID,
			PodSandboxId: &podSandboxID,
			CreatedAt:    int64Ptr(created),
			Labels:       ctr.Labels(),
			Metadata:     ctr.Metadata(),
			Annotations:  ctr.Annotations(),
			Image:        ctr.Image(),
		}

		switch cState.Status {
		case oci.ContainerStateCreated:
			rState = pb.ContainerState_CONTAINER_CREATED
		case oci.ContainerStateRunning:
			rState = pb.ContainerState_CONTAINER_RUNNING
		case oci.ContainerStateStopped:
			rState = pb.ContainerState_CONTAINER_EXITED
		}
		c.State = &rState

		// Filter by other criteria such as state and labels.
		if filterContainer(c, req.Filter) {
			ctrs = append(ctrs, c)
		}
	}

	resp := &pb.ListContainersResponse{
		Containers: ctrs,
	}
	logrus.Debugf("ListContainersResponse: %+v", resp)
	return resp, nil
}

@ -1,11 +0,0 @@
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
	return nil, nil
}

@ -1,53 +0,0 @@
package server

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// RemoveContainer removes the container. If the container is running, the container
// should be force removed.
func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) {
	logrus.Debugf("RemoveContainerRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.UpdateStatus(c); err != nil {
		return nil, fmt.Errorf("failed to update container state: %v", err)
	}

	cState := s.runtime.ContainerStatus(c)
	if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
		if err := s.runtime.StopContainer(c); err != nil {
			return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
		}
	}

	if err := s.runtime.DeleteContainer(c); err != nil {
		return nil, fmt.Errorf("failed to delete container %s: %v", c.ID(), err)
	}

	containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID())
	if err := os.RemoveAll(containerDir); err != nil {
		return nil, fmt.Errorf("failed to remove container %s directory: %v", c.ID(), err)
	}

	s.releaseContainerName(c.Name())
	s.removeContainer(c)

	if err := s.ctrIDIndex.Delete(c.ID()); err != nil {
		return nil, err
	}

	resp := &pb.RemoveContainerResponse{}
	logrus.Debugf("RemoveContainerResponse: %+v", resp)
	return resp, nil
}

@ -1,26 +0,0 @@
package server

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// StartContainer starts the container.
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
	logrus.Debugf("StartContainerRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.StartContainer(c); err != nil {
		return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
	}

	resp := &pb.StartContainerResponse{}
	logrus.Debugf("StartContainerResponse %+v", resp)
	return resp, nil
}

@ -1,59 +0,0 @@
package server

import (
	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// ContainerStatus returns status of the container.
func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
	logrus.Debugf("ContainerStatusRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.UpdateStatus(c); err != nil {
		return nil, err
	}

	containerID := c.ID()
	resp := &pb.ContainerStatusResponse{
		Status: &pb.ContainerStatus{
			Id:       &containerID,
			Metadata: c.Metadata(),
		},
	}

	cState := s.runtime.ContainerStatus(c)
	rStatus := pb.ContainerState_CONTAINER_UNKNOWN

	switch cState.Status {
	case oci.ContainerStateCreated:
		rStatus = pb.ContainerState_CONTAINER_CREATED
		created := cState.Created.UnixNano()
		resp.Status.CreatedAt = int64Ptr(created)
	case oci.ContainerStateRunning:
		rStatus = pb.ContainerState_CONTAINER_RUNNING
		created := cState.Created.UnixNano()
		resp.Status.CreatedAt = int64Ptr(created)
		started := cState.Started.UnixNano()
		resp.Status.StartedAt = int64Ptr(started)
	case oci.ContainerStateStopped:
		rStatus = pb.ContainerState_CONTAINER_EXITED
		created := cState.Created.UnixNano()
		resp.Status.CreatedAt = int64Ptr(created)
		started := cState.Started.UnixNano()
		resp.Status.StartedAt = int64Ptr(started)
		finished := cState.Finished.UnixNano()
		resp.Status.FinishedAt = int64Ptr(finished)
		resp.Status.ExitCode = int32Ptr(cState.ExitCode)
	}

	resp.Status.State = &rStatus

	logrus.Debugf("ContainerStatusResponse: %+v", resp)
	return resp, nil
}

@ -1,33 +0,0 @@
package server

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// StopContainer stops a running container with a grace period (i.e., timeout).
func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
	logrus.Debugf("StopContainerRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.UpdateStatus(c); err != nil {
		return nil, err
	}
	cStatus := s.runtime.ContainerStatus(c)
	if cStatus.Status != oci.ContainerStateStopped {
		if err := s.runtime.StopContainer(c); err != nil {
			return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
		}
	}

	resp := &pb.StopContainerResponse{}
	logrus.Debugf("StopContainerResponse: %+v", resp)
	return resp, nil
}

@ -1,11 +0,0 @@
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// UpdateRuntimeConfig updates the configuration of a running container.
func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (*pb.UpdateRuntimeConfigResponse, error) {
	return nil, nil
}

server/image.go (new file, 42 lines)

@ -0,0 +1,42 @@
package server

import (
	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// ListImages lists existing images.
func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
	logrus.Debugf("ListImages: %+v", req)
	// TODO
	// containers/storage will take care of this by looking inside /var/lib/ocid/images
	// and listing images.
	return &pb.ListImagesResponse{}, nil
}

// PullImage pulls an image with authentication config.
func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
	logrus.Debugf("PullImage: %+v", req)

	if err := s.manager.PullImage(req.GetImage(), req.GetAuth(), req.GetSandboxConfig()); err != nil {
		return nil, err
	}

	return &pb.PullImageResponse{}, nil
}

// RemoveImage removes the image.
func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
	logrus.Debugf("RemoveImage: %+v", req)
	return &pb.RemoveImageResponse{}, nil
}

// ImageStatus returns the status of the image.
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
	logrus.Debugf("ImageStatus: %+v", req)
	// TODO
	// containers/storage will take care of this by looking inside /var/lib/ocid/images
	// and getting the image status
	return &pb.ImageStatusResponse{}, nil
}

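As with the container handlers, the new image handlers only delegate PullImage to the manager. From that single call site, the manager's image surface looks roughly like the interface below; this is an inference from the hunk above, not the actual manager type.

package server

import pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"

// imageManager is inferred from s.manager.PullImage(...) in the new
// server/image.go above; the real manager may expose more methods.
type imageManager interface {
	PullImage(image *pb.ImageSpec, auth *pb.AuthConfig, sandboxConfig *pb.PodSandboxConfig) error
}
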
@ -1,16 +0,0 @@
package server

import (
	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// ListImages lists existing images.
func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
	logrus.Debugf("ListImages: %+v", req)
	// TODO
	// containers/storage will take care of this by looking inside /var/lib/ocid/images
	// and listing images.
	return &pb.ListImagesResponse{}, nil
}

@ -1,82 +0,0 @@
package server

import (
	"errors"
	"io"
	"os"
	"path/filepath"

	"github.com/Sirupsen/logrus"
	"github.com/containers/image/directory"
	"github.com/containers/image/image"
	"github.com/containers/image/transports"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// PullImage pulls an image with authentication config.
func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
	logrus.Debugf("PullImage: %+v", req)
	img := req.GetImage().GetImage()
	if img == "" {
		return nil, errors.New("got empty imagespec name")
	}

	// TODO(runcom): deal with AuthConfig in req.GetAuth()

	// TODO(mrunalp,runcom): why do we need the SandboxConfig here?
	// how do we pull in a specified sandbox?
	tr, err := transports.ParseImageName(img)
	if err != nil {
		return nil, err
	}
	// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
	src, err := tr.NewImageSource(nil, nil)
	if err != nil {
		return nil, err
	}
	i := image.FromSource(src)
	blobs, err := i.BlobDigests()
	if err != nil {
		return nil, err
	}

	if err = os.Mkdir(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()), 0755); err != nil {
		return nil, err
	}
	dir, err := directory.NewReference(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()))
	if err != nil {
		return nil, err
	}
	// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
	dest, err := dir.NewImageDestination(nil)
	if err != nil {
		return nil, err
	}
	// save blobs (layer + config for docker v2s2, layers only for docker v2s1 [the config is in the manifest])
	for _, b := range blobs {
		// TODO(runcom,nalin): we need do-then-commit to later purge on error
		var r io.ReadCloser
		r, _, err = src.GetBlob(b)
		if err != nil {
			return nil, err
		}
		if _, _, err = dest.PutBlob(r, b, -1); err != nil {
			r.Close()
			return nil, err
		}
		r.Close()
	}
	// save manifest
	m, _, err := i.Manifest()
	if err != nil {
		return nil, err
	}
	if err := dest.PutManifest(m); err != nil {
		return nil, err
	}

	// TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://)

	return &pb.PullImageResponse{}, nil
}

@ -1,13 +0,0 @@
package server

import (
	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// RemoveImage removes the image.
func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
	logrus.Debugf("RemoveImage: %+v", req)
	return &pb.RemoveImageResponse{}, nil
}

@ -1,16 +0,0 @@
package server

import (
	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// ImageStatus returns the status of the image.
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
	logrus.Debugf("ImageStatus: %+v", req)
	// TODO
	// containers/storage will take care of this by looking inside /var/lib/ocid/images
	// and getting the image status
	return &pb.ImageStatusResponse{}, nil
}

@@ -7,34 +7,13 @@ import (

 // Status returns the status of the runtime
 func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) {
-	// Deal with Runtime conditions
-	runtimeReady, err := s.runtime.RuntimeReady()
+	status, err := s.manager.Status()
 	if err != nil {
 		return nil, err
 	}
-	networkReady, err := s.runtime.NetworkReady()
-	if err != nil {
-		return nil, err
-	}
-
-	// Use vendored strings
-	runtimeReadyConditionString := pb.RuntimeReady
-	networkReadyConditionString := pb.NetworkReady
-
 	resp := &pb.StatusResponse{
-		Status: &pb.RuntimeStatus{
-			Conditions: []*pb.RuntimeCondition{
-				&pb.RuntimeCondition{
-					Type:   &runtimeReadyConditionString,
-					Status: &runtimeReady,
-				},
-				&pb.RuntimeCondition{
-					Type:   &networkReadyConditionString,
-					Status: &networkReady,
-				},
-			},
-		},
+		Status: status,
 	}

 	return resp, nil
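After this change the gRPC handler is a thin shim: readiness checks live in the manager and the server only packages the result into the CRI response. A generic sketch of that delegation shape (the `StatusManager` interface and string status are hypothetical stand-ins for the cri-o manager API, which returns protobuf types):

```go
package shim

import "context"

// StatusManager is a hypothetical stand-in for the manager-side API.
type StatusManager interface {
	Status() (string, error)
}

// statusShim shows the shape of the refactored handlers: hold a manager,
// forward the call, wrap the result.
type statusShim struct {
	manager StatusManager
}

func (s *statusShim) Status(ctx context.Context) (string, error) {
	status, err := s.manager.Status()
	if err != nil {
		return "", err
	}
	return status, nil
}
```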
@@ -1,270 +1,83 @@
 package server

 import (
-	"crypto/rand"
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sync"
+	"golang.org/x/net/context"

 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/stringid"
-	"github.com/kubernetes-incubator/cri-o/oci"
-	"github.com/containernetworking/cni/pkg/ns"
-	"k8s.io/kubernetes/pkg/fields"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )

-type sandboxNetNs struct {
-	sync.Mutex
-	ns      ns.NetNS
-	symlink *os.File
-	closed  bool
-}
-
-func (ns *sandboxNetNs) symlinkCreate(name string) error {
-	b := make([]byte, 4)
-	_, randErr := rand.Reader.Read(b)
-	if randErr != nil {
-		return randErr
-	}
-
-	nsName := fmt.Sprintf("%s-%x", name, b)
-	symlinkPath := filepath.Join(nsRunDir, nsName)
-
-	if err := os.Symlink(ns.ns.Path(), symlinkPath); err != nil {
-		return err
-	}
-
-	fd, err := os.Open(symlinkPath)
-	if err != nil {
-		if removeErr := os.RemoveAll(symlinkPath); removeErr != nil {
-			return removeErr
-		}
-
-		return err
-	}
-
-	ns.symlink = fd
-
-	return nil
-}
-
-func (ns *sandboxNetNs) symlinkRemove() error {
-	if err := ns.symlink.Close(); err != nil {
-		return err
-	}
-
-	return os.RemoveAll(ns.symlink.Name())
-}
-
-func isSymbolicLink(path string) (bool, error) {
-	fi, err := os.Lstat(path)
-	if err != nil {
-		return false, err
-	}
-
-	return fi.Mode()&os.ModeSymlink == os.ModeSymlink, nil
-}
-
-func netNsGet(nspath, name string) (*sandboxNetNs, error) {
-	if err := ns.IsNSorErr(nspath); err != nil {
-		return nil, errSandboxClosedNetNS
-	}
-
-	symlink, symlinkErr := isSymbolicLink(nspath)
-	if symlinkErr != nil {
-		return nil, symlinkErr
-	}
-
-	var resolvedNsPath string
-	if symlink {
-		path, err := os.Readlink(nspath)
-		if err != nil {
-			return nil, err
-		}
-		resolvedNsPath = path
-	} else {
-		resolvedNsPath = nspath
-	}
-
-	netNS, err := ns.GetNS(resolvedNsPath)
+// ListPodSandbox returns a list of SandBoxes.
+func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
+	logrus.Debugf("ListPodSandboxRequest %+v", req)
+
+	pods, err := s.manager.ListPodSandbox(req.GetFilter())
 	if err != nil {
 		return nil, err
 	}
-
-	netNs := &sandboxNetNs{ns: netNS, closed: false,}
-
-	if symlink {
-		fd, err := os.Open(nspath)
-		if err != nil {
-			return nil, err
-		}
-
-		netNs.symlink = fd
-	} else {
-		if err := netNs.symlinkCreate(name); err != nil {
-			return nil, err
-		}
-	}
-
-	return netNs, nil
+	resp := &pb.ListPodSandboxResponse{
+		Items: pods,
+	}
+	logrus.Debugf("ListPodSandboxResponse %+v", resp)
+	return resp, nil
 }

-func hostNetNsPath() (string, error) {
-	netNS, err := ns.GetCurrentNS()
+// RemovePodSandbox deletes the sandbox. If there are any running containers in the
+// sandbox, they should be force deleted.
+func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
+	logrus.Debugf("RemovePodSandboxRequest %+v", req)
+
+	if err := s.manager.RemovePodSandbox(req.GetPodSandboxId()); err != nil {
+		return nil, err
+	}
+
+	resp := &pb.RemovePodSandboxResponse{}
+	logrus.Debugf("RemovePodSandboxResponse %+v", resp)
+	return resp, nil
+}
+
+// RunPodSandbox creates and runs a pod-level sandbox.
+func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest) (resp *pb.RunPodSandboxResponse, err error) {
+	logrus.Debugf("RunPodSandboxRequest %+v", req)
+
+	id, err := s.manager.RunPodSandbox(req.GetConfig())
 	if err != nil {
-		return "", err
+		return nil, err
 	}
-
-	defer netNS.Close()
-
-	return netNS.Path(), nil
+	resp = &pb.RunPodSandboxResponse{PodSandboxId: &id}
+	logrus.Debugf("RunPodSandboxResponse: %+v", resp)
+	return resp, nil
 }

-type sandbox struct {
-	id             string
-	name           string
-	logDir         string
-	labels         fields.Set
-	annotations    map[string]string
-	infraContainer *oci.Container
-	containers     oci.Store
-	processLabel   string
-	mountLabel     string
-	netns          *sandboxNetNs
-	metadata       *pb.PodSandboxMetadata
-	shmPath        string
-}
-
-const (
-	podDefaultNamespace = "default"
-	defaultShmSize      = 64 * 1024 * 1024
-	nsRunDir            = "/var/run/netns"
-)
-
-var (
-	errSandboxIDEmpty     = errors.New("PodSandboxId should not be empty")
-	errSandboxClosedNetNS = errors.New("PodSandbox networking namespace is closed")
-)
-
-func (s *sandbox) addContainer(c *oci.Container) {
-	s.containers.Add(c.Name(), c)
-}
-
-func (s *sandbox) getContainer(name string) *oci.Container {
-	return s.containers.Get(name)
-}
-
-func (s *sandbox) removeContainer(c *oci.Container) {
-	s.containers.Delete(c.Name())
-}
-
-func (s *sandbox) netNs() ns.NetNS {
-	if s.netns == nil {
-		return nil
-	}
-
-	return s.netns.ns
-}
-
-func (s *sandbox) netNsPath() string {
-	if s.netns == nil {
-		return ""
-	}
-
-	return s.netns.symlink.Name()
-}
-
-func (s *sandbox) netNsCreate() error {
-	if s.netns != nil {
-		return fmt.Errorf("net NS already created")
-	}
-
-	netNS, err := ns.NewNS()
+// PodSandboxStatus returns the Status of the PodSandbox.
+func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
+	logrus.Debugf("PodSandboxStatusRequest %+v", req)
+
+	status, err := s.manager.PodSandboxStatus(req.GetPodSandboxId())
 	if err != nil {
-		return err
+		return nil, err
 	}

-	s.netns = &sandboxNetNs{
-		ns:     netNS,
-		closed: false,
-	}
-
-	if err := s.netns.symlinkCreate(s.name); err != nil {
-		logrus.Warnf("Could not create nentns symlink %v", err)
-
-		if err := s.netns.ns.Close(); err != nil {
-			return err
-		}
-
-		return err
-	}
-
-	return nil
+	resp := &pb.PodSandboxStatusResponse{
+		Status: status,
+	}
+	logrus.Infof("PodSandboxStatusResponse: %+v", resp)
+	return resp, nil
 }

-func (s *sandbox) netNsRemove() error {
-	if s.netns == nil {
-		logrus.Warn("no networking namespace")
-		return nil
-	}
-
-	s.netns.Lock()
-	defer s.netns.Unlock()
-
-	if s.netns.closed {
-		// netNsRemove() can be called multiple
-		// times without returning an error.
-		return nil
-	}
-
-	if err := s.netns.symlinkRemove(); err != nil {
-		return err
-	}
-
-	if err := s.netns.ns.Close(); err != nil {
-		return err
-	}
-
-	s.netns.closed = true
-	return nil
-}
-
-func (s *Server) generatePodIDandName(name string, namespace string, attempt uint32) (string, string, error) {
-	var (
-		err error
-		id  = stringid.GenerateNonCryptoID()
-	)
-	if namespace == "" {
-		namespace = podDefaultNamespace
-	}
-
-	if name, err = s.reservePodName(id, fmt.Sprintf("%s-%s-%v", namespace, name, attempt)); err != nil {
-		return "", "", err
-	}
-	return id, name, err
-}
-
-type podSandboxRequest interface {
-	GetPodSandboxId() string
-}
-
-func (s *Server) getPodSandboxFromRequest(req podSandboxRequest) (*sandbox, error) {
-	sbID := req.GetPodSandboxId()
-	if sbID == "" {
-		return nil, errSandboxIDEmpty
-	}
-
-	sandboxID, err := s.podIDIndex.Get(sbID)
-	if err != nil {
-		return nil, fmt.Errorf("PodSandbox with ID starting with %s not found: %v", sbID, err)
-	}
-
-	sb := s.getSandbox(sandboxID)
-	if sb == nil {
-		return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID)
-	}
-	return sb, nil
+// StopPodSandbox stops the sandbox. If there are any running containers in the
+// sandbox, they should be force terminated.
+func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
+	logrus.Debugf("StopPodSandboxRequest %+v", req)
+
+	if err := s.manager.StopPodSandbox(req.GetPodSandboxId()); err != nil {
+		return nil, err
+	}
+
+	resp := &pb.StopPodSandboxResponse{}
+	logrus.Debugf("StopPodSandboxResponse: %+v", resp)
+	return resp, nil
 }
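One of the responsibilities that moves out of the server here is getPodSandboxFromRequest, which expanded a possibly truncated pod ID through the truncindex before looking the sandbox up. A self-contained sketch of that prefix resolution, standing in for docker's truncindex (names and error strings are illustrative):

```go
package sandboxlookup

import (
	"errors"
	"fmt"
	"strings"
)

var errAmbiguousID = errors.New("sandbox ID prefix is ambiguous")

// resolveID expands a possibly truncated sandbox ID against the set of known
// IDs, mimicking what the truncindex lookup in the old server code provided.
func resolveID(known map[string]struct{}, prefix string) (string, error) {
	if prefix == "" {
		return "", errors.New("PodSandboxId should not be empty")
	}
	match := ""
	for id := range known {
		if strings.HasPrefix(id, prefix) {
			if match != "" {
				return "", errAmbiguousID
			}
			match = id
		}
	}
	if match == "" {
		return "", fmt.Errorf("PodSandbox with ID starting with %s not found", prefix)
	}
	return match, nil
}
```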
@@ -1,92 +0,0 @@
package server

import (
	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	"k8s.io/kubernetes/pkg/fields"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// filterSandbox returns whether passed container matches filtering criteria
func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
	if filter != nil {
		if filter.State != nil {
			if *p.State != *filter.State {
				return false
			}
		}
		if filter.LabelSelector != nil {
			sel := fields.SelectorFromSet(filter.LabelSelector)
			if !sel.Matches(fields.Set(p.Labels)) {
				return false
			}
		}
	}
	return true
}

// ListPodSandbox returns a list of SandBoxes.
func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
	logrus.Debugf("ListPodSandboxRequest %+v", req)
	var pods []*pb.PodSandbox
	var podList []*sandbox
	for _, sb := range s.state.sandboxes {
		podList = append(podList, sb)
	}

	filter := req.Filter
	// Filter by pod id first.
	if filter != nil {
		if filter.Id != nil {
			id, err := s.podIDIndex.Get(*filter.Id)
			if err != nil {
				return nil, err
			}
			sb := s.getSandbox(id)
			if sb == nil {
				podList = []*sandbox{}
			} else {
				podList = []*sandbox{sb}
			}
		}
	}

	for _, sb := range podList {
		podInfraContainer := sb.infraContainer
		if podInfraContainer == nil {
			// this can't really happen, but if it does because of a bug
			// it's better not to panic
			continue
		}
		if err := s.runtime.UpdateStatus(podInfraContainer); err != nil {
			return nil, err
		}
		cState := s.runtime.ContainerStatus(podInfraContainer)
		created := cState.Created.UnixNano()
		rStatus := pb.PodSandboxState_SANDBOX_NOTREADY
		if cState.Status == oci.ContainerStateRunning {
			rStatus = pb.PodSandboxState_SANDBOX_READY
		}

		pod := &pb.PodSandbox{
			Id:          &sb.id,
			CreatedAt:   int64Ptr(created),
			State:       &rStatus,
			Labels:      sb.labels,
			Annotations: sb.annotations,
			Metadata:    sb.metadata,
		}

		// Filter by other criteria such as state and labels.
		if filterSandbox(pod, req.Filter) {
			pods = append(pods, pod)
		}
	}

	resp := &pb.ListPodSandboxResponse{
		Items: pods,
	}
	logrus.Debugf("ListPodSandboxResponse %+v", resp)
	return resp, nil
}
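The filtering logic deleted here narrows by ID first, then by state and label selector per sandbox. A minimal sketch of the same check with plain maps, standing in for the fields.SelectorFromSet matcher (function and parameter names are illustrative):

```go
package sandboxfilter

// matches reports whether a sandbox's state and labels satisfy a filter.
// The label check is a plain subset match, standing in for the
// fields.SelectorFromSet matcher used in the deleted code.
func matches(state string, labels map[string]string, wantState string, selector map[string]string) bool {
	if wantState != "" && state != wantState {
		return false
	}
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}
```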
@@ -1,95 +0,0 @@
package server

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"github.com/opencontainers/runc/libcontainer/label"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// RemovePodSandbox deletes the sandbox. If there are any running containers in the
// sandbox, they should be force deleted.
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
	logrus.Debugf("RemovePodSandboxRequest %+v", req)
	sb, err := s.getPodSandboxFromRequest(req)
	if err != nil {
		if err == errSandboxIDEmpty {
			return nil, err
		}

		resp := &pb.RemovePodSandboxResponse{}
		logrus.Warnf("could not get sandbox %s, it's probably been removed already: %v", req.GetPodSandboxId(), err)
		return resp, nil
	}

	podInfraContainer := sb.infraContainer
	containers := sb.containers.List()
	containers = append(containers, podInfraContainer)

	// Delete all the containers in the sandbox
	for _, c := range containers {
		if err := s.runtime.UpdateStatus(c); err != nil {
			return nil, fmt.Errorf("failed to update container state: %v", err)
		}

		cState := s.runtime.ContainerStatus(c)
		if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
			if err := s.runtime.StopContainer(c); err != nil {
				return nil, fmt.Errorf("failed to stop container %s: %v", c.Name(), err)
			}
		}

		if err := s.runtime.DeleteContainer(c); err != nil {
			return nil, fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), sb.id, err)
		}

		if c == podInfraContainer {
			continue
		}

		containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID())
		if err := os.RemoveAll(containerDir); err != nil {
			return nil, fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err)
		}

		s.releaseContainerName(c.Name())
		s.removeContainer(c)
	}

	if err := label.UnreserveLabel(sb.processLabel); err != nil {
		return nil, err
	}

	// unmount the shm for the pod
	if sb.shmPath != "/dev/shm" {
		if err := syscall.Unmount(sb.shmPath, syscall.MNT_DETACH); err != nil {
			return nil, err
		}
	}

	if err := sb.netNsRemove(); err != nil {
		return nil, fmt.Errorf("failed to remove networking namespace for sandbox %s: %v", sb.id, err)
	}

	// Remove the files related to the sandbox
	podSandboxDir := filepath.Join(s.config.SandboxDir, sb.id)
	if err := os.RemoveAll(podSandboxDir); err != nil {
		return nil, fmt.Errorf("failed to remove sandbox %s directory: %v", sb.id, err)
	}
	s.releaseContainerName(podInfraContainer.Name())
	s.removeContainer(podInfraContainer)
	sb.infraContainer = nil

	s.releasePodName(sb.name)
	s.removeSandbox(sb.id)

	resp := &pb.RemovePodSandboxResponse{}
	logrus.Debugf("RemovePodSandboxResponse %+v", resp)
	return resp, nil
}
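The teardown order in the handler above matters: each container's state is refreshed, anything still running is stopped, and only then is it deleted, so the delete never races a live process. A self-contained sketch of that ordering with stand-in types (container and runtime here are hypothetical stand-ins for oci.Container and oci.Runtime):

```go
package teardown

import "fmt"

// container and runtime model only the calls used below.
type container struct {
	name    string
	running bool
}

type runtime interface {
	Stop(c *container) error
	Delete(c *container) error
}

// removeAll mirrors the ordering in the deleted handler: stop anything still
// running, then delete, so Delete never races a live process.
func removeAll(r runtime, ctrs []*container) error {
	for _, c := range ctrs {
		if c.running {
			if err := r.Stop(c); err != nil {
				return fmt.Errorf("failed to stop container %s: %v", c.name, err)
			}
		}
		if err := r.Delete(c); err != nil {
			return fmt.Errorf("failed to delete container %s: %v", c.name, err)
		}
	}
	return nil
}
```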
@ -1,377 +0,0 @@
|
||||||
package server
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/kubernetes-incubator/cri-o/oci"
|
|
||||||
"github.com/kubernetes-incubator/cri-o/utils"
|
|
||||||
"github.com/opencontainers/runc/libcontainer/label"
|
|
||||||
"github.com/opencontainers/runtime-tools/generate"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (s *Server) runContainer(container *oci.Container) error {
|
|
||||||
if err := s.runtime.CreateContainer(container); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.runtime.UpdateStatus(container); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.runtime.StartContainer(container); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.runtime.UpdateStatus(container); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunPodSandbox creates and runs a pod-level sandbox.
|
|
||||||
func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest) (resp *pb.RunPodSandboxResponse, err error) {
|
|
||||||
logrus.Debugf("RunPodSandboxRequest %+v", req)
|
|
||||||
var processLabel, mountLabel, netNsPath string
|
|
||||||
// process req.Name
|
|
||||||
name := req.GetConfig().GetMetadata().GetName()
|
|
||||||
if name == "" {
|
|
||||||
return nil, fmt.Errorf("PodSandboxConfig.Name should not be empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
namespace := req.GetConfig().GetMetadata().GetNamespace()
|
|
||||||
attempt := req.GetConfig().GetMetadata().GetAttempt()
|
|
||||||
|
|
||||||
id, name, err := s.generatePodIDandName(name, namespace, attempt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
s.releasePodName(name)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err = s.podIDIndex.Add(id); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
if err = s.podIDIndex.Delete(id); err != nil {
|
|
||||||
logrus.Warnf("couldn't delete pod id %s from idIndex", id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
podSandboxDir := filepath.Join(s.config.SandboxDir, id)
|
|
||||||
if _, err = os.Stat(podSandboxDir); err == nil {
|
|
||||||
return nil, fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
if err2 := os.RemoveAll(podSandboxDir); err2 != nil {
|
|
||||||
logrus.Warnf("couldn't cleanup podSandboxDir %s: %v", podSandboxDir, err2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err = os.MkdirAll(podSandboxDir, 0755); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// creates a spec Generator with the default spec.
|
|
||||||
g := generate.New()
|
|
||||||
|
|
||||||
// TODO: Make the `graph/vfs` part of this configurable once the storage
|
|
||||||
// integration has been merged.
|
|
||||||
podInfraRootfs := filepath.Join(s.config.Root, "graph/vfs/pause")
|
|
||||||
// setup defaults for the pod sandbox
|
|
||||||
g.SetRootPath(filepath.Join(podInfraRootfs, "rootfs"))
|
|
||||||
g.SetRootReadonly(true)
|
|
||||||
g.SetProcessArgs([]string{"/pause"})
|
|
||||||
|
|
||||||
// set hostname
|
|
||||||
hostname := req.GetConfig().GetHostname()
|
|
||||||
if hostname != "" {
|
|
||||||
g.SetHostname(hostname)
|
|
||||||
}
|
|
||||||
|
|
||||||
// set log directory
|
|
||||||
logDir := req.GetConfig().GetLogDirectory()
|
|
||||||
if logDir == "" {
|
|
||||||
logDir = filepath.Join(s.config.LogDir, id)
|
|
||||||
}
|
|
||||||
|
|
||||||
// set DNS options
|
|
||||||
dnsServers := req.GetConfig().GetDnsConfig().GetServers()
|
|
||||||
dnsSearches := req.GetConfig().GetDnsConfig().GetSearches()
|
|
||||||
dnsOptions := req.GetConfig().GetDnsConfig().GetOptions()
|
|
||||||
resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir)
|
|
||||||
err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath)
|
|
||||||
if err != nil {
|
|
||||||
err1 := removeFile(resolvPath)
|
|
||||||
if err1 != nil {
|
|
||||||
err = err1
|
|
||||||
return nil, fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
g.AddBindMount(resolvPath, "/etc/resolv.conf", []string{"ro"})
|
|
||||||
|
|
||||||
// add metadata
|
|
||||||
metadata := req.GetConfig().GetMetadata()
|
|
||||||
metadataJSON, err := json.Marshal(metadata)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// add labels
|
|
||||||
labels := req.GetConfig().GetLabels()
|
|
||||||
labelsJSON, err := json.Marshal(labels)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// add annotations
|
|
||||||
annotations := req.GetConfig().GetAnnotations()
|
|
||||||
annotationsJSON, err := json.Marshal(annotations)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't use SELinux separation with Host Pid or IPC Namespace,
|
|
||||||
if !req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() && !req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
|
|
||||||
processLabel, mountLabel, err = getSELinuxLabels(nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
g.SetProcessSelinuxLabel(processLabel)
|
|
||||||
}
|
|
||||||
|
|
||||||
// create shm mount for the pod containers.
|
|
||||||
var shmPath string
|
|
||||||
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
|
|
||||||
shmPath = "/dev/shm"
|
|
||||||
} else {
|
|
||||||
shmPath, err = setupShm(podSandboxDir, mountLabel)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
if err2 := syscall.Unmount(shmPath, syscall.MNT_DETACH); err2 != nil {
|
|
||||||
logrus.Warnf("failed to unmount shm for pod: %v", err2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
containerID, containerName, err := s.generateContainerIDandName(name, "infra", 0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
s.releaseContainerName(containerName)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err = s.ctrIDIndex.Add(containerID); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
if err = s.ctrIDIndex.Delete(containerID); err != nil {
|
|
||||||
logrus.Warnf("couldn't delete ctr id %s from idIndex", containerID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
g.AddAnnotation("ocid/metadata", string(metadataJSON))
|
|
||||||
g.AddAnnotation("ocid/labels", string(labelsJSON))
|
|
||||||
g.AddAnnotation("ocid/annotations", string(annotationsJSON))
|
|
||||||
g.AddAnnotation("ocid/log_path", logDir)
|
|
||||||
g.AddAnnotation("ocid/name", name)
|
|
||||||
g.AddAnnotation("ocid/container_type", containerTypeSandbox)
|
|
||||||
g.AddAnnotation("ocid/container_name", containerName)
|
|
||||||
g.AddAnnotation("ocid/container_id", containerID)
|
|
||||||
g.AddAnnotation("ocid/shm_path", shmPath)
|
|
||||||
|
|
||||||
sb := &sandbox{
|
|
||||||
id: id,
|
|
||||||
name: name,
|
|
||||||
logDir: logDir,
|
|
||||||
labels: labels,
|
|
||||||
annotations: annotations,
|
|
||||||
containers: oci.NewMemoryStore(),
|
|
||||||
processLabel: processLabel,
|
|
||||||
mountLabel: mountLabel,
|
|
||||||
metadata: metadata,
|
|
||||||
shmPath: shmPath,
|
|
||||||
}
|
|
||||||
|
|
||||||
s.addSandbox(sb)
|
|
||||||
|
|
||||||
for k, v := range annotations {
|
|
||||||
g.AddAnnotation(k, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// extract linux sysctls from annotations and pass down to oci runtime
|
|
||||||
safe, unsafe, err := SysctlsFromPodAnnotations(annotations)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, sysctl := range safe {
|
|
||||||
g.AddLinuxSysctl(sysctl.Name, sysctl.Value)
|
|
||||||
}
|
|
||||||
for _, sysctl := range unsafe {
|
|
||||||
g.AddLinuxSysctl(sysctl.Name, sysctl.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// setup cgroup settings
|
|
||||||
cgroupParent := req.GetConfig().GetLinux().GetCgroupParent()
|
|
||||||
if cgroupParent != "" {
|
|
||||||
g.SetLinuxCgroupsPath(cgroupParent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// set up namespaces
|
|
||||||
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostNetwork() {
|
|
||||||
err = g.RemoveLinuxNamespace("network")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
netNsPath, err = hostNetNsPath()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Create the sandbox network namespace
|
|
||||||
if err = sb.netNsCreate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if netnsErr := sb.netNsRemove(); netnsErr != nil {
|
|
||||||
logrus.Warnf("Failed to remove networking namespace: %v", netnsErr)
|
|
||||||
}
|
|
||||||
} ()
|
|
||||||
|
|
||||||
// Pass the created namespace path to the runtime
|
|
||||||
err = g.AddOrReplaceLinuxNamespace("network", sb.netNsPath())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
netNsPath = sb.netNsPath()
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostPid() {
|
|
||||||
err = g.RemoveLinuxNamespace("pid")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
|
|
||||||
err = g.RemoveLinuxNamespace("ipc")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"), generate.ExportOptions{})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = os.Stat(podInfraRootfs); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
// TODO: Replace by rootfs creation API when it is ready
|
|
||||||
if err = utils.CreateInfraRootfs(podInfraRootfs, s.config.Pause); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
container, err := oci.NewContainer(containerID, containerName, podSandboxDir, podSandboxDir, sb.netNs(), labels, annotations, nil, nil, id, false)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sb.infraContainer = container
|
|
||||||
|
|
||||||
// setup the network
|
|
||||||
podNamespace := ""
|
|
||||||
if err = s.netPlugin.SetUpPod(netNsPath, podNamespace, id, containerName); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create network for container %s in sandbox %s: %v", containerName, id, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = s.runContainer(container); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp = &pb.RunPodSandboxResponse{PodSandboxId: &id}
|
|
||||||
logrus.Debugf("RunPodSandboxResponse: %+v", resp)
|
|
||||||
return resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) {
|
|
||||||
processLabel = ""
|
|
||||||
if selinuxOptions != nil {
|
|
||||||
user := selinuxOptions.GetUser()
|
|
||||||
if user == "" {
|
|
||||||
return "", "", fmt.Errorf("SELinuxOption.User is empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
role := selinuxOptions.GetRole()
|
|
||||||
if role == "" {
|
|
||||||
return "", "", fmt.Errorf("SELinuxOption.Role is empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
t := selinuxOptions.GetType()
|
|
||||||
if t == "" {
|
|
||||||
return "", "", fmt.Errorf("SELinuxOption.Type is empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
level := selinuxOptions.GetLevel()
|
|
||||||
if level == "" {
|
|
||||||
return "", "", fmt.Errorf("SELinuxOption.Level is empty")
|
|
||||||
}
|
|
||||||
processLabel = fmt.Sprintf("%s:%s:%s:%s", user, role, t, level)
|
|
||||||
}
|
|
||||||
return label.InitLabels(label.DupSecOpt(processLabel))
|
|
||||||
}
|
|
||||||
|
|
||||||
func setupShm(podSandboxDir, mountLabel string) (shmPath string, err error) {
|
|
||||||
shmPath = filepath.Join(podSandboxDir, "shm")
|
|
||||||
if err = os.Mkdir(shmPath, 0700); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
shmOptions := "mode=1777,size=" + strconv.Itoa(defaultShmSize)
|
|
||||||
if err = syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV),
|
|
||||||
label.FormatMountLabel(shmOptions, mountLabel)); err != nil {
|
|
||||||
return "", fmt.Errorf("failed to mount shm tmpfs for pod: %v", err)
|
|
||||||
}
|
|
||||||
return shmPath, nil
|
|
||||||
}
|
|
|
@@ -1,62 +0,0 @@
package server

import (
	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// PodSandboxStatus returns the Status of the PodSandbox.
func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
	logrus.Debugf("PodSandboxStatusRequest %+v", req)
	sb, err := s.getPodSandboxFromRequest(req)
	if err != nil {
		return nil, err
	}

	podInfraContainer := sb.infraContainer
	if err = s.runtime.UpdateStatus(podInfraContainer); err != nil {
		return nil, err
	}

	cState := s.runtime.ContainerStatus(podInfraContainer)
	created := cState.Created.UnixNano()

	netNsPath, err := podInfraContainer.NetNsPath()
	if err != nil {
		return nil, err
	}
	podNamespace := ""
	ip, err := s.netPlugin.GetContainerNetworkStatus(netNsPath, podNamespace, sb.id, podInfraContainer.Name())
	if err != nil {
		// ignore the error on network status
		ip = ""
	}

	rStatus := pb.PodSandboxState_SANDBOX_NOTREADY
	if cState.Status == oci.ContainerStateRunning {
		rStatus = pb.PodSandboxState_SANDBOX_READY
	}

	sandboxID := sb.id
	resp := &pb.PodSandboxStatusResponse{
		Status: &pb.PodSandboxStatus{
			Id:        &sandboxID,
			CreatedAt: int64Ptr(created),
			Linux: &pb.LinuxPodSandboxStatus{
				Namespaces: &pb.Namespace{
					Network: sPtr(netNsPath),
				},
			},
			Network:     &pb.PodSandboxNetworkStatus{Ip: &ip},
			State:       &rStatus,
			Labels:      sb.labels,
			Annotations: sb.annotations,
			Metadata:    sb.metadata,
		},
	}

	logrus.Infof("PodSandboxStatusResponse: %+v", resp)
	return resp, nil
}
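The sandbox readiness reported here is derived entirely from the infra container's runtime state. A tiny sketch of that mapping (the plain string constants are illustrative stand-ins for the pb.PodSandboxState enum):

```go
package status

// sandboxState mirrors how the deleted handler mapped the infra container's
// runtime state onto the CRI sandbox state: running means READY, anything
// else is NOTREADY.
func sandboxState(infraState string) string {
	if infraState == "running" {
		return "SANDBOX_READY"
	}
	return "SANDBOX_NOTREADY"
}
```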
@@ -1,61 +0,0 @@
package server

import (
	"fmt"
	"os"

	"github.com/Sirupsen/logrus"
	"github.com/kubernetes-incubator/cri-o/oci"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated.
func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
	logrus.Debugf("StopPodSandboxRequest %+v", req)
	sb, err := s.getPodSandboxFromRequest(req)
	if err != nil {
		return nil, err
	}

	podNamespace := ""
	podInfraContainer := sb.infraContainer
	netnsPath, err := podInfraContainer.NetNsPath()
	if err != nil {
		return nil, err
	}
	if _, err := os.Stat(netnsPath); err == nil {
		if err2 := s.netPlugin.TearDownPod(netnsPath, podNamespace, sb.id, podInfraContainer.Name()); err2 != nil {
			return nil, fmt.Errorf("failed to destroy network for container %s in sandbox %s: %v",
				podInfraContainer.Name(), sb.id, err2)
		}
	} else if !os.IsNotExist(err) { // it's ok for netnsPath to *not* exist
		return nil, fmt.Errorf("failed to stat netns path for container %s in sandbox %s before tearing down the network: %v",
			podInfraContainer.Name(), sb.id, err)
	}

	// Close the sandbox networking namespace.
	if err := sb.netNsRemove(); err != nil {
		return nil, err
	}

	containers := sb.containers.List()
	containers = append(containers, podInfraContainer)

	for _, c := range containers {
		if err := s.runtime.UpdateStatus(c); err != nil {
			return nil, err
		}
		cStatus := s.runtime.ContainerStatus(c)
		if cStatus.Status != oci.ContainerStateStopped {
			if err := s.runtime.StopContainer(c); err != nil {
				return nil, fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), sb.id, err)
			}
		}
	}

	resp := &pb.StopPodSandboxResponse{}
	logrus.Debugf("StopPodSandboxResponse: %+v", resp)
	return resp, nil
}
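The network teardown above is deliberately tolerant of an already-missing namespace file: a plain os.IsNotExist check separates "nothing to tear down" from a real stat failure. A minimal sketch of that check in isolation (the helper name is illustrative):

```go
package netcheck

import "os"

// netnsExists distinguishes "the namespace file is gone, nothing to tear
// down" from a real stat failure, matching the os.IsNotExist handling in the
// deleted StopPodSandbox handler.
func netnsExists(path string) (bool, error) {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
```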
@ -1,149 +0,0 @@
|
||||||
// +build seccomp
|
|
||||||
|
|
||||||
package seccomp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/stringutils"
|
|
||||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
"github.com/opencontainers/runtime-tools/generate"
|
|
||||||
libseccomp "github.com/seccomp/libseccomp-golang"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LoadProfileFromStruct takes a Seccomp struct and setup seccomp in the spec.
|
|
||||||
func LoadProfileFromStruct(config Seccomp, specgen *generate.Generator) error {
|
|
||||||
return setupSeccomp(&config, specgen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
|
|
||||||
func LoadProfileFromBytes(body []byte, specgen *generate.Generator) error {
|
|
||||||
var config Seccomp
|
|
||||||
if err := json.Unmarshal(body, &config); err != nil {
|
|
||||||
return fmt.Errorf("decoding seccomp profile failed: %v", err)
|
|
||||||
}
|
|
||||||
return setupSeccomp(&config, specgen)
|
|
||||||
}
|
|
||||||
|
|
||||||
var nativeToSeccomp = map[string]Arch{
|
|
||||||
"amd64": ArchX86_64,
|
|
||||||
"arm64": ArchAARCH64,
|
|
||||||
"mips64": ArchMIPS64,
|
|
||||||
"mips64n32": ArchMIPS64N32,
|
|
||||||
"mipsel64": ArchMIPSEL64,
|
|
||||||
"mipsel64n32": ArchMIPSEL64N32,
|
|
||||||
"s390x": ArchS390X,
|
|
||||||
}
|
|
||||||
|
|
||||||
func setupSeccomp(config *Seccomp, specgen *generate.Generator) error {
|
|
||||||
if config == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// No default action specified, no syscalls listed, assume seccomp disabled
|
|
||||||
if config.DefaultAction == "" && len(config.Syscalls) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var arch string
|
|
||||||
var native, err = libseccomp.GetNativeArch()
|
|
||||||
if err == nil {
|
|
||||||
arch = native.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(config.Architectures) != 0 && len(config.ArchMap) != 0 {
|
|
||||||
return errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'")
|
|
||||||
}
|
|
||||||
|
|
||||||
customspec := specgen.Spec()
|
|
||||||
customspec.Linux.Seccomp = &specs.Seccomp{}
|
|
||||||
|
|
||||||
// if config.Architectures == 0 then libseccomp will figure out the architecture to use
|
|
||||||
if len(config.Architectures) != 0 {
|
|
||||||
for _, a := range config.Architectures {
|
|
||||||
customspec.Linux.Seccomp.Architectures = append(customspec.Linux.Seccomp.Architectures, specs.Arch(a))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(config.ArchMap) != 0 {
|
|
||||||
for _, a := range config.ArchMap {
|
|
||||||
seccompArch, ok := nativeToSeccomp[arch]
|
|
||||||
if ok {
|
|
||||||
if a.Arch == seccompArch {
|
|
||||||
customspec.Linux.Seccomp.Architectures = append(customspec.Linux.Seccomp.Architectures, specs.Arch(a.Arch))
|
|
||||||
for _, sa := range a.SubArches {
|
|
||||||
customspec.Linux.Seccomp.Architectures = append(customspec.Linux.Seccomp.Architectures, specs.Arch(sa))
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
customspec.Linux.Seccomp.DefaultAction = specs.Action(config.DefaultAction)
|
|
||||||
|
|
||||||
Loop:
|
|
||||||
// Loop through all syscall blocks and convert them to libcontainer format after filtering them
|
|
||||||
for _, call := range config.Syscalls {
|
|
||||||
if len(call.Excludes.Arches) > 0 {
|
|
||||||
if stringutils.InSlice(call.Excludes.Arches, arch) {
|
|
||||||
continue Loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(call.Excludes.Caps) > 0 {
|
|
||||||
for _, c := range call.Excludes.Caps {
|
|
||||||
if stringutils.InSlice(customspec.Process.Capabilities, c) {
|
|
||||||
continue Loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(call.Includes.Arches) > 0 {
|
|
||||||
if !stringutils.InSlice(call.Includes.Arches, arch) {
|
|
||||||
continue Loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(call.Includes.Caps) > 0 {
|
|
||||||
for _, c := range call.Includes.Caps {
|
|
||||||
if !stringutils.InSlice(customspec.Process.Capabilities, c) {
|
|
||||||
continue Loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if call.Name != "" && len(call.Names) != 0 {
|
|
||||||
return errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'")
|
|
||||||
}
|
|
||||||
|
|
||||||
if call.Name != "" {
|
|
||||||
customspec.Linux.Seccomp.Syscalls = append(customspec.Linux.Seccomp.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, n := range call.Names {
|
|
||||||
customspec.Linux.Seccomp.Syscalls = append(customspec.Linux.Seccomp.Syscalls, createSpecsSyscall(n, call.Action, call.Args))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func createSpecsSyscall(name string, action Action, args []*Arg) specs.Syscall {
|
|
||||||
newCall := specs.Syscall{
|
|
||||||
Name: name,
|
|
||||||
Action: specs.Action(action),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Loop through all the arguments of the syscall and convert them
|
|
||||||
for _, arg := range args {
|
|
||||||
newArg := specs.Arg{
|
|
||||||
Index: arg.Index,
|
|
||||||
Value: arg.Value,
|
|
||||||
ValueTwo: arg.ValueTwo,
|
|
||||||
Op: specs.Operator(arg.Op),
|
|
||||||
}
|
|
||||||
|
|
||||||
newCall.Args = append(newCall.Args, newArg)
|
|
||||||
}
|
|
||||||
return newCall
|
|
||||||
}
|
|
|
@@ -1,15 +0,0 @@
// +build !seccomp

package seccomp

import "github.com/opencontainers/runtime-tools/generate"

// LoadProfileFromStruct takes a Seccomp struct and setup seccomp in the spec.
func LoadProfileFromStruct(config Seccomp, specgen *generate.Generator) error {
	return nil
}

// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
func LoadProfileFromBytes(body []byte, specgen *generate.Generator) error {
	return nil
}
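The two seccomp files being removed here use the classic Go build-constraint pattern: the `seccomp`-tagged file carries the real implementation and the `!seccomp` file ships no-op stubs with identical signatures, so callers compile either way. A generic sketch of that pattern (file names and the Enabled function are illustrative, not part of cri-o):

```go
// file: feature_enabled.go
// +build seccomp

package seccomp

// Enabled reports whether this binary was built with seccomp support.
func Enabled() bool { return true }
```

```go
// file: feature_disabled.go
// +build !seccomp

package seccomp

// Enabled reports whether this binary was built with seccomp support.
func Enabled() bool { return false }
```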
@@ -1,93 +0,0 @@
package seccomp

// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
	DefaultAction Action `json:"defaultAction"`
	// Architectures is kept to maintain backward compatibility with the old
	// seccomp profile.
	Architectures []Arch         `json:"architectures,omitempty"`
	ArchMap       []Architecture `json:"archMap,omitempty"`
	Syscalls      []*Syscall     `json:"syscalls"`
}

// Architecture is used to represent an specific architecture
// and its sub-architectures
type Architecture struct {
	Arch      Arch   `json:"architecture"`
	SubArches []Arch `json:"subArchitectures"`
}

// Arch used for architectures
type Arch string

// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
	ArchX86         Arch = "SCMP_ARCH_X86"
	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
	ArchX32         Arch = "SCMP_ARCH_X32"
	ArchARM         Arch = "SCMP_ARCH_ARM"
	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
	ArchPPC         Arch = "SCMP_ARCH_PPC"
	ArchPPC64       Arch = "SCMP_ARCH_PPC64"
	ArchPPC64LE     Arch = "SCMP_ARCH_PPC64LE"
	ArchS390        Arch = "SCMP_ARCH_S390"
	ArchS390X       Arch = "SCMP_ARCH_S390X"
)

// Action taken upon Seccomp rule match
type Action string

// Define actions for Seccomp rules
const (
	ActKill  Action = "SCMP_ACT_KILL"
	ActTrap  Action = "SCMP_ACT_TRAP"
	ActErrno Action = "SCMP_ACT_ERRNO"
	ActTrace Action = "SCMP_ACT_TRACE"
	ActAllow Action = "SCMP_ACT_ALLOW"
)

// Operator used to match syscall arguments in Seccomp
type Operator string

// Define operators for syscall arguments in Seccomp
const (
	OpNotEqual     Operator = "SCMP_CMP_NE"
	OpLessThan     Operator = "SCMP_CMP_LT"
	OpLessEqual    Operator = "SCMP_CMP_LE"
	OpEqualTo      Operator = "SCMP_CMP_EQ"
	OpGreaterEqual Operator = "SCMP_CMP_GE"
	OpGreaterThan  Operator = "SCMP_CMP_GT"
	OpMaskedEqual  Operator = "SCMP_CMP_MASKED_EQ"
)

// Arg used for matching specific syscall arguments in Seccomp
type Arg struct {
	Index    uint     `json:"index"`
	Value    uint64   `json:"value"`
	ValueTwo uint64   `json:"valueTwo"`
	Op       Operator `json:"op"`
}

// Filter is used to conditionally apply Seccomp rules
type Filter struct {
	Caps   []string `json:"caps,omitempty"`
	Arches []string `json:"arches,omitempty"`
}

// Syscall is used to match a group of syscalls in Seccomp
type Syscall struct {
	Name     string   `json:"name,omitempty"`
	Names    []string `json:"names,omitempty"`
	Action   Action   `json:"action"`
	Args     []*Arg   `json:"args"`
	Comment  string   `json:"comment"`
	Includes Filter   `json:"includes"`
	Excludes Filter   `json:"excludes"`
}
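These types map one-to-one onto the JSON seccomp profile format. A toy example of decoding such a profile (the struct below is a trimmed mirror of the Seccomp type above, and the inline JSON is a made-up minimal profile, not docker's default):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// profile mirrors just the fields of the Seccomp type that this example
// touches; the full type also carries architectures and argument matchers.
type profile struct {
	DefaultAction string `json:"defaultAction"`
	Syscalls      []struct {
		Names  []string `json:"names"`
		Action string   `json:"action"`
	} `json:"syscalls"`
}

func main() {
	raw := []byte(`{
		"defaultAction": "SCMP_ACT_ERRNO",
		"syscalls": [{"names": ["read", "write", "exit_group"], "action": "SCMP_ACT_ALLOW"}]
	}`)
	var p profile
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.DefaultAction, len(p.Syscalls)) // SCMP_ACT_ERRNO 1
}
```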
382	server/server.go

@@ -1,25 +1,11 @@
 package server

 import (
-	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
-	"path/filepath"
-	"sync"
-	"syscall"

-	"github.com/Sirupsen/logrus"
+	runtimeManager "github.com/kubernetes-incubator/cri-o/manager"
-	"github.com/docker/docker/pkg/registrar"
-	"github.com/docker/docker/pkg/truncindex"
-	"github.com/kubernetes-incubator/cri-o/oci"
-	"github.com/kubernetes-incubator/cri-o/server/apparmor"
-	"github.com/kubernetes-incubator/cri-o/server/seccomp"
 	"github.com/kubernetes-incubator/cri-o/utils"
-	"github.com/opencontainers/runc/libcontainer/label"
-	rspec "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/rajatchopra/ocicni"
-	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )

 const (
@@ -28,275 +14,11 @@ const (

 // Server implements the RuntimeService and ImageService
 type Server struct {
-	config    Config
-	runtime   *oci.Runtime
-	stateLock sync.Mutex
-	state     *serverState
-	netPlugin ocicni.CNIPlugin
-	podNameIndex *registrar.Registrar
-	podIDIndex   *truncindex.TruncIndex
-	ctrNameIndex *registrar.Registrar
-	ctrIDIndex   *truncindex.TruncIndex
-
-	seccompEnabled bool
-	seccompProfile seccomp.Seccomp
-
-	appArmorEnabled bool
-	appArmorProfile string
+	manager *runtimeManager.Manager
 }

func (s *Server) loadContainer(id string) error {
|
|
||||||
config, err := ioutil.ReadFile(filepath.Join(s.runtime.ContainerDir(), id, "config.json"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var m rspec.Spec
|
|
||||||
if err = json.Unmarshal(config, &m); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
labels := make(map[string]string)
|
|
||||||
if err = json.Unmarshal([]byte(m.Annotations["ocid/labels"]), &labels); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
name := m.Annotations["ocid/name"]
|
|
||||||
name, err = s.reserveContainerName(id, name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var metadata pb.ContainerMetadata
|
|
||||||
if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
sb := s.getSandbox(m.Annotations["ocid/sandbox_id"])
|
|
||||||
if sb == nil {
|
|
||||||
logrus.Warnf("could not get sandbox with id %s, skipping", m.Annotations["ocid/sandbox_id"])
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var tty bool
|
|
||||||
if v := m.Annotations["ocid/tty"]; v == "true" {
|
|
||||||
tty = true
|
|
||||||
}
|
|
||||||
containerPath := filepath.Join(s.runtime.ContainerDir(), id)
|
|
||||||
|
|
||||||
var img *pb.ImageSpec
|
|
||||||
image, ok := m.Annotations["ocid/image"]
|
|
||||||
if ok {
|
|
||||||
img = &pb.ImageSpec{
|
|
||||||
Image: &image,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
annotations := make(map[string]string)
|
|
||||||
if err = json.Unmarshal([]byte(m.Annotations["ocid/annotations"]), &annotations); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations["ocid/log_path"], sb.netNs(), labels, annotations, img, &metadata, sb.id, tty)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.addContainer(ctr)
|
|
||||||
if err = s.runtime.UpdateStatus(ctr); err != nil {
|
|
||||||
logrus.Warnf("error updating status for container %s: %v", ctr.ID(), err)
|
|
||||||
}
|
|
||||||
if err = s.ctrIDIndex.Add(id); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func configNetNsPath(spec rspec.Spec) (string, error) {
|
|
||||||
for _, ns := range spec.Linux.Namespaces {
|
|
||||||
if ns.Type != rspec.NetworkNamespace {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if ns.Path == "" {
|
|
||||||
return "", fmt.Errorf("empty networking namespace")
|
|
||||||
}
|
|
||||||
|
|
||||||
return ns.Path, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", fmt.Errorf("missing networking namespace")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) loadSandbox(id string) error {
|
|
||||||
config, err := ioutil.ReadFile(filepath.Join(s.config.SandboxDir, id, "config.json"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var m rspec.Spec
|
|
||||||
if err = json.Unmarshal(config, &m); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
labels := make(map[string]string)
|
|
||||||
if err = json.Unmarshal([]byte(m.Annotations["ocid/labels"]), &labels); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
name := m.Annotations["ocid/name"]
|
|
||||||
name, err = s.reservePodName(id, name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var metadata pb.PodSandboxMetadata
|
|
||||||
if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
processLabel, mountLabel, err := label.InitLabels(label.DupSecOpt(m.Process.SelinuxLabel))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
annotations := make(map[string]string)
|
|
||||||
if err = json.Unmarshal([]byte(m.Annotations["ocid/annotations"]), &annotations); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
sb := &sandbox{
|
|
||||||
id: id,
|
|
||||||
name: name,
|
|
||||||
logDir: m.Annotations["ocid/log_path"],
|
|
||||||
labels: labels,
|
|
||||||
containers: oci.NewMemoryStore(),
|
|
||||||
processLabel: processLabel,
|
|
||||||
mountLabel: mountLabel,
|
|
||||||
annotations: annotations,
|
|
||||||
metadata: &metadata,
|
|
||||||
shmPath: m.Annotations["ocid/shm_path"],
|
|
||||||
}
|
|
||||||
|
|
||||||
// We add a netNS only if we can load a permanent one.
|
|
||||||
// Otherwise, the sandbox will live in the host namespace.
|
|
||||||
netNsPath, err := configNetNsPath(m)
|
|
||||||
if err == nil {
|
|
||||||
netNS, nsErr := netNsGet(netNsPath, sb.name)
|
|
||||||
// If we can't load the networking namespace
|
|
||||||
// because it's closed, we just set the sb netns
|
|
||||||
// pointer to nil. Otherwise we return an error.
|
|
||||||
if nsErr != nil && nsErr != errSandboxClosedNetNS {
|
|
||||||
return nsErr
|
|
||||||
}
|
|
||||||
|
|
||||||
sb.netns = netNS
|
|
||||||
}
|
|
||||||
|
|
||||||
s.addSandbox(sb)
|
|
||||||
|
|
||||||
sandboxPath := filepath.Join(s.config.SandboxDir, id)
|
|
||||||
|
|
||||||
if err = label.ReserveLabel(processLabel); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
cname, err := s.reserveContainerName(m.Annotations["ocid/container_id"], m.Annotations["ocid/container_name"])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
scontainer, err := oci.NewContainer(m.Annotations["ocid/container_id"], cname, sandboxPath, sandboxPath, sb.netNs(), labels, annotations, nil, nil, id, false)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
sb.infraContainer = scontainer
|
|
||||||
if err = s.runtime.UpdateStatus(scontainer); err != nil {
|
|
||||||
logrus.Warnf("error updating status for container %s: %v", scontainer.ID(), err)
|
|
||||||
}
|
|
||||||
if err = s.ctrIDIndex.Add(scontainer.ID()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = s.podIDIndex.Add(id); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) restore() {
|
|
||||||
sandboxDir, err := ioutil.ReadDir(s.config.SandboxDir)
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
logrus.Warnf("could not read sandbox directory %s: %v", sandboxDir, err)
|
|
||||||
}
|
|
||||||
for _, v := range sandboxDir {
|
|
||||||
if !v.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err = s.loadSandbox(v.Name()); err != nil {
|
|
||||||
logrus.Warnf("could not restore sandbox %s: %v", v.Name(), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
containerDir, err := ioutil.ReadDir(s.runtime.ContainerDir())
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
logrus.Warnf("could not read container directory %s: %v", s.runtime.ContainerDir(), err)
|
|
||||||
}
|
|
||||||
for _, v := range containerDir {
|
|
||||||
if !v.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := s.loadContainer(v.Name()); err != nil {
|
|
||||||
logrus.Warnf("could not restore container %s: %v", v.Name(), err)
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
-
-func (s *Server) reservePodName(id, name string) (string, error) {
-	if err := s.podNameIndex.Reserve(name, id); err != nil {
-		if err == registrar.ErrNameReserved {
-			id, err := s.podNameIndex.Get(name)
-			if err != nil {
-				logrus.Warnf("conflict, pod name %q already reserved", name)
-				return "", err
-			}
-			return "", fmt.Errorf("conflict, name %q already reserved for pod %q", name, id)
-		}
-		return "", fmt.Errorf("error reserving pod name %q", name)
-	}
-	return name, nil
-}
-
-func (s *Server) releasePodName(name string) {
-	s.podNameIndex.Release(name)
-}
-
-func (s *Server) reserveContainerName(id, name string) (string, error) {
-	if err := s.ctrNameIndex.Reserve(name, id); err != nil {
-		if err == registrar.ErrNameReserved {
-			id, err := s.ctrNameIndex.Get(name)
-			if err != nil {
-				logrus.Warnf("conflict, ctr name %q already reserved", name)
-				return "", err
-			}
-			return "", fmt.Errorf("conflict, name %q already reserved for ctr %q", name, id)
-		}
-		return "", fmt.Errorf("error reserving ctr name %s", name)
-	}
-	return name, nil
-}
-
-func (s *Server) releaseContainerName(name string) {
-	s.ctrNameIndex.Release(name)
-}
-
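The reserve/release helpers above pair a name registrar (unique name to ID bookkeeping) with a TruncIndex (prefix lookup of IDs). A minimal standalone sketch of that pattern, assuming the docker pkg/registrar and pkg/truncindex packages these helpers appear to be built on:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/registrar"  // assumed vendored path
	"github.com/docker/docker/pkg/truncindex" // assumed vendored path
)

func main() {
	names := registrar.NewRegistrar()
	ids := truncindex.NewTruncIndex([]string{})

	id := "8dfafdbc3a40e0f2a3b5e1c0d9a6b7c8" // illustrative ID
	if err := names.Reserve("k8s_POD_nginx_default", id); err != nil {
		fmt.Println("name already taken:", err)
		return
	}
	_ = ids.Add(id)

	// Callers may pass a short prefix; TruncIndex resolves it as long as it
	// is unambiguous.
	full, _ := ids.Get("8dfafdbc")
	fmt.Println("resolved:", full)

	// Releasing the name makes it available for the next pod or container.
	names.Release("k8s_POD_nginx_default")
}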
-const (
-	// SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER.
-	SeccompModeFilter = uintptr(2)
-)
-
-func seccompEnabled() bool {
-	var enabled bool
-	// Check if Seccomp is supported, via CONFIG_SECCOMP.
-	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL {
-		// Make sure the kernel has CONFIG_SECCOMP_FILTER.
-		if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL {
-			enabled = true
-		}
-	}
-	return enabled
 }
 
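seccompEnabled probes the kernel twice with prctl(2): if PR_GET_SECCOMP fails with EINVAL the kernel lacks CONFIG_SECCOMP, and if PR_SET_SECCOMP with SECCOMP_MODE_FILTER fails with EINVAL it lacks CONFIG_SECCOMP_FILTER. The same probe can be written against golang.org/x/sys/unix instead of raw syscalls; this is a sketch for illustration only, not part of this commit:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// seccompFilterSupported mirrors the prctl-based probe: it only reports true
// when the kernel understands both PR_GET_SECCOMP and seccomp filter mode.
func seccompFilterSupported() bool {
	if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err == unix.EINVAL {
		return false // CONFIG_SECCOMP not compiled in
	}
	// SECCOMP_MODE_FILTER == 2. No filter pointer is passed, so this call is
	// expected to fail (typically EFAULT) rather than install a filter;
	// EINVAL here means CONFIG_SECCOMP_FILTER is missing.
	if err := unix.Prctl(unix.PR_SET_SECCOMP, 2, 0, 0, 0); err == unix.EINVAL {
		return false
	}
	return true
}

func main() {
	fmt.Println("seccomp filter available:", seccompFilterSupported())
}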
 // New creates a new Server with options provided
-func New(config *Config) (*Server, error) {
+func New(config *runtimeManager.Config) (*Server, error) {
 	// TODO: This will go away later when we have wrapper process or systemd acting as
 	// subreaper.
 	if err := utils.SetSubreaper(1); err != nil {
@@ -313,107 +35,13 @@ func New(config *Config) (*Server, error) {
 		return nil, err
 	}
 
-	r, err := oci.New(config.Runtime, config.ContainerDir, config.Conmon, config.ConmonEnv)
+	manager, err := runtimeManager.New(config)
-	if err != nil {
-		return nil, err
-	}
-	sandboxes := make(map[string]*sandbox)
-	containers := oci.NewMemoryStore()
-	netPlugin, err := ocicni.InitCNI("")
 	if err != nil {
 		return nil, err
 	}
 	s := &Server{
-		runtime: r,
+		manager: manager,
-		netPlugin: netPlugin,
-		config: *config,
-		state: &serverState{
-			sandboxes: sandboxes,
-			containers: containers,
-		},
-		seccompEnabled: seccompEnabled(),
-		appArmorEnabled: apparmor.IsEnabled(),
-		appArmorProfile: config.ApparmorProfile,
-	}
-	seccompProfile, err := ioutil.ReadFile(config.SeccompProfile)
-	if err != nil {
-		return nil, fmt.Errorf("opening seccomp profile (%s) failed: %v", config.SeccompProfile, err)
-	}
-	var seccompConfig seccomp.Seccomp
-	if err := json.Unmarshal(seccompProfile, &seccompConfig); err != nil {
-		return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
-	}
-	s.seccompProfile = seccompConfig
-
-	if s.appArmorEnabled && s.appArmorProfile == apparmor.DefaultApparmorProfile {
-		if err := apparmor.EnsureDefaultApparmorProfile(); err != nil {
-			return nil, fmt.Errorf("ensuring the default apparmor profile is installed failed: %v", err)
-		}
 	}
 
-	s.podIDIndex = truncindex.NewTruncIndex([]string{})
-	s.podNameIndex = registrar.NewRegistrar()
-	s.ctrIDIndex = truncindex.NewTruncIndex([]string{})
-	s.ctrNameIndex = registrar.NewRegistrar()
-
-	s.restore()
-
-	logrus.Debugf("sandboxes: %v", s.state.sandboxes)
-	logrus.Debugf("containers: %v", s.state.containers)
 	return s, nil
 }
 
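With this hunk the Server keeps only the manager it delegates to; the runtime, CNI plugin, seccomp/apparmor setup, and the ID/name indexes are no longer constructed here. A condensed, standalone sketch of that delegation shape, with every type and value below hypothetical rather than the actual manager package API:

package main

import "fmt"

// coreManager stands in for the manager package: it owns runtime, storage
// and networking, and exposes plain Go methods.
type coreManager struct{}

func (m *coreManager) Version() (string, string, error) {
	return "runc", "1.0.0-rc2", nil // illustrative values
}

// apiServer stands in for the gRPC-facing Server: it holds nothing but the
// manager and translates between wire types and manager calls.
type apiServer struct {
	manager *coreManager
}

func (s *apiServer) Version() (string, error) {
	name, version, err := s.manager.Version()
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s %s", name, version), nil
}

func main() {
	s := &apiServer{manager: &coreManager{}}
	out, _ := s.Version()
	fmt.Println(out)
}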
-type serverState struct {
-	sandboxes  map[string]*sandbox
-	containers oci.Store
-}
-
-func (s *Server) addSandbox(sb *sandbox) {
-	s.stateLock.Lock()
-	s.state.sandboxes[sb.id] = sb
-	s.stateLock.Unlock()
-}
-
-func (s *Server) getSandbox(id string) *sandbox {
-	s.stateLock.Lock()
-	sb := s.state.sandboxes[id]
-	s.stateLock.Unlock()
-	return sb
-}
-
-func (s *Server) hasSandbox(id string) bool {
-	s.stateLock.Lock()
-	_, ok := s.state.sandboxes[id]
-	s.stateLock.Unlock()
-	return ok
-}
-
-func (s *Server) removeSandbox(id string) {
-	s.stateLock.Lock()
-	delete(s.state.sandboxes, id)
-	s.stateLock.Unlock()
-}
-
-func (s *Server) addContainer(c *oci.Container) {
-	s.stateLock.Lock()
-	sandbox := s.state.sandboxes[c.Sandbox()]
-	// TODO(runcom): handle !ok above!!! otherwise it panics!
-	sandbox.addContainer(c)
-	s.state.containers.Add(c.ID(), c)
-	s.stateLock.Unlock()
-}
-
-func (s *Server) getContainer(id string) *oci.Container {
-	s.stateLock.Lock()
-	c := s.state.containers.Get(id)
-	s.stateLock.Unlock()
-	return c
-}
-
-func (s *Server) removeContainer(c *oci.Container) {
-	s.stateLock.Lock()
-	sandbox := s.state.sandboxes[c.Sandbox()]
-	sandbox.removeContainer(c)
-	s.state.containers.Delete(c.ID())
-	s.stateLock.Unlock()
-}
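These helpers serialize every read and write of the in-memory pod and container state behind a single stateLock; after this commit that bookkeeping presumably lives in the manager package. The same pattern in isolation, using an RWMutex so concurrent readers do not block each other (a variation for illustration, not the removed code itself):

package main

import (
	"fmt"
	"sync"
)

type sandboxStore struct {
	mu        sync.RWMutex
	sandboxes map[string]string // id -> name, simplified stand-in for *sandbox
}

func newSandboxStore() *sandboxStore {
	return &sandboxStore{sandboxes: make(map[string]string)}
}

func (s *sandboxStore) add(id, name string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.sandboxes[id] = name
}

func (s *sandboxStore) get(id string) (string, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	name, ok := s.sandboxes[id]
	return name, ok
}

func (s *sandboxStore) remove(id string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.sandboxes, id)
}

func main() {
	store := newSandboxStore()
	store.add("abc123", "k8s_POD_nginx_default")
	if name, ok := store.get("abc123"); ok {
		fmt.Println("found:", name)
	}
	store.remove("abc123")
}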
server/streaming.go (new file, 42 lines)
@@ -0,0 +1,42 @@
+package server
+
+import (
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+)
+
+// ExecSync runs a command in a container synchronously.
+func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.ExecSyncResponse, error) {
+	logrus.Debugf("ExecSyncRequest %+v", req)
+
+	execResp, err := s.manager.ExecSync(req.GetContainerId(), req.GetCmd(), req.GetTimeout())
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &pb.ExecSyncResponse{
+		Stdout: execResp.Stdout,
+		Stderr: execResp.Stderr,
+		ExitCode: &execResp.ExitCode,
+	}
+
+	logrus.Debugf("ExecSyncResponse: %+v", resp)
+	return resp, nil
+}
+
+// Attach prepares a streaming endpoint to attach to a running container.
+func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachResponse, error) {
+	return nil, nil
+}
+
+// Exec prepares a streaming endpoint to execute a command in the container.
+func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) {
+	return nil, nil
+}
+
+// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
+func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
+	return nil, nil
+}
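ExecSync above assumes the manager returns captured output plus an exit code whose address the server can hand to the protobuf response. A hypothetical sketch of that manager-side contract, inferred only from the call site (the real signature and type live in the manager package and may differ); timeout handling is omitted and the exit-code recovery is Unix-specific:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// ExecSyncResponse is the shape the handler appears to expect: raw
// stdout/stderr plus an int32 exit code it can take the address of.
type ExecSyncResponse struct {
	Stdout   []byte
	Stderr   []byte
	ExitCode int32
}

// execSync is a toy stand-in for manager.ExecSync: run a command, capture
// stdout, and recover the exit code from the wait status.
func execSync(cmd []string) (*ExecSyncResponse, error) {
	c := exec.Command(cmd[0], cmd[1:]...)
	out, err := c.Output()
	resp := &ExecSyncResponse{Stdout: out}
	if err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			resp.Stderr = exitErr.Stderr
			if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok {
				resp.ExitCode = int32(ws.ExitStatus())
			}
			return resp, nil
		}
		return nil, err
	}
	return resp, nil
}

func main() {
	resp, err := execSync([]string{"sh", "-c", "echo hello; exit 3"})
	if err != nil {
		fmt.Println("exec failed:", err)
		return
	}
	fmt.Printf("stdout=%q exit=%d\n", resp.Stdout, resp.ExitCode)
}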
server/utils.go (158 lines removed)
@@ -1,158 +0,0 @@
-package server
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"strings"
-)
-
-const (
-	// According to http://man7.org/linux/man-pages/man5/resolv.conf.5.html:
-	// "The search list is currently limited to six domains with a total of 256 characters."
-	maxDNSSearches = 6
-)
-
-func int64Ptr(i int64) *int64 {
-	return &i
-}
-
-func int32Ptr(i int32) *int32 {
-	return &i
-}
-
-func sPtr(s string) *string {
-	return &s
-}
-
-func copyFile(src, dest string) error {
-	in, err := os.Open(src)
-	if err != nil {
-		return err
-	}
-	defer in.Close()
-
-	out, err := os.Create(dest)
-	if err != nil {
-		return err
-	}
-	defer out.Close()
-
-	_, err = io.Copy(out, in)
-	return err
-}
-
-func removeFile(path string) error {
-	if _, err := os.Stat(path); err == nil {
-		if err := os.Remove(path); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func parseDNSOptions(servers, searches, options []string, path string) error {
-	nServers := len(servers)
-	nSearches := len(searches)
-	nOptions := len(options)
-	if nServers == 0 && nSearches == 0 && nOptions == 0 {
-		return copyFile("/etc/resolv.conf", path)
-	}
-
-	if nSearches > maxDNSSearches {
-		return fmt.Errorf("DNSOption.Searches has more than 6 domains")
-	}
-
-	f, err := os.Create(path)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	if nSearches > 0 {
-		data := fmt.Sprintf("search %s\n", strings.Join(searches, " "))
-		_, err = f.Write([]byte(data))
-		if err != nil {
-			return err
-		}
-	}
-
-	if nServers > 0 {
-		data := fmt.Sprintf("nameserver %s\n", strings.Join(servers, "\nnameserver "))
-		_, err = f.Write([]byte(data))
-		if err != nil {
-			return err
-		}
-	}
-
-	if nOptions > 0 {
-		data := fmt.Sprintf("options %s\n", strings.Join(options, " "))
-		_, err = f.Write([]byte(data))
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
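For instance, with servers []string{"10.0.0.10"}, searches []string{"default.svc.cluster.local", "svc.cluster.local", "cluster.local"} and options []string{"ndots:5"} (illustrative values), the file parseDNSOptions writes at path would come out as:

search default.svc.cluster.local svc.cluster.local cluster.local
nameserver 10.0.0.10
options ndots:5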
-// TODO: remove sysctl extraction related code here, instead we import from k8s directly.
-
-const (
-	// SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
-	// container of a pod. The annotation value is a comma separated list of sysctl_name=value
-	// key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by
-	// the kubelet. Pods with other sysctls will fail to launch.
-	SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls"
-
-	// UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
-	// container of a pod. The annotation value is a comma separated list of sysctl_name=value
-	// key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly
-	// namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use
-	// is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet
-	// will fail to launch.
-	UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls"
-)
-
-// Sysctl defines a kernel parameter to be set
-type Sysctl struct {
-	// Name of a property to set
-	Name string `json:"name"`
-	// Value of a property to set
-	Value string `json:"value"`
-}
-
-// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls
-// and a slice of unsafe Sysctls. This is only a convenience wrapper around
-// SysctlsFromPodAnnotation.
-func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) {
-	safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey])
-	if err != nil {
-		return nil, nil, err
-	}
-	unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey])
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return safe, unsafe, nil
-}
-
-// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls.
-func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) {
-	if len(annotation) == 0 {
-		return nil, nil
-	}
-
-	kvs := strings.Split(annotation, ",")
-	sysctls := make([]Sysctl, len(kvs))
-	for i, kv := range kvs {
-		cs := strings.Split(kv, "=")
-		if len(cs) != 2 || len(cs[0]) == 0 {
-			return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv)
-		}
-		sysctls[i].Name = cs[0]
-		sysctls[i].Value = cs[1]
-	}
-	return sysctls, nil
-}
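A short usage sketch of the annotation parser above, with an illustrative annotation value; because each pair is split on "=", a value containing another equals sign is rejected:

package main

import (
	"fmt"
	"strings"
)

// Sysctl mirrors the struct from the removed utils.go.
type Sysctl struct {
	Name  string
	Value string
}

// sysctlsFromAnnotation reproduces the parsing rule: comma-separated
// name=value pairs, rejecting anything not of that exact shape.
func sysctlsFromAnnotation(annotation string) ([]Sysctl, error) {
	if len(annotation) == 0 {
		return nil, nil
	}
	kvs := strings.Split(annotation, ",")
	sysctls := make([]Sysctl, len(kvs))
	for i, kv := range kvs {
		cs := strings.Split(kv, "=")
		if len(cs) != 2 || len(cs[0]) == 0 {
			return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv)
		}
		sysctls[i].Name, sysctls[i].Value = cs[0], cs[1]
	}
	return sysctls, nil
}

func main() {
	// Illustrative value, e.g. from security.alpha.kubernetes.io/sysctls.
	got, err := sysctlsFromAnnotation("kernel.shm_rmid_forced=1,net.core.somaxconn=1024")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, s := range got {
		fmt.Printf("%s = %s\n", s.Name, s.Value)
	}
}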
@@ -8,7 +8,7 @@ import (
 
 // Version returns the runtime name, runtime version and runtime API version
 func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) {
-	runtimeVersion, err := s.runtime.Version()
+	versionResp, err := s.manager.Version()
 	if err != nil {
 		return nil, err
 	}
@@ -18,12 +18,11 @@ func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (*pb.Versi
 
 	// taking const address
 	rav := runtimeAPIVersion
-	runtimeName := s.runtime.Name()
 
 	return &pb.VersionResponse{
 		Version: &version,
-		RuntimeName: &runtimeName,
+		RuntimeName: &versionResp.RuntimeName,
-		RuntimeVersion: &runtimeVersion,
+		RuntimeVersion: &versionResp.RuntimeVersion,
 		RuntimeApiVersion: &rav,
 	}, nil
 }
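The generated CRI v1alpha1 message uses pointer-valued fields, which is why the handler takes addresses such as &versionResp.RuntimeName instead of assigning strings directly. Judging only from those field accesses, the manager's Version() result is presumably shaped roughly like the struct below; both stand-in types here are illustrative, not the actual manager or protobuf definitions:

package main

import "fmt"

// VersionResponse mirrors what the handler appears to consume: plain string
// fields whose addresses can be handed to the protobuf struct.
type VersionResponse struct {
	RuntimeName    string
	RuntimeVersion string
}

// pbVersionResponse stands in for pb.VersionResponse: generated CRI v1alpha1
// messages use pointer-valued scalar fields.
type pbVersionResponse struct {
	RuntimeName    *string
	RuntimeVersion *string
}

func main() {
	v := VersionResponse{RuntimeName: "runc", RuntimeVersion: "1.0.0-rc2"} // illustrative values
	resp := &pbVersionResponse{
		RuntimeName:    &v.RuntimeName,
		RuntimeVersion: &v.RuntimeVersion,
	}
	fmt.Println(*resp.RuntimeName, *resp.RuntimeVersion)
}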