Integrate containers/storage
Use containers/storage to store images, pod sandboxes, and containers. A pod sandbox's infrastructure container has the same ID as the pod to which it belongs, and every other container keeps track of its pod's ID. The container configuration that we build from the data in a CreateContainerRequest is stored in the container's ContainerDirectory and ContainerRunDirectory.

We catch SIGTERM and SIGINT, and when we receive either, we gracefully exit the gRPC loop. If we also believe that no container filesystems are still in use, we attempt a clean shutdown of the storage driver. The test harness now waits for ocid to exit before attempting to delete the storage root directory.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
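A minimal sketch of the signal handling described above, assuming a gRPC server and a Shutdown method like the one this commit adds to Server (which wraps the store's Shutdown(false)); the main function, registration calls, and socket path here are illustrative, not the actual ocid daemon:

package main

import (
	"net"
	"os"
	"os/signal"
	"syscall"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// RegisterRuntimeServiceServer / RegisterImageServiceServer would be
	// called here with a Server wired to a containers/storage store.

	lis, err := net.Listen("unix", "/var/run/ocid.sock")
	if err != nil {
		os.Exit(1)
	}

	// Catch SIGTERM and SIGINT, and gracefully exit the gRPC loop on either.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
	go func() {
		<-c
		gs.GracefulStop() // makes gs.Serve(lis) below return
	}()

	_ = gs.Serve(lis)

	// With the gRPC loop stopped, attempt a clean shutdown of the storage
	// driver. Server.Shutdown (added in this commit) wraps
	// store.Shutdown(false), which refuses to proceed if it thinks
	// container filesystems are still in use.
	// _ = service.Shutdown()
}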
parent caee4a99c9
commit c0333b102b
29 changed files with 637 additions and 372 deletions
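The bulk of the diff below replaces ad-hoc bundle directories under /var/lib/ocid with calls into a storage runtime service: create storage, mount it, write the runtime spec against the mount point, then unmount and delete on teardown. As a rough mental model, here is an illustrative sketch of that lifecycle; the interface is simplified (the real pkg/storage methods take an image context, names, metadata, and mount labels, as the diff shows), so treat it as a sketch rather than the actual API:

package storagesketch

import "fmt"

// runtimeStore is a simplified, illustrative view of the RuntimeServer
// calls used throughout the diff; signatures are abbreviated here.
type runtimeStore interface {
	CreatePodSandbox(name, id string) error                  // allocates the infra container's storage
	CreateContainer(podID, name, id string) error            // allocates a container's storage
	StartContainer(id string) (mountPoint string, err error) // mounts the rootfs
	StopContainer(id string) error                           // unmounts the rootfs
	DeleteContainer(id string) error
	RemovePodSandbox(id string) error
}

// createContainer mirrors the order of operations in the new
// createSandboxContainer: allocate storage, mount it, then write the spec
// against the mount point, deleting the storage again if anything fails.
func createContainer(store runtimeStore, podID, name, id string) (err error) {
	if err = store.CreateContainer(podID, name, id); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			if err2 := store.DeleteContainer(id); err2 != nil {
				fmt.Printf("failed to clean up container storage: %v\n", err2)
			}
		}
	}()
	mountPoint, err := store.StartContainer(id)
	if err != nil {
		return fmt.Errorf("failed to mount container %s: %v", id, err)
	}
	// The runtime spec's root path and config.json are written against
	// this mount point before handing off to the OCI runtime.
	_ = mountPoint
	return nil
}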
@@ -3,7 +3,6 @@ package server
 import (
 	"bytes"
 	"io/ioutil"
-	"path/filepath"
 
 	"github.com/BurntSushi/toml"
 	"github.com/opencontainers/runc/libcontainer/selinux"

@@ -11,16 +10,16 @@ import (
 
 // Default paths if none are specified
-const (
-	ocidRoot = "/var/lib/ocid"
-	conmonPath = "/usr/libexec/ocid/conmon"
-	pausePath = "/usr/libexec/ocid/pause"
-	seccompProfilePath = "/etc/ocid/seccomp.json"
-	cniConfigDir = "/etc/cni/net.d/"
-	cniBinDir = "/opt/cni/bin/"
-)
+const (
+	ocidRoot = "/var/lib/ocid"
+	ocidRunRoot = "/var/run/containers"
+	conmonPath = "/usr/libexec/ocid/conmon"
+	pauseImage = "kubernetes/pause"
+	pauseCommand = "/pause"
+	defaultTransport = "docker://"
+	seccompProfilePath = "/etc/ocid/seccomp.json"
+	apparmorProfileName = "ocid-default"
+	cniConfigDir = "/etc/cni/net.d/"
+	cniBinDir = "/opt/cni/bin/"
+	cgroupManager = "cgroupfs"
+)

@@ -40,17 +39,20 @@ type Config struct {
 
 // RootConfig represents the root of the "ocid" TOML config table.
 type RootConfig struct {
-	// Root is a path to the "root directory" where all information not
+	// Root is a path to the "root directory" where data not
 	// explicitly handled by other options will be stored.
 	Root string `toml:"root"`
 
-	// SandboxDir is the directory where ocid will store all of its sandbox
-	// state and other information.
-	SandboxDir string `toml:"sandbox_dir"`
+	// RunRoot is a path to the "run directory" where state information not
+	// explicitly handled by other options will be stored.
+	RunRoot string `toml:"runroot"`
 
-	// ContainerDir is the directory where ocid will store all of its container
-	// state and other information.
-	ContainerDir string `toml:"container_dir"`
+	// Storage is the name of the storage driver which handles actually
+	// storing the contents of containers.
+	Storage string `toml:"storage_driver"`
 
+	// StorageOption is a list of storage driver specific options.
+	StorageOptions []string `toml:"storage_option"`
 
 	// LogDir is the default log directory were all logs will go unless kubelet
 	// tells us to put them somewhere else.

@@ -98,17 +100,21 @@ type RuntimeConfig struct {
 
 // ImageConfig represents the "ocid.image" TOML config table.
 type ImageConfig struct {
-	// Pause is the path to the statically linked pause container binary, used
-	// as the entrypoint for infra containers.
-	//
-	// TODO(cyphar): This should be replaced with a path to an OCI image
-	// bundle, once the OCI image/storage code has been implemented.
-	Pause string `toml:"pause"`
-
-	// ImageStore is the directory where the ocid image store will be stored.
-	// TODO: This is currently not really used because we don't have
-	// containers/storage integrated.
-	ImageDir string `toml:"image_dir"`
+	// DefaultTransport is a value we prefix to image names that fail to
+	// validate source references.
+	DefaultTransport string `toml:"default_transport"`
+	// PauseImage is the name of an image which we use to instantiate infra
+	// containers.
+	PauseImage string `toml:"pause_image"`
+	// PauseCommand is the path of the binary we run in an infra
+	// container that's been instantiated using PauseImage.
+	PauseCommand string `toml:"pause_command"`
+	// SignaturePolicyPath is the name of the file which decides what sort
+	// of policy we use when deciding whether or not to trust an image that
+	// we've pulled. Outside of testing situations, it is strongly advised
+	// that this be left unspecified so that the default system-wide policy
+	// will be used.
+	SignaturePolicyPath string `toml:"signature_policy"`
 }
 
 // NetworkConfig represents the "ocid.network" TOML config table

@@ -191,10 +197,9 @@ func (c *Config) ToFile(path string) error {
 func DefaultConfig() *Config {
 	return &Config{
 		RootConfig: RootConfig{
-			Root: ocidRoot,
-			SandboxDir: filepath.Join(ocidRoot, "sandboxes"),
-			ContainerDir: filepath.Join(ocidRoot, "containers"),
-			LogDir: "/var/log/ocid/pods",
+			Root: ocidRoot,
+			RunRoot: ocidRunRoot,
+			LogDir: "/var/log/ocid/pods",
 		},
 		APIConfig: APIConfig{
 			Listen: "/var/run/ocid.sock",

@@ -211,8 +216,10 @@ func DefaultConfig() *Config {
 			CgroupManager: cgroupManager,
 		},
 		ImageConfig: ImageConfig{
-			Pause: pausePath,
-			ImageDir: filepath.Join(ocidRoot, "store"),
+			DefaultTransport: defaultTransport,
+			PauseImage: pauseImage,
+			PauseCommand: pauseCommand,
+			SignaturePolicyPath: "",
 		},
 		NetworkConfig: NetworkConfig{
 			NetworkDir: cniConfigDir,
@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"os"
 	"path/filepath"
 	"strings"
 	"syscall"

@@ -14,7 +13,6 @@ import (
 	"github.com/kubernetes-incubator/cri-o/oci"
 	"github.com/kubernetes-incubator/cri-o/server/apparmor"
 	"github.com/kubernetes-incubator/cri-o/server/seccomp"
-	"github.com/kubernetes-incubator/cri-o/utils"
 	"github.com/opencontainers/runc/libcontainer/label"
 	"github.com/opencontainers/runtime-tools/generate"
 	"golang.org/x/net/context"

@@ -30,6 +28,7 @@ const (
 // CreateContainer creates a new container in specified PodSandbox
 func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) {
 	logrus.Debugf("CreateContainerRequest %+v", req)
+	s.Update()
 	sbID := req.GetPodSandboxId()
 	if sbID == "" {
 		return nil, fmt.Errorf("PodSandboxId should not be empty")

@@ -62,30 +61,24 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
 		return nil, err
 	}
 
-	// containerDir is the dir for the container bundle.
-	containerDir := filepath.Join(s.runtime.ContainerDir(), containerID)
 	defer func() {
 		if err != nil {
 			s.releaseContainerName(containerName)
-			err1 := os.RemoveAll(containerDir)
-			if err1 != nil {
-				logrus.Warnf("Failed to cleanup container directory: %v", err1)
-			}
 		}
 	}()
 
-	if _, err = os.Stat(containerDir); err == nil {
-		return nil, fmt.Errorf("container (%s) already exists", containerDir)
-	}
-
-	if err = os.MkdirAll(containerDir, 0755); err != nil {
-		return nil, err
-	}
-
-	container, err := s.createSandboxContainer(containerID, containerName, sb, containerDir, containerConfig)
+	container, err := s.createSandboxContainer(ctx, containerID, containerName, sb, req.GetSandboxConfig(), containerConfig)
 	if err != nil {
 		return nil, err
 	}
+	defer func() {
+		if err != nil {
+			err2 := s.storage.DeleteContainer(containerID)
+			if err2 != nil {
+				logrus.Warnf("Failed to cleanup container directory: %v", err2)
+			}
+		}
+	}()
 
 	if err = s.runtime.CreateContainer(container); err != nil {
 		return nil, err

@@ -110,23 +103,21 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
 	return resp, nil
 }
 
-func (s *Server) createSandboxContainer(containerID string, containerName string, sb *sandbox, containerDir string, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
+func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerName string, sb *sandbox, SandboxConfig *pb.PodSandboxConfig, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
 	if sb == nil {
 		return nil, errors.New("createSandboxContainer needs a sandbox")
 	}
 
 	// TODO: factor generating/updating the spec into something other projects can vendor
 
 	// creates a spec Generator with the default spec.
 	specgen := generate.New()
 
-	// by default, the root path is an empty string.
-	// here set it to be "rootfs".
-	specgen.SetRootPath("rootfs")
-
 	processArgs := []string{}
 	commands := containerConfig.GetCommand()
 	args := containerConfig.GetArgs()
 	if commands == nil && args == nil {
 		// TODO: override with image's config in #189
-		processArgs = []string{"/bin/sh"}
+		processArgs = nil
 	}
 	if commands != nil {
 		processArgs = append(processArgs, commands...)

@@ -135,8 +126,6 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
 		processArgs = append(processArgs, args...)
 	}
 
-	specgen.SetProcessArgs(processArgs)
-
 	cwd := containerConfig.GetWorkingDir()
 	if cwd == "" {
 		cwd = "/"

@@ -357,17 +346,46 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
 		return nil, err
 	}
 
-	if err = specgen.SaveToFile(filepath.Join(containerDir, "config.json"), generate.ExportOptions{}); err != nil {
+	metaname := metadata.GetName()
+	attempt := metadata.GetAttempt()
+	containerInfo, err := s.storage.CreateContainer(s.imageContext,
+		sb.name, sb.id,
+		image, "",
+		containerName, containerID,
+		metaname,
+		attempt,
+		sb.mountLabel,
+		nil)
+	if err != nil {
 		return nil, err
 	}
 
-	// TODO: copy the rootfs into the bundle.
-	// Currently, utils.CreateFakeRootfs is used to populate the rootfs.
-	if err = utils.CreateFakeRootfs(containerDir, image); err != nil {
-		return nil, err
-	}
+	mountPoint, err := s.storage.StartContainer(containerID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to mount container %s(%s): %v", containerName, containerID, err)
+	}
+
+	if processArgs == nil {
+		if containerInfo.Config != nil && len(containerInfo.Config.Config.Cmd) > 0 {
+			processArgs = containerInfo.Config.Config.Cmd
+		} else {
+			processArgs = []string{"/bin/sh"}
+		}
+	}
+	specgen.SetProcessArgs(processArgs)
+
+	// by default, the root path is an empty string. set it now.
+	specgen.SetRootPath(mountPoint)
+
+	saveOptions := generate.ExportOptions{}
+	if err = specgen.SaveToFile(filepath.Join(containerInfo.Dir, "config.json"), saveOptions); err != nil {
+		return nil, err
+	}
+	if err = specgen.SaveToFile(filepath.Join(containerInfo.RunDir, "config.json"), saveOptions); err != nil {
+		return nil, err
+	}
 
-	container, err := oci.NewContainer(containerID, containerName, containerDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.GetTty())
+	container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.GetTty())
 	if err != nil {
 		return nil, err
 	}
@@ -29,6 +29,7 @@ func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool {
 // ListContainers lists all containers by filters.
 func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
 	logrus.Debugf("ListContainersRequest %+v", req)
+	s.Update()
 	var ctrs []*pb.Container
 	filter := req.Filter
 	ctrList := s.state.containers.List()
@@ -2,8 +2,6 @@ package server
 
 import (
 	"fmt"
-	"os"
-	"path/filepath"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/kubernetes-incubator/cri-o/oci"

@@ -15,6 +13,7 @@ import (
 // should be force removed.
 func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) {
 	logrus.Debugf("RemoveContainerRequest %+v", req)
+	s.Update()
 	c, err := s.getContainerFromRequest(req)
 	if err != nil {
 		return nil, err

@@ -35,9 +34,12 @@ func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerReq
 		return nil, fmt.Errorf("failed to delete container %s: %v", c.ID(), err)
 	}
 
-	containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID())
-	if err := os.RemoveAll(containerDir); err != nil {
-		return nil, fmt.Errorf("failed to remove container %s directory: %v", c.ID(), err)
+	if err := s.storage.StopContainer(c.ID()); err != nil {
+		return nil, fmt.Errorf("failed to unmount container %s: %v", c.ID(), err)
+	}
+
+	if err := s.storage.DeleteContainer(c.ID()); err != nil {
+		return nil, fmt.Errorf("failed to delete storage for container %s: %v", c.ID(), err)
 	}
 
 	s.releaseContainerName(c.Name())
@@ -11,12 +11,13 @@ import (
 // StartContainer starts the container.
 func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
 	logrus.Debugf("StartContainerRequest %+v", req)
+	s.Update()
 	c, err := s.getContainerFromRequest(req)
 	if err != nil {
 		return nil, err
 	}
 
-	if err := s.runtime.StartContainer(c); err != nil {
+	if err = s.runtime.StartContainer(c); err != nil {
 		return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
 	}
@@ -10,6 +10,7 @@ import (
 // ContainerStatus returns status of the container.
 func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
 	logrus.Debugf("ContainerStatusRequest %+v", req)
+	s.Update()
 	c, err := s.getContainerFromRequest(req)
 	if err != nil {
 		return nil, err
@@ -12,6 +12,7 @@ import (
 // StopContainer stops a running container with a grace period (i.e., timeout).
 func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
 	logrus.Debugf("StopContainerRequest %+v", req)
+	s.Update()
 	c, err := s.getContainerFromRequest(req)
 	if err != nil {
 		return nil, err
@@ -8,9 +8,27 @@ import (
 
 // ListImages lists existing images.
 func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
-	logrus.Debugf("ListImages: %+v", req)
-	// TODO
-	// containers/storage will take care of this by looking inside /var/lib/ocid/images
-	// and listing images.
-	return &pb.ListImagesResponse{}, nil
+	logrus.Debugf("ListImagesRequest: %+v", req)
+	filter := ""
+	reqFilter := req.GetFilter()
+	if reqFilter != nil {
+		filterImage := reqFilter.GetImage()
+		if filterImage != nil {
+			filter = filterImage.GetImage()
+		}
+	}
+	results, err := s.images.ListImages(filter)
+	if err != nil {
+		return nil, err
+	}
+	response := pb.ListImagesResponse{}
+	for _, result := range results {
+		response.Images = append(response.Images, &pb.Image{
+			Id: sPtr(result.ID),
+			RepoTags: result.Names,
+			Size_: result.Size,
+		})
+	}
+	logrus.Debugf("ListImagesResponse: %+v", response)
+	return &response, nil
 }
@@ -1,86 +1,28 @@
 package server
 
 import (
-	"errors"
-	"io"
-	"os"
-	"path/filepath"
-
 	"github.com/Sirupsen/logrus"
-	"github.com/containers/image/directory"
-	"github.com/containers/image/image"
-	"github.com/containers/image/transports"
+	"github.com/containers/image/copy"
 	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 )
 
 // PullImage pulls a image with authentication config.
 func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
-	logrus.Debugf("PullImage: %+v", req)
-	img := req.GetImage().GetImage()
-	if img == "" {
-		return nil, errors.New("got empty imagespec name")
-	}
-
-	// TODO(runcom): deal with AuthConfig in req.GetAuth()
-
-	// TODO(mrunalp,runcom): why do we need the SandboxConfig here?
-	// how do we pull in a specified sandbox?
-	tr, err := transports.ParseImageName(img)
-	if err != nil {
-		return nil, err
-	}
-	// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
-	src, err := tr.NewImageSource(nil, nil)
-	if err != nil {
-		return nil, err
-	}
-	i, err := image.FromSource(src)
-	if err != nil {
-		return nil, err
-	}
-	blobs := i.LayerInfos()
-	config := i.ConfigInfo()
-	if config.Digest != "" {
-		blobs = append(blobs, config)
-	}
-
-	if err = os.Mkdir(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()), 0755); err != nil {
-		return nil, err
-	}
-	dir, err := directory.NewReference(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()))
-	if err != nil {
-		return nil, err
-	}
-	// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
-	dest, err := dir.NewImageDestination(nil)
-	if err != nil {
-		return nil, err
-	}
-	// save blobs (layer + config for docker v2s2, layers only for docker v2s1 [the config is in the manifest])
-	for _, b := range blobs {
-		// TODO(runcom,nalin): we need do-then-commit to later purge on error
-		var r io.ReadCloser
-		r, _, err = src.GetBlob(b)
-		if err != nil {
-			return nil, err
-		}
-		if _, err = dest.PutBlob(r, b); err != nil {
-			r.Close()
-			return nil, err
-		}
-		r.Close()
-	}
-	// save manifest
-	m, _, err := i.Manifest()
-	if err != nil {
-		return nil, err
-	}
-	if err := dest.PutManifest(m); err != nil {
-		return nil, err
-	}
-
-	return &pb.PullImageResponse{}, nil
+	logrus.Debugf("PullImageRequest: %+v", req)
+	// TODO(runcom?): deal with AuthConfig in req.GetAuth()
+	// TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://)
+
+	image := ""
+	img := req.GetImage()
+	if img != nil {
+		image = img.GetImage()
+	}
+	options := &copy.Options{}
+	_, err := s.images.PullImage(s.imageContext, image, options)
+	if err != nil {
+		return nil, err
+	}
+	resp := &pb.PullImageResponse{}
+	logrus.Debugf("PullImageResponse: %+v", resp)
+	return resp, nil
 }
@@ -1,6 +1,8 @@
 package server
 
 import (
+	"fmt"
+
 	"github.com/Sirupsen/logrus"
 	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"

@@ -8,6 +10,20 @@ import (
 
 // RemoveImage removes the image.
 func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
-	logrus.Debugf("RemoveImage: %+v", req)
-	return &pb.RemoveImageResponse{}, nil
+	logrus.Debugf("RemoveImageRequest: %+v", req)
+	image := ""
+	img := req.GetImage()
+	if img != nil {
+		image = img.GetImage()
+	}
+	if image == "" {
+		return nil, fmt.Errorf("no image specified")
+	}
+	err := s.images.RemoveImage(s.imageContext, image)
+	if err != nil {
+		return nil, err
+	}
+	resp := &pb.RemoveImageResponse{}
+	logrus.Debugf("RemoveImageResponse: %+v", resp)
+	return resp, nil
 }
@@ -1,6 +1,8 @@
 package server
 
 import (
+	"fmt"
+
 	"github.com/Sirupsen/logrus"
 	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"

@@ -8,9 +10,26 @@ import (
 
 // ImageStatus returns the status of the image.
 func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
-	logrus.Debugf("ImageStatus: %+v", req)
-	// TODO
-	// containers/storage will take care of this by looking inside /var/lib/ocid/images
-	// and getting the image status
-	return &pb.ImageStatusResponse{}, nil
+	logrus.Debugf("ImageStatusRequest: %+v", req)
+	image := ""
+	img := req.GetImage()
+	if img != nil {
+		image = img.GetImage()
+	}
+	if image == "" {
+		return nil, fmt.Errorf("no image specified")
+	}
+	status, err := s.images.ImageStatus(s.imageContext, image)
+	if err != nil {
+		return nil, err
+	}
+	resp := &pb.ImageStatusResponse{
+		Image: &pb.Image{
+			Id: &status.ID,
+			RepoTags: status.Names,
+			Size_: status.Size,
+		},
+	}
+	logrus.Debugf("ImageStatusResponse: %+v", resp)
+	return resp, nil
 }
@@ -145,6 +145,7 @@ const (
 	podDefaultNamespace = "default"
 	defaultShmSize = 64 * 1024 * 1024
 	nsRunDir = "/var/run/netns"
+	podInfraCommand = "/pause"
 )
 
 var (

@@ -277,7 +278,7 @@ func (s *Server) getPodSandboxFromRequest(req podSandboxRequest) (*sandbox, erro
 
 	sb := s.getSandbox(sandboxID)
 	if sb == nil {
-		return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID)
+		return nil, fmt.Errorf("specified pod sandbox not found: %s", sandboxID)
 	}
 	return sb, nil
 }
@@ -29,6 +29,7 @@ func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
 // ListPodSandbox returns a list of SandBoxes.
 func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
 	logrus.Debugf("ListPodSandboxRequest %+v", req)
+	s.Update()
 	var pods []*pb.PodSandbox
 	var podList []*sandbox
 	for _, sb := range s.state.sandboxes {
@@ -2,8 +2,6 @@ package server
 
 import (
 	"fmt"
-	"os"
-	"path/filepath"
 	"syscall"
 
 	"github.com/Sirupsen/logrus"

@@ -17,6 +15,7 @@ import (
 // sandbox, they should be force deleted.
 func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
 	logrus.Debugf("RemovePodSandboxRequest %+v", req)
+	s.Update()
 	sb, err := s.getPodSandboxFromRequest(req)
 	if err != nil {
 		if err == errSandboxIDEmpty {

@@ -46,16 +45,18 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
 	}
 
 	if err := s.runtime.DeleteContainer(c); err != nil {
-		return nil, fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), sb.id, err)
+		return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
 	}
 
 	if c == podInfraContainer {
 		continue
 	}
 
-	containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID())
-	if err := os.RemoveAll(containerDir); err != nil {
-		return nil, fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err)
+	if err := s.storage.StopContainer(c.ID()); err != nil {
+		return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
+	}
+	if err := s.storage.DeleteContainer(c.ID()); err != nil {
+		return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
 	}
 
 	s.releaseContainerName(c.Name())

@@ -81,10 +82,13 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
 	}
 
 	// Remove the files related to the sandbox
-	podSandboxDir := filepath.Join(s.config.SandboxDir, sb.id)
-	if err := os.RemoveAll(podSandboxDir); err != nil {
-		return nil, fmt.Errorf("failed to remove sandbox %s directory: %v", sb.id, err)
+	if err := s.storage.StopContainer(sb.id); err != nil {
+		return nil, fmt.Errorf("failed to delete sandbox container in pod sandbox %s: %v", sb.id, err)
+	}
+	if err := s.storage.RemovePodSandbox(sb.id); err != nil {
+		return nil, fmt.Errorf("failed to remove pod sandbox %s: %v", sb.id, err)
 	}
 
 	s.releaseContainerName(podInfraContainer.Name())
 	s.removeContainer(podInfraContainer)
 	sb.infraContainer = nil
@@ -9,8 +9,8 @@ import (
 	"syscall"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/storage"
 	"github.com/kubernetes-incubator/cri-o/oci"
-	"github.com/kubernetes-incubator/cri-o/utils"
 	"github.com/opencontainers/runc/libcontainer/label"
 	"github.com/opencontainers/runtime-tools/generate"
 	"golang.org/x/net/context"

@@ -54,6 +54,10 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 	if err != nil {
 		return nil, err
 	}
+	_, containerName, err := s.generateContainerIDandName(name, "infra", attempt)
+	if err != nil {
+		return nil, err
+	}
 
 	defer func() {
 		if err != nil {

@@ -67,39 +71,51 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 
 	defer func() {
 		if err != nil {
-			if err = s.podIDIndex.Delete(id); err != nil {
+			if err2 := s.podIDIndex.Delete(id); err2 != nil {
 				logrus.Warnf("couldn't delete pod id %s from idIndex", id)
 			}
 		}
 	}()
 
-	podSandboxDir := filepath.Join(s.config.SandboxDir, id)
-	if _, err = os.Stat(podSandboxDir); err == nil {
-		return nil, fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir)
+	podContainer, err := s.storage.CreatePodSandbox(s.imageContext,
+		name, id,
+		s.config.PauseImage, "",
+		containerName,
+		req.GetConfig().GetMetadata().GetName(),
+		req.GetConfig().GetMetadata().GetUid(),
+		namespace,
+		attempt,
+		nil)
+	if err == storage.ErrDuplicateName {
+		return nil, fmt.Errorf("pod sandbox with name %q already exists", name)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("error creating pod sandbox with name %q: %v", name, err)
 	}
 
 	defer func() {
 		if err != nil {
-			if err2 := os.RemoveAll(podSandboxDir); err2 != nil {
-				logrus.Warnf("couldn't cleanup podSandboxDir %s: %v", podSandboxDir, err2)
+			if err2 := s.storage.RemovePodSandbox(id); err2 != nil {
+				logrus.Warnf("couldn't cleanup pod sandbox %q: %v", id, err2)
 			}
 		}
 	}()
 
-	if err = os.MkdirAll(podSandboxDir, 0755); err != nil {
-		return nil, err
-	}
 	// TODO: factor generating/updating the spec into something other projects can vendor
 
 	// creates a spec Generator with the default spec.
 	g := generate.New()
 
-	// TODO: Make the `graph/vfs` part of this configurable once the storage
-	// integration has been merged.
-	podInfraRootfs := filepath.Join(s.config.Root, "graph/vfs/pause")
 	// setup defaults for the pod sandbox
-	g.SetRootPath(filepath.Join(podInfraRootfs, "rootfs"))
 	g.SetRootReadonly(true)
-	g.SetProcessArgs([]string{"/pause"})
+	if s.config.PauseCommand == "" {
+		if podContainer.Config != nil {
+			g.SetProcessArgs(podContainer.Config.Config.Cmd)
+		} else {
+			g.SetProcessArgs([]string{podInfraCommand})
+		}
+	} else {
+		g.SetProcessArgs([]string{s.config.PauseCommand})
+	}
 
 	// set hostname
 	hostname := req.GetConfig().GetHostname()

@@ -117,7 +133,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 	dnsServers := req.GetConfig().GetDnsConfig().GetServers()
 	dnsSearches := req.GetConfig().GetDnsConfig().GetSearches()
 	dnsOptions := req.GetConfig().GetDnsConfig().GetOptions()
-	resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir)
+	resolvPath := fmt.Sprintf("%s/resolv.conf", podContainer.RunDir)
 	err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath)
 	if err != nil {
 		err1 := removeFile(resolvPath)

@@ -165,7 +181,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 	if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
 		shmPath = "/dev/shm"
 	} else {
-		shmPath, err = setupShm(podSandboxDir, mountLabel)
+		shmPath, err = setupShm(podContainer.RunDir, mountLabel)
 		if err != nil {
 			return nil, err
 		}

@@ -178,7 +194,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 		}()
 	}
 
-	containerID, containerName, err := s.generateContainerIDandName(name, "infra", 0)
+	err = s.setPodSandboxMountLabel(id, mountLabel)
 	if err != nil {
 		return nil, err
 	}

@@ -189,14 +205,14 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 		}
 	}()
 
-	if err = s.ctrIDIndex.Add(containerID); err != nil {
+	if err = s.ctrIDIndex.Add(id); err != nil {
 		return nil, err
 	}
 
 	defer func() {
 		if err != nil {
-			if err = s.ctrIDIndex.Delete(containerID); err != nil {
-				logrus.Warnf("couldn't delete ctr id %s from idIndex", containerID)
+			if err2 := s.ctrIDIndex.Delete(id); err2 != nil {
+				logrus.Warnf("couldn't delete ctr id %s from idIndex", id)
 			}
 		}
 	}()

@@ -207,8 +223,9 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 	g.AddAnnotation("ocid/log_path", logDir)
 	g.AddAnnotation("ocid/name", name)
 	g.AddAnnotation("ocid/container_type", containerTypeSandbox)
+	g.AddAnnotation("ocid/sandbox_id", id)
 	g.AddAnnotation("ocid/container_name", containerName)
-	g.AddAnnotation("ocid/container_id", containerID)
+	g.AddAnnotation("ocid/container_id", id)
 	g.AddAnnotation("ocid/shm_path", shmPath)
 
 	sb := &sandbox{

@@ -246,11 +263,11 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 	cgroupParent := req.GetConfig().GetLinux().GetCgroupParent()
 	if cgroupParent != "" {
 		if s.config.CgroupManager == "systemd" {
-			cgPath := sb.cgroupParent + ":" + "ocid" + ":" + containerID
+			cgPath := sb.cgroupParent + ":" + "ocid" + ":" + id
 			g.SetLinuxCgroupsPath(cgPath)
 		} else {
-			g.SetLinuxCgroupsPath(sb.cgroupParent + "/" + containerID)
+			g.SetLinuxCgroupsPath(sb.cgroupParent + "/" + id)
 		}
 		sb.cgroupParent = cgroupParent

@@ -308,23 +325,21 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 		}
 	}
 
-	err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"), generate.ExportOptions{})
+	saveOptions := generate.ExportOptions{}
+	mountPoint, err := s.storage.StartContainer(id)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to mount container %s in pod sandbox %s(%s): %v", containerName, sb.name, id, err)
 	}
+	g.SetRootPath(mountPoint)
+	err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
+	if err != nil {
+		return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.name, id, err)
+	}
+	if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil {
+		return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.name, id, err)
+	}
 
-	if _, err = os.Stat(podInfraRootfs); err != nil {
-		if os.IsNotExist(err) {
-			// TODO: Replace by rootfs creation API when it is ready
-			if err = utils.CreateInfraRootfs(podInfraRootfs, s.config.Pause); err != nil {
-				return nil, err
-			}
-		} else {
-			return nil, err
-		}
-	}
-
-	container, err := oci.NewContainer(containerID, containerName, podSandboxDir, podSandboxDir, sb.netNs(), labels, annotations, nil, nil, id, false)
+	container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logDir, sb.netNs(), labels, annotations, nil, nil, id, false)
 	if err != nil {
 		return nil, err
 	}

@@ -348,6 +363,19 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 	return resp, nil
 }
 
+func (s *Server) setPodSandboxMountLabel(id, mountLabel string) error {
+	storageMetadata, err := s.storage.GetContainerMetadata(id)
+	if err != nil {
+		return err
+	}
+	storageMetadata.SetMountLabel(mountLabel)
+	err = s.storage.SetContainerMetadata(id, storageMetadata)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) {
 	processLabel = ""
 	if selinuxOptions != nil {

@@ -375,8 +403,8 @@ func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mo
 	return label.InitLabels(label.DupSecOpt(processLabel))
 }
 
-func setupShm(podSandboxDir, mountLabel string) (shmPath string, err error) {
-	shmPath = filepath.Join(podSandboxDir, "shm")
+func setupShm(podSandboxRunDir, mountLabel string) (shmPath string, err error) {
+	shmPath = filepath.Join(podSandboxRunDir, "shm")
 	if err = os.Mkdir(shmPath, 0700); err != nil {
 		return "", err
 	}
@@ -10,6 +10,7 @@ import (
 // PodSandboxStatus returns the Status of the PodSandbox.
 func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
 	logrus.Debugf("PodSandboxStatusRequest %+v", req)
+	s.Update()
 	sb, err := s.getPodSandboxFromRequest(req)
 	if err != nil {
 		return nil, err
@@ -14,6 +14,7 @@ import (
 // sandbox, they should be force terminated.
 func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
 	logrus.Debugf("StopPodSandboxRequest %+v", req)
+	s.Update()
 	sb, err := s.getPodSandboxFromRequest(req)
 	if err != nil {
 		return nil, err

@@ -50,7 +51,7 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
 	cStatus := s.runtime.ContainerStatus(c)
 	if cStatus.Status != oci.ContainerStateStopped {
 		if err := s.runtime.StopContainer(c); err != nil {
-			return nil, fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), sb.id, err)
+			return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
 		}
 	}
 	}
server/server.go

@@ -5,14 +5,16 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"
 	"syscall"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/containers/image/types"
+	sstorage "github.com/containers/storage/storage"
 	"github.com/docker/docker/pkg/registrar"
 	"github.com/docker/docker/pkg/truncindex"
 	"github.com/kubernetes-incubator/cri-o/oci"
+	"github.com/kubernetes-incubator/cri-o/pkg/storage"
 	"github.com/kubernetes-incubator/cri-o/server/apparmor"
 	"github.com/kubernetes-incubator/cri-o/server/seccomp"
 	"github.com/opencontainers/runc/libcontainer/label"

@@ -29,6 +31,9 @@ const (
 type Server struct {
 	config Config
 	runtime *oci.Runtime
+	store sstorage.Store
+	images storage.ImageServer
+	storage storage.RuntimeServer
 	stateLock sync.Mutex
 	state *serverState
 	netPlugin ocicni.CNIPlugin

@@ -36,6 +41,7 @@ type Server struct {
 	podIDIndex *truncindex.TruncIndex
 	ctrNameIndex *registrar.Registrar
 	ctrIDIndex *truncindex.TruncIndex
+	imageContext *types.SystemContext
 
 	seccompEnabled bool
 	seccompProfile seccomp.Seccomp

@@ -45,7 +51,7 @@ type Server struct {
 }
 
 func (s *Server) loadContainer(id string) error {
-	config, err := ioutil.ReadFile(filepath.Join(s.runtime.ContainerDir(), id, "config.json"))
+	config, err := s.store.GetFromContainerDirectory(id, "config.json")
 	if err != nil {
 		return err
 	}

@@ -76,7 +82,10 @@ func (s *Server) loadContainer(id string) error {
 	if v := m.Annotations["ocid/tty"]; v == "true" {
 		tty = true
 	}
-	containerPath := filepath.Join(s.runtime.ContainerDir(), id)
+	containerPath, err := s.store.GetContainerRunDirectory(id)
+	if err != nil {
+		return err
+	}
 
 	var img *pb.ImageSpec
 	image, ok := m.Annotations["ocid/image"]

@@ -122,7 +131,7 @@ func configNetNsPath(spec rspec.Spec) (string, error) {
 }
 
 func (s *Server) loadSandbox(id string) error {
-	config, err := ioutil.ReadFile(filepath.Join(s.config.SandboxDir, id, "config.json"))
+	config, err := s.store.GetFromContainerDirectory(id, "config.json")
 	if err != nil {
 		return err
 	}

@@ -184,7 +193,10 @@ func (s *Server) loadSandbox(id string) error {
 
 	s.addSandbox(sb)
 
-	sandboxPath := filepath.Join(s.config.SandboxDir, id)
+	sandboxPath, err := s.store.GetContainerRunDirectory(id)
+	if err != nil {
+		return err
+	}
 
 	if err = label.ReserveLabel(processLabel); err != nil {
 		return err

@@ -200,7 +212,7 @@ func (s *Server) loadSandbox(id string) error {
 	}
 	sb.infraContainer = scontainer
 	if err = s.runtime.UpdateStatus(scontainer); err != nil {
-		logrus.Warnf("error updating status for container %s: %v", scontainer.ID(), err)
+		logrus.Warnf("error updating status for pod sandbox infra container %s: %v", scontainer.ID(), err)
 	}
 	if err = s.ctrIDIndex.Add(scontainer.ID()); err != nil {
 		return err

@@ -212,31 +224,138 @@ func (s *Server) loadSandbox(id string) error {
 }
 
 func (s *Server) restore() {
-	sandboxDir, err := ioutil.ReadDir(s.config.SandboxDir)
+	containers, err := s.store.Containers()
 	if err != nil && !os.IsNotExist(err) {
-		logrus.Warnf("could not read sandbox directory %s: %v", sandboxDir, err)
+		logrus.Warnf("could not read containers and sandboxes: %v", err)
 	}
-	for _, v := range sandboxDir {
-		if !v.IsDir() {
+	pods := map[string]*storage.RuntimeContainerMetadata{}
+	podContainers := map[string]*storage.RuntimeContainerMetadata{}
+	for _, container := range containers {
+		metadata, err2 := s.storage.GetContainerMetadata(container.ID)
+		if err2 != nil {
+			logrus.Warnf("error parsing metadata for %s: %v, ignoring", container.ID, err2)
 			continue
 		}
-		if err = s.loadSandbox(v.Name()); err != nil {
-			logrus.Warnf("could not restore sandbox %s: %v", v.Name(), err)
+		if metadata.Pod {
+			pods[container.ID] = &metadata
+		} else {
+			podContainers[container.ID] = &metadata
 		}
 	}
-	containerDir, err := ioutil.ReadDir(s.runtime.ContainerDir())
-	if err != nil && !os.IsNotExist(err) {
-		logrus.Warnf("could not read container directory %s: %v", s.runtime.ContainerDir(), err)
-	}
-	for _, v := range containerDir {
-		if !v.IsDir() {
-			continue
+	for containerID, metadata := range pods {
+		if err = s.loadSandbox(containerID); err != nil {
+			logrus.Warnf("could not restore sandbox %s container %s: %v", metadata.PodID, containerID, err)
 		}
-		if err := s.loadContainer(v.Name()); err != nil {
-			logrus.Warnf("could not restore container %s: %v", v.Name(), err)
+	}
+	for containerID := range podContainers {
+		if err := s.loadContainer(containerID); err != nil {
+			logrus.Warnf("could not restore container %s: %v", containerID, err)
 		}
 	}
 }
 
+// Update makes changes to the server's state (lists of pods and containers) to
+// reflect the list of pods and containers that are stored on disk, possibly
+// having been modified by other parties
+func (s *Server) Update() {
+	logrus.Debugf("updating sandbox and container information")
+	if err := s.update(); err != nil {
+		logrus.Errorf("error updating sandbox and container information: %v", err)
+	}
+}
+
+func (s *Server) update() error {
+	containers, err := s.store.Containers()
+	if err != nil && !os.IsNotExist(err) {
+		logrus.Warnf("could not read containers and sandboxes: %v", err)
+		return err
+	}
+	newPods := map[string]*storage.RuntimeContainerMetadata{}
+	oldPods := map[string]string{}
+	removedPods := map[string]string{}
+	newPodContainers := map[string]*storage.RuntimeContainerMetadata{}
+	oldPodContainers := map[string]string{}
+	removedPodContainers := map[string]string{}
+	for _, container := range containers {
+		if s.hasSandbox(container.ID) {
+			// FIXME: do we need to reload/update any info about the sandbox?
+			oldPods[container.ID] = container.ID
+			oldPodContainers[container.ID] = container.ID
+			continue
+		}
+		if s.getContainer(container.ID) != nil {
+			// FIXME: do we need to reload/update any info about the container?
+			oldPodContainers[container.ID] = container.ID
+			continue
+		}
+		// not previously known, so figure out what it is
+		metadata, err2 := s.storage.GetContainerMetadata(container.ID)
+		if err2 != nil {
+			logrus.Errorf("error parsing metadata for %s: %v, ignoring", container.ID, err2)
+			continue
+		}
+		if metadata.Pod {
+			newPods[container.ID] = &metadata
+		} else {
+			newPodContainers[container.ID] = &metadata
+		}
+	}
+	s.ctrIDIndex.Iterate(func(id string) {
+		if _, ok := oldPodContainers[id]; !ok {
+			// this container's ID wasn't in the updated list -> removed
+			removedPodContainers[id] = id
+		}
+	})
+	for removedPodContainer := range removedPodContainers {
+		// forget this container
+		c := s.getContainer(removedPodContainer)
+		s.releaseContainerName(c.Name())
+		s.removeContainer(c)
+		if err = s.ctrIDIndex.Delete(c.ID()); err != nil {
+			return err
+		}
+		logrus.Debugf("forgetting removed pod container %s", c.ID())
+	}
+	s.podIDIndex.Iterate(func(id string) {
+		if _, ok := oldPods[id]; !ok {
+			// this pod's ID wasn't in the updated list -> removed
+			removedPods[id] = id
+		}
+	})
+	for removedPod := range removedPods {
+		// forget this pod
+		sb := s.getSandbox(removedPod)
+		podInfraContainer := sb.infraContainer
+		s.releaseContainerName(podInfraContainer.Name())
+		s.removeContainer(podInfraContainer)
+		if err = s.ctrIDIndex.Delete(podInfraContainer.ID()); err != nil {
+			return err
+		}
+		sb.infraContainer = nil
+		s.releasePodName(sb.name)
+		s.removeSandbox(sb.id)
+		if err = s.podIDIndex.Delete(sb.id); err != nil {
+			return err
+		}
+		logrus.Debugf("forgetting removed pod %s", sb.id)
+	}
+	for sandboxID := range newPods {
+		// load this pod
+		if err = s.loadSandbox(sandboxID); err != nil {
+			logrus.Warnf("could not load new pod sandbox %s: %v, ignoring", sandboxID, err)
+		} else {
+			logrus.Debugf("loaded new pod sandbox %s", sandboxID, err)
+		}
+	}
+	for containerID := range newPodContainers {
+		// load this container
+		if err = s.loadContainer(containerID); err != nil {
+			logrus.Warnf("could not load new sandbox container %s: %v, ignoring", containerID, err)
+		} else {
+			logrus.Debugf("loaded new pod container %s", containerID, err)
+		}
+	}
+	return nil
+}
+
 func (s *Server) reservePodName(id, name string) (string, error) {

@@ -294,17 +413,35 @@ func seccompEnabled() bool {
 	return enabled
 }
 
+// Shutdown attempts to shut down the server's storage cleanly
+func (s *Server) Shutdown() error {
+	_, err := s.store.Shutdown(false)
+	return err
+}
+
 // New creates a new Server with options provided
 func New(config *Config) (*Server, error) {
-	if err := os.MkdirAll(config.ImageDir, 0755); err != nil {
+	store, err := sstorage.GetStore(sstorage.StoreOptions{
+		RunRoot: config.RunRoot,
+		GraphRoot: config.Root,
+		GraphDriverName: config.Storage,
+		GraphDriverOptions: config.StorageOptions,
+	})
+	if err != nil {
 		return nil, err
 	}
 
-	if err := os.MkdirAll(config.SandboxDir, 0755); err != nil {
+	imageService, err := storage.GetImageService(store, config.DefaultTransport)
+	if err != nil {
 		return nil, err
 	}
 
-	r, err := oci.New(config.Runtime, config.ContainerDir, config.Conmon, config.ConmonEnv, config.CgroupManager)
+	storageRuntimeService := storage.GetRuntimeService(imageService)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := oci.New(config.Runtime, config.Conmon, config.ConmonEnv, config.CgroupManager)
 	if err != nil {
 		return nil, err
 	}

@@ -316,6 +453,9 @@ func New(config *Config) (*Server, error) {
 	}
 	s := &Server{
 		runtime: r,
+		store: store,
+		images: imageService,
+		storage: storageRuntimeService,
 		netPlugin: netPlugin,
 		config: *config,
 		state: &serverState{

@@ -346,6 +486,9 @@ func New(config *Config) (*Server, error) {
 	s.podNameIndex = registrar.NewRegistrar()
 	s.ctrIDIndex = truncindex.NewTruncIndex([]string{})
 	s.ctrNameIndex = registrar.NewRegistrar()
+	s.imageContext = &types.SystemContext{
+		SignaturePolicyPath: config.ImageConfig.SignaturePolicyPath,
+	}
 
 	s.restore()