Integrate containers/storage

Use containers/storage to store images, pod sandboxes, and containers.
A pod sandbox's infrastructure container has the same ID as the pod to
which it belongs, and all containers also keep track of their pod's ID.

The container configuration that we build using the data in a
CreateContainerRequest is stored in the container's ContainerDirectory
and ContainerRunDirectory.

We catch SIGTERM and SIGINT, and when we receive either, we gracefully
exit the grpc loop.  If we also think that there aren't any container
filesystems in use, we attempt to do a clean shutdown of the storage
driver.

The test harness now waits for ocid to exit before attempting to delete
the storage root directory.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
This commit is contained in:
Nalin Dahyabhai 2016-10-18 10:48:33 -04:00
parent caee4a99c9
commit c0333b102b
29 changed files with 637 additions and 372 deletions

View file

@ -9,6 +9,10 @@ sudo: required
services: services:
- docker - docker
before_install:
- sudo apt-get -qq update
- sudo apt-get -qq install btrfs-tools libdevmapper-dev libgpgme11-dev
install: install:
- make install.tools - make install.tools

View file

@ -23,6 +23,7 @@ RUN apt-get update && apt-get install -y \
btrfs-tools \ btrfs-tools \
libdevmapper1.02.1 \ libdevmapper1.02.1 \
libdevmapper-dev \ libdevmapper-dev \
libgpgme11-dev \
--no-install-recommends \ --no-install-recommends \
&& apt-get clean && apt-get clean
@ -52,6 +53,10 @@ RUN set -x \
&& cp runc /usr/local/bin/runc \ && cp runc /usr/local/bin/runc \
&& rm -rf "$GOPATH" && rm -rf "$GOPATH"
# Make sure we have some policy for pulling images
RUN mkdir -p /etc/containers
COPY test/policy.json /etc/containers/policy.json
WORKDIR /go/src/github.com/kubernetes-incubator/cri-o WORKDIR /go/src/github.com/kubernetes-incubator/cri-o
ADD . /go/src/github.com/kubernetes-incubator/cri-o ADD . /go/src/github.com/kubernetes-incubator/cri-o

View file

@ -42,9 +42,11 @@ It is currently in active development in the Kubernetes community through the [d
### Build ### Build
`glib2-devel` and `glibc-static` packages on Fedora or ` libglib2.0-dev` on Ubuntu or equivalent is required. `btrfs-progs-devel`, `device-mapper-devel`, `glib2-devel`, `glibc-devel`, `gpgme-devel`, `libassuan-devel`, `libgpg-error-devel`, and `pkg-config` packages on CentOS/Fedora or `btrfs-tools`, `libassuan-dev`, `libc6-dev`, `libdevmapper-dev`, `libglib2.0-dev`, `libgpg-error-dev`, `libgpgme11-dev`, and `pkg-config` on Ubuntu or equivalent is required.
In order to enable seccomp support you will need to install `libseccomp` on your platform. In order to enable seccomp support you will need to install development files for `libseccomp` on your platform.
> e.g. `libseccomp-devel` for CentOS/Fedora, or `libseccomp-dev` for Ubuntu > e.g. `libseccomp-devel` for CentOS/Fedora, or `libseccomp-dev` for Ubuntu
In order to enable apparmor support you will need to install development files for `libapparmor` on your platform.
> e.g. `libapparmor-dev` for Ubuntu
```bash ```bash
$ GOPATH=/path/to/gopath $ GOPATH=/path/to/gopath

View file

@ -12,17 +12,21 @@ var commentedConfigTemplate = template.Must(template.New("config").Parse(`
# The "ocid" table contains all of the server options. # The "ocid" table contains all of the server options.
[ocid] [ocid]
# root is a path to the "root directory". OCID stores all of its state # root is a path to the "root directory". OCID stores all of its data,
# data, including container images, in this directory. # including container images, in this directory.
root = "{{ .Root }}" root = "{{ .Root }}"
# sandbox_dir is the directory where ocid will store all of its sandbox # run is a path to the "run directory". OCID stores all of its state
# state and other information. # in this directory.
sandbox_dir = "{{ .SandboxDir }}" runroot = "{{ .RunRoot }}"
# container_dir is the directory where ocid will store all of its # storage_driver select which storage driver is used to manage storage
# container state and other information. # of images and containers.
container_dir = "{{ .ContainerDir }}" storage_driver = "{{ .Storage }}"
# storage_option is used to pass an option to the storage driver.
storage_option = [
{{ range $opt := .StorageOptions }}{{ printf "\t%q,\n" $opt }}{{ end }}]
# The "ocid.api" table contains settings for the kubelet/gRPC # The "ocid.api" table contains settings for the kubelet/gRPC
# interface (which is also used by ocic). # interface (which is also used by ocic).
@ -67,9 +71,23 @@ cgroup_manager = "{{ .CgroupManager }}"
# management of OCI images. # management of OCI images.
[ocid.image] [ocid.image]
# pause is the path to the statically linked pause container binary, used # default_transport is the prefix we try prepending to an image name if the
# as the entrypoint for infra containers. # image name as we receive it can't be parsed as a valid source reference
pause = "{{ .Pause }}" default_transport = "{{ .DefaultTransport }}"
# pause_image is the image which we use to instantiate infra containers.
pause_image = "{{ .PauseImage }}"
# pause_command is the command to run in a pause_image to have a container just
# sit there. If the image contains the necessary information, this value need
# not be specified.
pause_command = "{{ .PauseCommand }}"
# signature_policy is the name of the file which decides what sort of policy we
# use when deciding whether or not to trust an image that we've pulled.
# Outside of testing situations, it is strongly advised that this be left
# unspecified so that the default system-wide policy will be used.
signature_policy = "{{ .SignaturePolicyPath }}"
# The "ocid.network" table contains settings pertaining to the # The "ocid.network" table contains settings pertaining to the
# management of CNI plugins. # management of CNI plugins.

View file

@ -4,7 +4,10 @@ import (
"fmt" "fmt"
"net" "net"
"os" "os"
"os/signal"
"sort" "sort"
"strings"
"syscall"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/reexec"
@ -36,17 +39,29 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error {
if ctx.GlobalIsSet("conmon") { if ctx.GlobalIsSet("conmon") {
config.Conmon = ctx.GlobalString("conmon") config.Conmon = ctx.GlobalString("conmon")
} }
if ctx.GlobalIsSet("containerdir") { if ctx.GlobalIsSet("pause-command") {
config.ContainerDir = ctx.GlobalString("containerdir") config.PauseCommand = ctx.GlobalString("pause-command")
} }
if ctx.GlobalIsSet("pause") { if ctx.GlobalIsSet("pause-image") {
config.Pause = ctx.GlobalString("pause") config.PauseImage = ctx.GlobalString("pause-image")
}
if ctx.GlobalIsSet("signature-policy") {
config.SignaturePolicyPath = ctx.GlobalString("signature-policy")
} }
if ctx.GlobalIsSet("root") { if ctx.GlobalIsSet("root") {
config.Root = ctx.GlobalString("root") config.Root = ctx.GlobalString("root")
} }
if ctx.GlobalIsSet("sandboxdir") { if ctx.GlobalIsSet("runroot") {
config.SandboxDir = ctx.GlobalString("sandboxdir") config.RunRoot = ctx.GlobalString("runroot")
}
if ctx.GlobalIsSet("storage-driver") {
config.Storage = ctx.GlobalString("storage-driver")
}
if ctx.GlobalIsSet("storage-option") {
config.StorageOptions = ctx.GlobalStringSlice("storage-option")
}
if ctx.GlobalIsSet("default-transport") {
config.DefaultTransport = ctx.GlobalString("default-transport")
} }
if ctx.GlobalIsSet("listen") { if ctx.GlobalIsSet("listen") {
config.Listen = ctx.GlobalString("listen") config.Listen = ctx.GlobalString("listen")
@ -75,6 +90,26 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error {
return nil return nil
} }
// catchShutdown installs handlers for SIGINT and SIGTERM. When either
// signal arrives, it records the fact in *signalled and asks the gRPC
// server to stop accepting new work and drain in-flight RPCs. The
// sserver parameter is kept for interface stability; shutdown of the
// storage layer is driven by the caller after Serve returns.
func catchShutdown(gserver *grpc.Server, sserver *server.Server, signalled *bool) {
	// Buffered so that a burst of signals cannot be dropped before the
	// goroutine below gets scheduled.
	sig := make(chan os.Signal, 10)
	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		for received := range sig {
			if received == syscall.SIGINT {
				logrus.Debugf("Caught SIGINT")
			} else if received == syscall.SIGTERM {
				logrus.Debugf("Caught SIGTERM")
			} else {
				// Not a shutdown signal; keep waiting.
				continue
			}
			// Flag the graceful path for the caller, then drain the
			// gRPC server and stop listening for further signals.
			*signalled = true
			gserver.GracefulStop()
			return
		}
	}()
}
func main() { func main() {
if reexec.Init() { if reexec.Init() {
return return
@ -97,10 +132,6 @@ func main() {
Name: "conmon", Name: "conmon",
Usage: "path to the conmon executable", Usage: "path to the conmon executable",
}, },
cli.StringFlag{
Name: "containerdir",
Usage: "ocid container dir",
},
cli.BoolFlag{ cli.BoolFlag{
Name: "debug", Name: "debug",
Usage: "enable debug output for logging", Usage: "enable debug output for logging",
@ -120,20 +151,40 @@ func main() {
Usage: "set the format used by logs ('text' (default), or 'json')", Usage: "set the format used by logs ('text' (default), or 'json')",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "pause", Name: "pause-command",
Usage: "path to the pause executable", Usage: "name of the pause command in the pause image",
},
cli.StringFlag{
Name: "pause-image",
Usage: "name of the pause image",
},
cli.StringFlag{
Name: "signature-policy",
Usage: "path to signature policy file",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "root", Name: "root",
Usage: "ocid root dir", Usage: "ocid root dir",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "runtime", Name: "runroot",
Usage: "OCI runtime path", Usage: "ocid state dir",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "sandboxdir", Name: "storage-driver",
Usage: "ocid pod sandbox dir", Usage: "storage driver",
},
cli.StringSliceFlag{
Name: "storage-option",
Usage: "storage driver option",
},
cli.StringFlag{
Name: "default-transport",
Usage: "default transport",
},
cli.StringFlag{
Name: "runtime",
Usage: "OCI runtime path",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "seccomp-profile", Name: "seccomp-profile",
@ -236,13 +287,24 @@ func main() {
logrus.Fatal(err) logrus.Fatal(err)
} }
graceful := false
catchShutdown(s, service, &graceful)
runtime.RegisterRuntimeServiceServer(s, service) runtime.RegisterRuntimeServiceServer(s, service)
runtime.RegisterImageServiceServer(s, service) runtime.RegisterImageServiceServer(s, service)
// after the daemon is done setting up we can notify systemd api // after the daemon is done setting up we can notify systemd api
notifySystem() notifySystem()
if err := s.Serve(lis); err != nil { err = s.Serve(lis)
if graceful && strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
err = nil
}
if err2 := service.Shutdown(); err2 != nil {
logrus.Infof("error shutting down layer storage: %v", err2)
}
if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
return nil return nil

View file

@ -8,16 +8,20 @@ ocid - Enable OCI Kubernetes Container Runtime daemon
**ocid** **ocid**
[**--config**=[*value*]] [**--config**=[*value*]]
[**--conmon**=[*value*]] [**--conmon**=[*value*]]
[**--containerdir**=[*value*]]
[**--debug**] [**--debug**]
[**--default-transport**=[*value*]]
[**--help**|**-h**] [**--help**|**-h**]
[**--listen**=[*value*]] [**--listen**=[*value*]]
[**--log**=[*value*]] [**--log**=[*value*]]
[**--log-format value**] [**--log-format value**]
[**--pause**=[*value*]] [**--pause-command**=[*value*]]
[**--pause-image**=[*value*]]
[**--root**=[*value*]] [**--root**=[*value*]]
[**--runroot**=[*value*]]
[**--runtime**=[*value*]] [**--runtime**=[*value*]]
[**--sandboxdir**=[*value*]] [**--signature-policy**=[*value*]]
[**--storage-driver**=[*value*]]
[**--storage-option**=[*value*]]
[**--selinux**] [**--selinux**]
[**--seccomp-profile**=[*value*]] [**--seccomp-profile**=[*value*]]
[**--apparmor-profile**=[*value*]] [**--apparmor-profile**=[*value*]]
@ -43,18 +47,21 @@ ocid is meant to provide an integration path between OCI conformant runtimes and
# GLOBAL OPTIONS # GLOBAL OPTIONS
**--apparmor_profile**=""
Name of the apparmor profile to be used as the runtime's default (default: "ocid-default")
**--config**="" **--config**=""
path to configuration file path to configuration file
**--conmon**="" **--conmon**=""
path to the conmon executable (default: "/usr/libexec/ocid/conmon") path to the conmon executable (default: "/usr/libexec/ocid/conmon")
**--containerdir**=""
OCID container dir (default: "/var/lib/ocid/containers")
**--debug** **--debug**
Enable debug output for logging Enable debug output for logging
**--default-transport**
A prefix to prepend to image names that can't be pulled as-is.
**--help, -h** **--help, -h**
Print usage statement Print usage statement
@ -67,32 +74,41 @@ ocid is meant to provide an integration path between OCI conformant runtimes and
**--log-format**="" **--log-format**=""
Set the format used by logs ('text' (default), or 'json') (default: "text") Set the format used by logs ('text' (default), or 'json') (default: "text")
**--pause**="" **--pause-command**=""
Path to the pause executable (default: "/usr/libexec/ocid/pause") Path to the pause executable in the pause image (default: "/pause")
**--pause-image**=""
Image which contains the pause executable (default: "kubernetes/pause")
**--root**="" **--root**=""
OCID root dir (default: "/var/lib/ocid") OCID root dir (default: "/var/lib/containers")
**--runroot**=""
OCID state dir (default: "/var/run/containers")
**--runtime**="" **--runtime**=""
OCI runtime path (default: "/usr/bin/runc") OCI runtime path (default: "/usr/bin/runc")
**--sandboxdir**=""
OCID pod sandbox dir (default: "/var/lib/ocid/sandboxes")
**--selinux**=*true*|*false* **--selinux**=*true*|*false*
Enable selinux support (default: false) Enable selinux support (default: false)
**--seccomp_profile**="" **--seccomp-profile**=""
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json") Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json")
**--apparmor_profile**="" **--signature-policy**=""
Name of the apparmor profile to be used as the runtime's default (default: "ocid-default") Path to the signature policy json file (default: "", to use the system-wide default)
**--storage-driver**
OCI storage driver (default: "devicemapper")
**--storage-option**
OCI storage driver option (no default)
**--cni-config-dir**="" **--cni-config-dir**=""
CNI configuration files directory (defautl: "/etc/cni/net.d/") CNI configuration files directory (default: "/etc/cni/net.d/")
**--cni-plugin-dir**="" **--cni-plugin-dir**=""
CNI plugin binaries directory (defautl: "/opt/cni/bin/") CNI plugin binaries directory (default: "/opt/cni/bin/")
**--version, -v** **--version, -v**
Print the version Print the version

View file

@ -29,15 +29,17 @@ No bare options are used. The format of TOML can be simplified to:
The `ocid` table supports the following options: The `ocid` table supports the following options:
**container_dir**=""
OCID container dir (default: "/var/lib/ocid/containers")
**root**="" **root**=""
OCID root dir (default: "/var/lib/ocid") OCID root dir (default: "/var/lib/containers")
**sandbox_dir**="" **runroot**=""
OCID pod sandbox dir (default: "/var/lib/ocid/sandboxes") OCID state dir (default: "/var/run/containers")
**storage_driver**=""
OCID storage driver (default is "devicemapper")
**storage_option**=[]
OCID storage driver option list (no default)
## OCID.API TABLE ## OCID.API TABLE
@ -58,6 +60,9 @@ The `ocid` table supports the following options:
**selinux**=*true*|*false* **selinux**=*true*|*false*
Enable selinux support (default: false) Enable selinux support (default: false)
**signature_policy**=""
Path to the signature policy json file (default: "", to use the system-wide default)
**seccomp_profile**="" **seccomp_profile**=""
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json") Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json")
@ -66,8 +71,14 @@ The `ocid` table supports the following options:
## OCID.IMAGE TABLE ## OCID.IMAGE TABLE
**pause**="" **default_transport**
Path to the pause executable (default: "/usr/libexec/ocid/pause") A prefix to prepend to image names that can't be pulled as-is (default: "docker://")
**pause_command**=""
Path to the pause executable in the pause image (default: "/pause")
**pause_image**=""
Image which contains the pause executable (default: "kubernetes/pause")
## OCID.NETWORK TABLE ## OCID.NETWORK TABLE

View file

@ -34,11 +34,10 @@ const (
) )
// New creates a new Runtime with options provided // New creates a new Runtime with options provided
func New(runtimePath string, containerDir string, conmonPath string, conmonEnv []string, cgroupManager string) (*Runtime, error) { func New(runtimePath string, conmonPath string, conmonEnv []string, cgroupManager string) (*Runtime, error) {
r := &Runtime{ r := &Runtime{
name: filepath.Base(runtimePath), name: filepath.Base(runtimePath),
path: runtimePath, path: runtimePath,
containerDir: containerDir,
conmonPath: conmonPath, conmonPath: conmonPath,
conmonEnv: conmonEnv, conmonEnv: conmonEnv,
cgroupManager: cgroupManager, cgroupManager: cgroupManager,
@ -50,7 +49,6 @@ func New(runtimePath string, containerDir string, conmonPath string, conmonEnv [
type Runtime struct { type Runtime struct {
name string name string
path string path string
containerDir string
conmonPath string conmonPath string
conmonEnv []string conmonEnv []string
cgroupManager string cgroupManager string
@ -76,11 +74,6 @@ func (r *Runtime) Path() string {
return r.path return r.path
} }
// ContainerDir returns the path to the base directory for storing container configurations
func (r *Runtime) ContainerDir() string {
return r.containerDir
}
// Version returns the version of the OCI Runtime // Version returns the version of the OCI Runtime
func (r *Runtime) Version() (string, error) { func (r *Runtime) Version() (string, error) {
runtimeVersion, err := getOCIVersion(r.path, "-v") runtimeVersion, err := getOCIVersion(r.path, "-v")

View file

@ -3,7 +3,6 @@ package server
import ( import (
"bytes" "bytes"
"io/ioutil" "io/ioutil"
"path/filepath"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
"github.com/opencontainers/runc/libcontainer/selinux" "github.com/opencontainers/runc/libcontainer/selinux"
@ -11,16 +10,16 @@ import (
// Default paths if none are specified // Default paths if none are specified
const ( const (
ocidRoot = "/var/lib/ocid" ocidRoot = "/var/lib/ocid"
conmonPath = "/usr/libexec/ocid/conmon" ocidRunRoot = "/var/run/containers"
pausePath = "/usr/libexec/ocid/pause" conmonPath = "/usr/libexec/ocid/conmon"
seccompProfilePath = "/etc/ocid/seccomp.json" pauseImage = "kubernetes/pause"
cniConfigDir = "/etc/cni/net.d/" pauseCommand = "/pause"
cniBinDir = "/opt/cni/bin/" defaultTransport = "docker://"
) seccompProfilePath = "/etc/ocid/seccomp.json"
const (
apparmorProfileName = "ocid-default" apparmorProfileName = "ocid-default"
cniConfigDir = "/etc/cni/net.d/"
cniBinDir = "/opt/cni/bin/"
cgroupManager = "cgroupfs" cgroupManager = "cgroupfs"
) )
@ -40,17 +39,20 @@ type Config struct {
// RootConfig represents the root of the "ocid" TOML config table. // RootConfig represents the root of the "ocid" TOML config table.
type RootConfig struct { type RootConfig struct {
// Root is a path to the "root directory" where all information not // Root is a path to the "root directory" where data not
// explicitly handled by other options will be stored. // explicitly handled by other options will be stored.
Root string `toml:"root"` Root string `toml:"root"`
// SandboxDir is the directory where ocid will store all of its sandbox // RunRoot is a path to the "run directory" where state information not
// state and other information. // explicitly handled by other options will be stored.
SandboxDir string `toml:"sandbox_dir"` RunRoot string `toml:"runroot"`
// ContainerDir is the directory where ocid will store all of its container // Storage is the name of the storage driver which handles actually
// state and other information. // storing the contents of containers.
ContainerDir string `toml:"container_dir"` Storage string `toml:"storage_driver"`
// StorageOption is a list of storage driver specific options.
StorageOptions []string `toml:"storage_option"`
// LogDir is the default log directory were all logs will go unless kubelet // LogDir is the default log directory were all logs will go unless kubelet
// tells us to put them somewhere else. // tells us to put them somewhere else.
@ -98,17 +100,21 @@ type RuntimeConfig struct {
// ImageConfig represents the "ocid.image" TOML config table. // ImageConfig represents the "ocid.image" TOML config table.
type ImageConfig struct { type ImageConfig struct {
// Pause is the path to the statically linked pause container binary, used // DefaultTransport is a value we prefix to image names that fail to
// as the entrypoint for infra containers. // validate source references.
// DefaultTransport string `toml:"default_transport"`
// TODO(cyphar): This should be replaced with a path to an OCI image // PauseImage is the name of an image which we use to instantiate infra
// bundle, once the OCI image/storage code has been implemented. // containers.
Pause string `toml:"pause"` PauseImage string `toml:"pause_image"`
// PauseCommand is the path of the binary we run in an infra
// ImageStore is the directory where the ocid image store will be stored. // container that's been instantiated using PauseImage.
// TODO: This is currently not really used because we don't have PauseCommand string `toml:"pause_command"`
// containers/storage integrated. // SignaturePolicyPath is the name of the file which decides what sort
ImageDir string `toml:"image_dir"` // of policy we use when deciding whether or not to trust an image that
// we've pulled. Outside of testing situations, it is strongly advised
// that this be left unspecified so that the default system-wide policy
// will be used.
SignaturePolicyPath string `toml:"signature_policy"`
} }
// NetworkConfig represents the "ocid.network" TOML config table // NetworkConfig represents the "ocid.network" TOML config table
@ -191,10 +197,9 @@ func (c *Config) ToFile(path string) error {
func DefaultConfig() *Config { func DefaultConfig() *Config {
return &Config{ return &Config{
RootConfig: RootConfig{ RootConfig: RootConfig{
Root: ocidRoot, Root: ocidRoot,
SandboxDir: filepath.Join(ocidRoot, "sandboxes"), RunRoot: ocidRunRoot,
ContainerDir: filepath.Join(ocidRoot, "containers"), LogDir: "/var/log/ocid/pods",
LogDir: "/var/log/ocid/pods",
}, },
APIConfig: APIConfig{ APIConfig: APIConfig{
Listen: "/var/run/ocid.sock", Listen: "/var/run/ocid.sock",
@ -211,8 +216,10 @@ func DefaultConfig() *Config {
CgroupManager: cgroupManager, CgroupManager: cgroupManager,
}, },
ImageConfig: ImageConfig{ ImageConfig: ImageConfig{
Pause: pausePath, DefaultTransport: defaultTransport,
ImageDir: filepath.Join(ocidRoot, "store"), PauseImage: pauseImage,
PauseCommand: pauseCommand,
SignaturePolicyPath: "",
}, },
NetworkConfig: NetworkConfig{ NetworkConfig: NetworkConfig{
NetworkDir: cniConfigDir, NetworkDir: cniConfigDir,

View file

@ -4,7 +4,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"os"
"path/filepath" "path/filepath"
"strings" "strings"
"syscall" "syscall"
@ -14,7 +13,6 @@ import (
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/server/apparmor" "github.com/kubernetes-incubator/cri-o/server/apparmor"
"github.com/kubernetes-incubator/cri-o/server/seccomp" "github.com/kubernetes-incubator/cri-o/server/seccomp"
"github.com/kubernetes-incubator/cri-o/utils"
"github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/generate"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -30,6 +28,7 @@ const (
// CreateContainer creates a new container in specified PodSandbox // CreateContainer creates a new container in specified PodSandbox
func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) { func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) {
logrus.Debugf("CreateContainerRequest %+v", req) logrus.Debugf("CreateContainerRequest %+v", req)
s.Update()
sbID := req.GetPodSandboxId() sbID := req.GetPodSandboxId()
if sbID == "" { if sbID == "" {
return nil, fmt.Errorf("PodSandboxId should not be empty") return nil, fmt.Errorf("PodSandboxId should not be empty")
@ -62,30 +61,24 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
return nil, err return nil, err
} }
// containerDir is the dir for the container bundle.
containerDir := filepath.Join(s.runtime.ContainerDir(), containerID)
defer func() { defer func() {
if err != nil { if err != nil {
s.releaseContainerName(containerName) s.releaseContainerName(containerName)
err1 := os.RemoveAll(containerDir)
if err1 != nil {
logrus.Warnf("Failed to cleanup container directory: %v", err1)
}
} }
}() }()
if _, err = os.Stat(containerDir); err == nil { container, err := s.createSandboxContainer(ctx, containerID, containerName, sb, req.GetSandboxConfig(), containerConfig)
return nil, fmt.Errorf("container (%s) already exists", containerDir)
}
if err = os.MkdirAll(containerDir, 0755); err != nil {
return nil, err
}
container, err := s.createSandboxContainer(containerID, containerName, sb, containerDir, containerConfig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer func() {
if err != nil {
err2 := s.storage.DeleteContainer(containerID)
if err2 != nil {
logrus.Warnf("Failed to cleanup container directory: %v", err2)
}
}
}()
if err = s.runtime.CreateContainer(container); err != nil { if err = s.runtime.CreateContainer(container); err != nil {
return nil, err return nil, err
@ -110,23 +103,21 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
return resp, nil return resp, nil
} }
func (s *Server) createSandboxContainer(containerID string, containerName string, sb *sandbox, containerDir string, containerConfig *pb.ContainerConfig) (*oci.Container, error) { func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerName string, sb *sandbox, SandboxConfig *pb.PodSandboxConfig, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
if sb == nil { if sb == nil {
return nil, errors.New("createSandboxContainer needs a sandbox") return nil, errors.New("createSandboxContainer needs a sandbox")
} }
// TODO: factor generating/updating the spec into something other projects can vendor
// creates a spec Generator with the default spec. // creates a spec Generator with the default spec.
specgen := generate.New() specgen := generate.New()
// by default, the root path is an empty string.
// here set it to be "rootfs".
specgen.SetRootPath("rootfs")
processArgs := []string{} processArgs := []string{}
commands := containerConfig.GetCommand() commands := containerConfig.GetCommand()
args := containerConfig.GetArgs() args := containerConfig.GetArgs()
if commands == nil && args == nil { if commands == nil && args == nil {
// TODO: override with image's config in #189 processArgs = nil
processArgs = []string{"/bin/sh"}
} }
if commands != nil { if commands != nil {
processArgs = append(processArgs, commands...) processArgs = append(processArgs, commands...)
@ -135,8 +126,6 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
processArgs = append(processArgs, args...) processArgs = append(processArgs, args...)
} }
specgen.SetProcessArgs(processArgs)
cwd := containerConfig.GetWorkingDir() cwd := containerConfig.GetWorkingDir()
if cwd == "" { if cwd == "" {
cwd = "/" cwd = "/"
@ -357,17 +346,46 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
return nil, err return nil, err
} }
if err = specgen.SaveToFile(filepath.Join(containerDir, "config.json"), generate.ExportOptions{}); err != nil { metaname := metadata.GetName()
attempt := metadata.GetAttempt()
containerInfo, err := s.storage.CreateContainer(s.imageContext,
sb.name, sb.id,
image, "",
containerName, containerID,
metaname,
attempt,
sb.mountLabel,
nil)
if err != nil {
return nil, err return nil, err
} }
// TODO: copy the rootfs into the bundle. mountPoint, err := s.storage.StartContainer(containerID)
// Currently, utils.CreateFakeRootfs is used to populate the rootfs. if err != nil {
if err = utils.CreateFakeRootfs(containerDir, image); err != nil { return nil, fmt.Errorf("failed to mount container %s(%s): %v", containerName, containerID, err)
}
if processArgs == nil {
if containerInfo.Config != nil && len(containerInfo.Config.Config.Cmd) > 0 {
processArgs = containerInfo.Config.Config.Cmd
} else {
processArgs = []string{"/bin/sh"}
}
}
specgen.SetProcessArgs(processArgs)
// by default, the root path is an empty string. set it now.
specgen.SetRootPath(mountPoint)
saveOptions := generate.ExportOptions{}
if err = specgen.SaveToFile(filepath.Join(containerInfo.Dir, "config.json"), saveOptions); err != nil {
return nil, err
}
if err = specgen.SaveToFile(filepath.Join(containerInfo.RunDir, "config.json"), saveOptions); err != nil {
return nil, err return nil, err
} }
container, err := oci.NewContainer(containerID, containerName, containerDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.GetTty()) container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.GetTty())
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -29,6 +29,7 @@ func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool {
// ListContainers lists all containers by filters. // ListContainers lists all containers by filters.
func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) { func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
logrus.Debugf("ListContainersRequest %+v", req) logrus.Debugf("ListContainersRequest %+v", req)
s.Update()
var ctrs []*pb.Container var ctrs []*pb.Container
filter := req.Filter filter := req.Filter
ctrList := s.state.containers.List() ctrList := s.state.containers.List()

View file

@ -2,8 +2,6 @@ package server
import ( import (
"fmt" "fmt"
"os"
"path/filepath"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
@ -15,6 +13,7 @@ import (
// should be force removed. // should be force removed.
func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) { func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) {
logrus.Debugf("RemoveContainerRequest %+v", req) logrus.Debugf("RemoveContainerRequest %+v", req)
s.Update()
c, err := s.getContainerFromRequest(req) c, err := s.getContainerFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err
@ -35,9 +34,12 @@ func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerReq
return nil, fmt.Errorf("failed to delete container %s: %v", c.ID(), err) return nil, fmt.Errorf("failed to delete container %s: %v", c.ID(), err)
} }
containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID()) if err := s.storage.StopContainer(c.ID()); err != nil {
if err := os.RemoveAll(containerDir); err != nil { return nil, fmt.Errorf("failed to unmount container %s: %v", c.ID(), err)
return nil, fmt.Errorf("failed to remove container %s directory: %v", c.ID(), err) }
if err := s.storage.DeleteContainer(c.ID()); err != nil {
return nil, fmt.Errorf("failed to delete storage for container %s: %v", c.ID(), err)
} }
s.releaseContainerName(c.Name()) s.releaseContainerName(c.Name())

View file

@ -11,12 +11,13 @@ import (
// StartContainer starts the container. // StartContainer starts the container.
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) { func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
logrus.Debugf("StartContainerRequest %+v", req) logrus.Debugf("StartContainerRequest %+v", req)
s.Update()
c, err := s.getContainerFromRequest(req) c, err := s.getContainerFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err := s.runtime.StartContainer(c); err != nil { if err = s.runtime.StartContainer(c); err != nil {
return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err) return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
} }

View file

@ -10,6 +10,7 @@ import (
// ContainerStatus returns status of the container. // ContainerStatus returns status of the container.
func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) { func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
logrus.Debugf("ContainerStatusRequest %+v", req) logrus.Debugf("ContainerStatusRequest %+v", req)
s.Update()
c, err := s.getContainerFromRequest(req) c, err := s.getContainerFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -12,6 +12,7 @@ import (
// StopContainer stops a running container with a grace period (i.e., timeout). // StopContainer stops a running container with a grace period (i.e., timeout).
func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) { func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
logrus.Debugf("StopContainerRequest %+v", req) logrus.Debugf("StopContainerRequest %+v", req)
s.Update()
c, err := s.getContainerFromRequest(req) c, err := s.getContainerFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -8,9 +8,27 @@ import (
// ListImages lists existing images. // ListImages lists existing images.
func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) { func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
logrus.Debugf("ListImages: %+v", req) logrus.Debugf("ListImagesRequest: %+v", req)
// TODO filter := ""
// containers/storage will take care of this by looking inside /var/lib/ocid/images reqFilter := req.GetFilter()
// and listing images. if reqFilter != nil {
return &pb.ListImagesResponse{}, nil filterImage := reqFilter.GetImage()
if filterImage != nil {
filter = filterImage.GetImage()
}
}
results, err := s.images.ListImages(filter)
if err != nil {
return nil, err
}
response := pb.ListImagesResponse{}
for _, result := range results {
response.Images = append(response.Images, &pb.Image{
Id: sPtr(result.ID),
RepoTags: result.Names,
Size_: result.Size,
})
}
logrus.Debugf("ListImagesResponse: %+v", response)
return &response, nil
} }

View file

@ -1,86 +1,28 @@
package server package server
import ( import (
"errors"
"io"
"os"
"path/filepath"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/directory" "github.com/containers/image/copy"
"github.com/containers/image/image"
"github.com/containers/image/transports"
"golang.org/x/net/context" "golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
// PullImage pulls a image with authentication config. // PullImage pulls a image with authentication config.
func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) { func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
logrus.Debugf("PullImage: %+v", req) logrus.Debugf("PullImageRequest: %+v", req)
img := req.GetImage().GetImage() // TODO(runcom?): deal with AuthConfig in req.GetAuth()
if img == "" {
return nil, errors.New("got empty imagespec name")
}
// TODO(runcom): deal with AuthConfig in req.GetAuth()
// TODO(mrunalp,runcom): why do we need the SandboxConfig here?
// how do we pull in a specified sandbox?
tr, err := transports.ParseImageName(img)
if err != nil {
return nil, err
}
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
src, err := tr.NewImageSource(nil, nil)
if err != nil {
return nil, err
}
i, err := image.FromSource(src)
if err != nil {
return nil, err
}
blobs := i.LayerInfos()
config := i.ConfigInfo()
if config.Digest != "" {
blobs = append(blobs, config)
}
if err = os.Mkdir(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()), 0755); err != nil {
return nil, err
}
dir, err := directory.NewReference(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()))
if err != nil {
return nil, err
}
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
dest, err := dir.NewImageDestination(nil)
if err != nil {
return nil, err
}
// save blobs (layer + config for docker v2s2, layers only for docker v2s1 [the config is in the manifest])
for _, b := range blobs {
// TODO(runcom,nalin): we need do-then-commit to later purge on error
var r io.ReadCloser
r, _, err = src.GetBlob(b)
if err != nil {
return nil, err
}
if _, err = dest.PutBlob(r, b); err != nil {
r.Close()
return nil, err
}
r.Close()
}
// save manifest
m, _, err := i.Manifest()
if err != nil {
return nil, err
}
if err := dest.PutManifest(m); err != nil {
return nil, err
}
// TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://) // TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://)
image := ""
return &pb.PullImageResponse{}, nil img := req.GetImage()
if img != nil {
image = img.GetImage()
}
options := &copy.Options{}
_, err := s.images.PullImage(s.imageContext, image, options)
if err != nil {
return nil, err
}
resp := &pb.PullImageResponse{}
logrus.Debugf("PullImageResponse: %+v", resp)
return resp, nil
} }

View file

@ -1,6 +1,8 @@
package server package server
import ( import (
"fmt"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
@ -8,6 +10,20 @@ import (
// RemoveImage removes the image. // RemoveImage removes the image.
func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) { func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
logrus.Debugf("RemoveImage: %+v", req) logrus.Debugf("RemoveImageRequest: %+v", req)
return &pb.RemoveImageResponse{}, nil image := ""
img := req.GetImage()
if img != nil {
image = img.GetImage()
}
if image == "" {
return nil, fmt.Errorf("no image specified")
}
err := s.images.RemoveImage(s.imageContext, image)
if err != nil {
return nil, err
}
resp := &pb.RemoveImageResponse{}
logrus.Debugf("RemoveImageResponse: %+v", resp)
return resp, nil
} }

View file

@ -1,6 +1,8 @@
package server package server
import ( import (
"fmt"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
@ -8,9 +10,26 @@ import (
// ImageStatus returns the status of the image. // ImageStatus returns the status of the image.
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) { func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
logrus.Debugf("ImageStatus: %+v", req) logrus.Debugf("ImageStatusRequest: %+v", req)
// TODO image := ""
// containers/storage will take care of this by looking inside /var/lib/ocid/images img := req.GetImage()
// and getting the image status if img != nil {
return &pb.ImageStatusResponse{}, nil image = img.GetImage()
}
if image == "" {
return nil, fmt.Errorf("no image specified")
}
status, err := s.images.ImageStatus(s.imageContext, image)
if err != nil {
return nil, err
}
resp := &pb.ImageStatusResponse{
Image: &pb.Image{
Id: &status.ID,
RepoTags: status.Names,
Size_: status.Size,
},
}
logrus.Debugf("ImageStatusResponse: %+v", resp)
return resp, nil
} }

View file

@ -145,6 +145,7 @@ const (
podDefaultNamespace = "default" podDefaultNamespace = "default"
defaultShmSize = 64 * 1024 * 1024 defaultShmSize = 64 * 1024 * 1024
nsRunDir = "/var/run/netns" nsRunDir = "/var/run/netns"
podInfraCommand = "/pause"
) )
var ( var (
@ -277,7 +278,7 @@ func (s *Server) getPodSandboxFromRequest(req podSandboxRequest) (*sandbox, erro
sb := s.getSandbox(sandboxID) sb := s.getSandbox(sandboxID)
if sb == nil { if sb == nil {
return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID) return nil, fmt.Errorf("specified pod sandbox not found: %s", sandboxID)
} }
return sb, nil return sb, nil
} }

View file

@ -29,6 +29,7 @@ func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
// ListPodSandbox returns a list of SandBoxes. // ListPodSandbox returns a list of SandBoxes.
func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) { func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
logrus.Debugf("ListPodSandboxRequest %+v", req) logrus.Debugf("ListPodSandboxRequest %+v", req)
s.Update()
var pods []*pb.PodSandbox var pods []*pb.PodSandbox
var podList []*sandbox var podList []*sandbox
for _, sb := range s.state.sandboxes { for _, sb := range s.state.sandboxes {

View file

@ -2,8 +2,6 @@ package server
import ( import (
"fmt" "fmt"
"os"
"path/filepath"
"syscall" "syscall"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
@ -17,6 +15,7 @@ import (
// sandbox, they should be force deleted. // sandbox, they should be force deleted.
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) { func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
logrus.Debugf("RemovePodSandboxRequest %+v", req) logrus.Debugf("RemovePodSandboxRequest %+v", req)
s.Update()
sb, err := s.getPodSandboxFromRequest(req) sb, err := s.getPodSandboxFromRequest(req)
if err != nil { if err != nil {
if err == errSandboxIDEmpty { if err == errSandboxIDEmpty {
@ -46,16 +45,18 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
} }
if err := s.runtime.DeleteContainer(c); err != nil { if err := s.runtime.DeleteContainer(c); err != nil {
return nil, fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), sb.id, err) return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
} }
if c == podInfraContainer { if c == podInfraContainer {
continue continue
} }
containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID()) if err := s.storage.StopContainer(c.ID()); err != nil {
if err := os.RemoveAll(containerDir); err != nil { return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
return nil, fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err) }
if err := s.storage.DeleteContainer(c.ID()); err != nil {
return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
} }
s.releaseContainerName(c.Name()) s.releaseContainerName(c.Name())
@ -81,10 +82,13 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
} }
// Remove the files related to the sandbox // Remove the files related to the sandbox
podSandboxDir := filepath.Join(s.config.SandboxDir, sb.id) if err := s.storage.StopContainer(sb.id); err != nil {
if err := os.RemoveAll(podSandboxDir); err != nil { return nil, fmt.Errorf("failed to delete sandbox container in pod sandbox %s: %v", sb.id, err)
return nil, fmt.Errorf("failed to remove sandbox %s directory: %v", sb.id, err)
} }
if err := s.storage.RemovePodSandbox(sb.id); err != nil {
return nil, fmt.Errorf("failed to remove pod sandbox %s: %v", sb.id, err)
}
s.releaseContainerName(podInfraContainer.Name()) s.releaseContainerName(podInfraContainer.Name())
s.removeContainer(podInfraContainer) s.removeContainer(podInfraContainer)
sb.infraContainer = nil sb.infraContainer = nil

View file

@ -9,8 +9,8 @@ import (
"syscall" "syscall"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/storage/storage"
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/utils"
"github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/generate"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -54,6 +54,10 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
if err != nil { if err != nil {
return nil, err return nil, err
} }
_, containerName, err := s.generateContainerIDandName(name, "infra", attempt)
if err != nil {
return nil, err
}
defer func() { defer func() {
if err != nil { if err != nil {
@ -67,39 +71,51 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
defer func() { defer func() {
if err != nil { if err != nil {
if err = s.podIDIndex.Delete(id); err != nil { if err2 := s.podIDIndex.Delete(id); err2 != nil {
logrus.Warnf("couldn't delete pod id %s from idIndex", id) logrus.Warnf("couldn't delete pod id %s from idIndex", id)
} }
} }
}() }()
podSandboxDir := filepath.Join(s.config.SandboxDir, id) podContainer, err := s.storage.CreatePodSandbox(s.imageContext,
if _, err = os.Stat(podSandboxDir); err == nil { name, id,
return nil, fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir) s.config.PauseImage, "",
containerName,
req.GetConfig().GetMetadata().GetName(),
req.GetConfig().GetMetadata().GetUid(),
namespace,
attempt,
nil)
if err == storage.ErrDuplicateName {
return nil, fmt.Errorf("pod sandbox with name %q already exists", name)
}
if err != nil {
return nil, fmt.Errorf("error creating pod sandbox with name %q: %v", name, err)
} }
defer func() { defer func() {
if err != nil { if err != nil {
if err2 := os.RemoveAll(podSandboxDir); err2 != nil { if err2 := s.storage.RemovePodSandbox(id); err2 != nil {
logrus.Warnf("couldn't cleanup podSandboxDir %s: %v", podSandboxDir, err2) logrus.Warnf("couldn't cleanup pod sandbox %q: %v", id, err2)
} }
} }
}() }()
if err = os.MkdirAll(podSandboxDir, 0755); err != nil { // TODO: factor generating/updating the spec into something other projects can vendor
return nil, err
}
// creates a spec Generator with the default spec. // creates a spec Generator with the default spec.
g := generate.New() g := generate.New()
// TODO: Make the `graph/vfs` part of this configurable once the storage
// integration has been merged.
podInfraRootfs := filepath.Join(s.config.Root, "graph/vfs/pause")
// setup defaults for the pod sandbox // setup defaults for the pod sandbox
g.SetRootPath(filepath.Join(podInfraRootfs, "rootfs"))
g.SetRootReadonly(true) g.SetRootReadonly(true)
g.SetProcessArgs([]string{"/pause"}) if s.config.PauseCommand == "" {
if podContainer.Config != nil {
g.SetProcessArgs(podContainer.Config.Config.Cmd)
} else {
g.SetProcessArgs([]string{podInfraCommand})
}
} else {
g.SetProcessArgs([]string{s.config.PauseCommand})
}
// set hostname // set hostname
hostname := req.GetConfig().GetHostname() hostname := req.GetConfig().GetHostname()
@ -117,7 +133,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
dnsServers := req.GetConfig().GetDnsConfig().GetServers() dnsServers := req.GetConfig().GetDnsConfig().GetServers()
dnsSearches := req.GetConfig().GetDnsConfig().GetSearches() dnsSearches := req.GetConfig().GetDnsConfig().GetSearches()
dnsOptions := req.GetConfig().GetDnsConfig().GetOptions() dnsOptions := req.GetConfig().GetDnsConfig().GetOptions()
resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir) resolvPath := fmt.Sprintf("%s/resolv.conf", podContainer.RunDir)
err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath) err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath)
if err != nil { if err != nil {
err1 := removeFile(resolvPath) err1 := removeFile(resolvPath)
@ -165,7 +181,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() { if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
shmPath = "/dev/shm" shmPath = "/dev/shm"
} else { } else {
shmPath, err = setupShm(podSandboxDir, mountLabel) shmPath, err = setupShm(podContainer.RunDir, mountLabel)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -178,7 +194,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
}() }()
} }
containerID, containerName, err := s.generateContainerIDandName(name, "infra", 0) err = s.setPodSandboxMountLabel(id, mountLabel)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -189,14 +205,14 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
} }
}() }()
if err = s.ctrIDIndex.Add(containerID); err != nil { if err = s.ctrIDIndex.Add(id); err != nil {
return nil, err return nil, err
} }
defer func() { defer func() {
if err != nil { if err != nil {
if err = s.ctrIDIndex.Delete(containerID); err != nil { if err2 := s.ctrIDIndex.Delete(id); err2 != nil {
logrus.Warnf("couldn't delete ctr id %s from idIndex", containerID) logrus.Warnf("couldn't delete ctr id %s from idIndex", id)
} }
} }
}() }()
@ -207,8 +223,9 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
g.AddAnnotation("ocid/log_path", logDir) g.AddAnnotation("ocid/log_path", logDir)
g.AddAnnotation("ocid/name", name) g.AddAnnotation("ocid/name", name)
g.AddAnnotation("ocid/container_type", containerTypeSandbox) g.AddAnnotation("ocid/container_type", containerTypeSandbox)
g.AddAnnotation("ocid/sandbox_id", id)
g.AddAnnotation("ocid/container_name", containerName) g.AddAnnotation("ocid/container_name", containerName)
g.AddAnnotation("ocid/container_id", containerID) g.AddAnnotation("ocid/container_id", id)
g.AddAnnotation("ocid/shm_path", shmPath) g.AddAnnotation("ocid/shm_path", shmPath)
sb := &sandbox{ sb := &sandbox{
@ -246,11 +263,11 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
cgroupParent := req.GetConfig().GetLinux().GetCgroupParent() cgroupParent := req.GetConfig().GetLinux().GetCgroupParent()
if cgroupParent != "" { if cgroupParent != "" {
if s.config.CgroupManager == "systemd" { if s.config.CgroupManager == "systemd" {
cgPath := sb.cgroupParent + ":" + "ocid" + ":" + containerID cgPath := sb.cgroupParent + ":" + "ocid" + ":" + id
g.SetLinuxCgroupsPath(cgPath) g.SetLinuxCgroupsPath(cgPath)
} else { } else {
g.SetLinuxCgroupsPath(sb.cgroupParent + "/" + containerID) g.SetLinuxCgroupsPath(sb.cgroupParent + "/" + id)
} }
sb.cgroupParent = cgroupParent sb.cgroupParent = cgroupParent
@ -308,23 +325,21 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
} }
} }
err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"), generate.ExportOptions{}) saveOptions := generate.ExportOptions{}
mountPoint, err := s.storage.StartContainer(id)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("failed to mount container %s in pod sandbox %s(%s): %v", containerName, sb.name, id, err)
}
g.SetRootPath(mountPoint)
err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
if err != nil {
return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.name, id, err)
}
if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil {
return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.name, id, err)
} }
if _, err = os.Stat(podInfraRootfs); err != nil { container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logDir, sb.netNs(), labels, annotations, nil, nil, id, false)
if os.IsNotExist(err) {
// TODO: Replace by rootfs creation API when it is ready
if err = utils.CreateInfraRootfs(podInfraRootfs, s.config.Pause); err != nil {
return nil, err
}
} else {
return nil, err
}
}
container, err := oci.NewContainer(containerID, containerName, podSandboxDir, podSandboxDir, sb.netNs(), labels, annotations, nil, nil, id, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -348,6 +363,19 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
return resp, nil return resp, nil
} }
func (s *Server) setPodSandboxMountLabel(id, mountLabel string) error {
storageMetadata, err := s.storage.GetContainerMetadata(id)
if err != nil {
return err
}
storageMetadata.SetMountLabel(mountLabel)
err = s.storage.SetContainerMetadata(id, storageMetadata)
if err != nil {
return err
}
return nil
}
func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) { func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) {
processLabel = "" processLabel = ""
if selinuxOptions != nil { if selinuxOptions != nil {
@ -375,8 +403,8 @@ func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mo
return label.InitLabels(label.DupSecOpt(processLabel)) return label.InitLabels(label.DupSecOpt(processLabel))
} }
func setupShm(podSandboxDir, mountLabel string) (shmPath string, err error) { func setupShm(podSandboxRunDir, mountLabel string) (shmPath string, err error) {
shmPath = filepath.Join(podSandboxDir, "shm") shmPath = filepath.Join(podSandboxRunDir, "shm")
if err = os.Mkdir(shmPath, 0700); err != nil { if err = os.Mkdir(shmPath, 0700); err != nil {
return "", err return "", err
} }

View file

@ -10,6 +10,7 @@ import (
// PodSandboxStatus returns the Status of the PodSandbox. // PodSandboxStatus returns the Status of the PodSandbox.
func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) { func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
logrus.Debugf("PodSandboxStatusRequest %+v", req) logrus.Debugf("PodSandboxStatusRequest %+v", req)
s.Update()
sb, err := s.getPodSandboxFromRequest(req) sb, err := s.getPodSandboxFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -14,6 +14,7 @@ import (
// sandbox, they should be force terminated. // sandbox, they should be force terminated.
func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) { func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
logrus.Debugf("StopPodSandboxRequest %+v", req) logrus.Debugf("StopPodSandboxRequest %+v", req)
s.Update()
sb, err := s.getPodSandboxFromRequest(req) sb, err := s.getPodSandboxFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err
@ -50,7 +51,7 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
cStatus := s.runtime.ContainerStatus(c) cStatus := s.runtime.ContainerStatus(c)
if cStatus.Status != oci.ContainerStateStopped { if cStatus.Status != oci.ContainerStateStopped {
if err := s.runtime.StopContainer(c); err != nil { if err := s.runtime.StopContainer(c); err != nil {
return nil, fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), sb.id, err) return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
} }
} }
} }

View file

@ -5,14 +5,16 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath"
"sync" "sync"
"syscall" "syscall"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/types"
sstorage "github.com/containers/storage/storage"
"github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/pkg/truncindex"
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/pkg/storage"
"github.com/kubernetes-incubator/cri-o/server/apparmor" "github.com/kubernetes-incubator/cri-o/server/apparmor"
"github.com/kubernetes-incubator/cri-o/server/seccomp" "github.com/kubernetes-incubator/cri-o/server/seccomp"
"github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/label"
@ -29,6 +31,9 @@ const (
type Server struct { type Server struct {
config Config config Config
runtime *oci.Runtime runtime *oci.Runtime
store sstorage.Store
images storage.ImageServer
storage storage.RuntimeServer
stateLock sync.Mutex stateLock sync.Mutex
state *serverState state *serverState
netPlugin ocicni.CNIPlugin netPlugin ocicni.CNIPlugin
@ -36,6 +41,7 @@ type Server struct {
podIDIndex *truncindex.TruncIndex podIDIndex *truncindex.TruncIndex
ctrNameIndex *registrar.Registrar ctrNameIndex *registrar.Registrar
ctrIDIndex *truncindex.TruncIndex ctrIDIndex *truncindex.TruncIndex
imageContext *types.SystemContext
seccompEnabled bool seccompEnabled bool
seccompProfile seccomp.Seccomp seccompProfile seccomp.Seccomp
@ -45,7 +51,7 @@ type Server struct {
} }
func (s *Server) loadContainer(id string) error { func (s *Server) loadContainer(id string) error {
config, err := ioutil.ReadFile(filepath.Join(s.runtime.ContainerDir(), id, "config.json")) config, err := s.store.GetFromContainerDirectory(id, "config.json")
if err != nil { if err != nil {
return err return err
} }
@ -76,7 +82,10 @@ func (s *Server) loadContainer(id string) error {
if v := m.Annotations["ocid/tty"]; v == "true" { if v := m.Annotations["ocid/tty"]; v == "true" {
tty = true tty = true
} }
containerPath := filepath.Join(s.runtime.ContainerDir(), id) containerPath, err := s.store.GetContainerRunDirectory(id)
if err != nil {
return err
}
var img *pb.ImageSpec var img *pb.ImageSpec
image, ok := m.Annotations["ocid/image"] image, ok := m.Annotations["ocid/image"]
@ -122,7 +131,7 @@ func configNetNsPath(spec rspec.Spec) (string, error) {
} }
func (s *Server) loadSandbox(id string) error { func (s *Server) loadSandbox(id string) error {
config, err := ioutil.ReadFile(filepath.Join(s.config.SandboxDir, id, "config.json")) config, err := s.store.GetFromContainerDirectory(id, "config.json")
if err != nil { if err != nil {
return err return err
} }
@ -184,7 +193,10 @@ func (s *Server) loadSandbox(id string) error {
s.addSandbox(sb) s.addSandbox(sb)
sandboxPath := filepath.Join(s.config.SandboxDir, id) sandboxPath, err := s.store.GetContainerRunDirectory(id)
if err != nil {
return err
}
if err = label.ReserveLabel(processLabel); err != nil { if err = label.ReserveLabel(processLabel); err != nil {
return err return err
@ -200,7 +212,7 @@ func (s *Server) loadSandbox(id string) error {
} }
sb.infraContainer = scontainer sb.infraContainer = scontainer
if err = s.runtime.UpdateStatus(scontainer); err != nil { if err = s.runtime.UpdateStatus(scontainer); err != nil {
logrus.Warnf("error updating status for container %s: %v", scontainer.ID(), err) logrus.Warnf("error updating status for pod sandbox infra container %s: %v", scontainer.ID(), err)
} }
if err = s.ctrIDIndex.Add(scontainer.ID()); err != nil { if err = s.ctrIDIndex.Add(scontainer.ID()); err != nil {
return err return err
@ -212,31 +224,138 @@ func (s *Server) loadSandbox(id string) error {
} }
func (s *Server) restore() { func (s *Server) restore() {
sandboxDir, err := ioutil.ReadDir(s.config.SandboxDir) containers, err := s.store.Containers()
if err != nil && !os.IsNotExist(err) { if err != nil && !os.IsNotExist(err) {
logrus.Warnf("could not read sandbox directory %s: %v", sandboxDir, err) logrus.Warnf("could not read containers and sandboxes: %v", err)
} }
for _, v := range sandboxDir { pods := map[string]*storage.RuntimeContainerMetadata{}
if !v.IsDir() { podContainers := map[string]*storage.RuntimeContainerMetadata{}
for _, container := range containers {
metadata, err2 := s.storage.GetContainerMetadata(container.ID)
if err2 != nil {
logrus.Warnf("error parsing metadata for %s: %v, ignoring", container.ID, err2)
continue continue
} }
if err = s.loadSandbox(v.Name()); err != nil { if metadata.Pod {
logrus.Warnf("could not restore sandbox %s: %v", v.Name(), err) pods[container.ID] = &metadata
} else {
podContainers[container.ID] = &metadata
} }
} }
containerDir, err := ioutil.ReadDir(s.runtime.ContainerDir()) for containerID, metadata := range pods {
if err != nil && !os.IsNotExist(err) { if err = s.loadSandbox(containerID); err != nil {
logrus.Warnf("could not read container directory %s: %v", s.runtime.ContainerDir(), err) logrus.Warnf("could not restore sandbox %s container %s: %v", metadata.PodID, containerID, err)
}
for _, v := range containerDir {
if !v.IsDir() {
continue
} }
if err := s.loadContainer(v.Name()); err != nil { }
logrus.Warnf("could not restore container %s: %v", v.Name(), err) for containerID := range podContainers {
if err := s.loadContainer(containerID); err != nil {
logrus.Warnf("could not restore container %s: %v", containerID, err)
}
}
}
// Update makes changes to the server's state (lists of pods and containers) to
// reflect the list of pods and containers that are stored on disk, possibly
// having been modified by other parties
func (s *Server) Update() {
logrus.Debugf("updating sandbox and container information")
if err := s.update(); err != nil {
logrus.Errorf("error updating sandbox and container information: %v", err)
}
}
func (s *Server) update() error {
containers, err := s.store.Containers()
if err != nil && !os.IsNotExist(err) {
logrus.Warnf("could not read containers and sandboxes: %v", err)
return err
}
newPods := map[string]*storage.RuntimeContainerMetadata{}
oldPods := map[string]string{}
removedPods := map[string]string{}
newPodContainers := map[string]*storage.RuntimeContainerMetadata{}
oldPodContainers := map[string]string{}
removedPodContainers := map[string]string{}
for _, container := range containers {
if s.hasSandbox(container.ID) {
// FIXME: do we need to reload/update any info about the sandbox?
oldPods[container.ID] = container.ID
oldPodContainers[container.ID] = container.ID
continue
}
if s.getContainer(container.ID) != nil {
// FIXME: do we need to reload/update any info about the container?
oldPodContainers[container.ID] = container.ID
continue
}
// not previously known, so figure out what it is
metadata, err2 := s.storage.GetContainerMetadata(container.ID)
if err2 != nil {
logrus.Errorf("error parsing metadata for %s: %v, ignoring", container.ID, err2)
continue
}
if metadata.Pod {
newPods[container.ID] = &metadata
} else {
newPodContainers[container.ID] = &metadata
} }
} }
s.ctrIDIndex.Iterate(func(id string) {
if _, ok := oldPodContainers[id]; !ok {
// this container's ID wasn't in the updated list -> removed
removedPodContainers[id] = id
}
})
for removedPodContainer := range removedPodContainers {
// forget this container
c := s.getContainer(removedPodContainer)
s.releaseContainerName(c.Name())
s.removeContainer(c)
if err = s.ctrIDIndex.Delete(c.ID()); err != nil {
return err
}
logrus.Debugf("forgetting removed pod container %s", c.ID())
}
s.podIDIndex.Iterate(func(id string) {
if _, ok := oldPods[id]; !ok {
// this pod's ID wasn't in the updated list -> removed
removedPods[id] = id
}
})
for removedPod := range removedPods {
// forget this pod
sb := s.getSandbox(removedPod)
podInfraContainer := sb.infraContainer
s.releaseContainerName(podInfraContainer.Name())
s.removeContainer(podInfraContainer)
if err = s.ctrIDIndex.Delete(podInfraContainer.ID()); err != nil {
return err
}
sb.infraContainer = nil
s.releasePodName(sb.name)
s.removeSandbox(sb.id)
if err = s.podIDIndex.Delete(sb.id); err != nil {
return err
}
logrus.Debugf("forgetting removed pod %s", sb.id)
}
for sandboxID := range newPods {
// load this pod
if err = s.loadSandbox(sandboxID); err != nil {
logrus.Warnf("could not load new pod sandbox %s: %v, ignoring", sandboxID, err)
} else {
logrus.Debugf("loaded new pod sandbox %s", sandboxID, err)
}
}
for containerID := range newPodContainers {
// load this container
if err = s.loadContainer(containerID); err != nil {
logrus.Warnf("could not load new sandbox container %s: %v, ignoring", containerID, err)
} else {
logrus.Debugf("loaded new pod container %s", containerID, err)
}
}
return nil
} }
func (s *Server) reservePodName(id, name string) (string, error) { func (s *Server) reservePodName(id, name string) (string, error) {
@ -294,17 +413,35 @@ func seccompEnabled() bool {
return enabled return enabled
} }
// Shutdown attempts to shut down the server's storage cleanly
func (s *Server) Shutdown() error {
	// Ask the storage library to shut down the graph driver without
	// forcing; the list of still-mounted layers it returns is not needed.
	if _, err := s.store.Shutdown(false); err != nil {
		return err
	}
	return nil
}
// New creates a new Server with options provided // New creates a new Server with options provided
func New(config *Config) (*Server, error) { func New(config *Config) (*Server, error) {
if err := os.MkdirAll(config.ImageDir, 0755); err != nil { store, err := sstorage.GetStore(sstorage.StoreOptions{
RunRoot: config.RunRoot,
GraphRoot: config.Root,
GraphDriverName: config.Storage,
GraphDriverOptions: config.StorageOptions,
})
if err != nil {
return nil, err return nil, err
} }
if err := os.MkdirAll(config.SandboxDir, 0755); err != nil { imageService, err := storage.GetImageService(store, config.DefaultTransport)
if err != nil {
return nil, err return nil, err
} }
r, err := oci.New(config.Runtime, config.ContainerDir, config.Conmon, config.ConmonEnv, config.CgroupManager) storageRuntimeService := storage.GetRuntimeService(imageService)
if err != nil {
return nil, err
}
r, err := oci.New(config.Runtime, config.Conmon, config.ConmonEnv, config.CgroupManager)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -316,6 +453,9 @@ func New(config *Config) (*Server, error) {
} }
s := &Server{ s := &Server{
runtime: r, runtime: r,
store: store,
images: imageService,
storage: storageRuntimeService,
netPlugin: netPlugin, netPlugin: netPlugin,
config: *config, config: *config,
state: &serverState{ state: &serverState{
@ -346,6 +486,9 @@ func New(config *Config) (*Server, error) {
s.podNameIndex = registrar.NewRegistrar() s.podNameIndex = registrar.NewRegistrar()
s.ctrIDIndex = truncindex.NewTruncIndex([]string{}) s.ctrIDIndex = truncindex.NewTruncIndex([]string{})
s.ctrNameIndex = registrar.NewRegistrar() s.ctrNameIndex = registrar.NewRegistrar()
s.imageContext = &types.SystemContext{
SignaturePolicyPath: config.ImageConfig.SignaturePolicyPath,
}
s.restore() s.restore()

View file

@ -59,7 +59,7 @@ PATH=$PATH:$TESTDIR
# Run ocid using the binary specified by $OCID_BINARY. # Run ocid using the binary specified by $OCID_BINARY.
# This must ONLY be run on engines created with `start_ocid`. # This must ONLY be run on engines created with `start_ocid`.
function ocid() { function ocid() {
"$OCID_BINARY" "$@" "$OCID_BINARY" --listen "$OCID_SOCKET" "$@"
} }
# Run ocic using the binary specified by $OCID_BINARY. # Run ocic using the binary specified by $OCID_BINARY.
@ -112,7 +112,7 @@ function start_ocid() {
apparmor="$APPARMOR_PROFILE" apparmor="$APPARMOR_PROFILE"
fi fi
"$OCID_BINARY" --conmon "$CONMON_BINARY" --pause "$PAUSE_BINARY" --listen "$OCID_SOCKET" --runtime "$RUNC_BINARY" --root "$TESTDIR/ocid" --sandboxdir "$TESTDIR/sandboxes" --containerdir "$TESTDIR/ocid/containers" --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$OCID_CNI_CONFIG" config >$OCID_CONFIG "$OCID_BINARY" --conmon "$CONMON_BINARY" --listen "$OCID_SOCKET" --runtime "$RUNC_BINARY" --root "$TESTDIR/ocid" --runroot "$TESTDIR/ocid-run" --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$OCID_CNI_CONFIG" --signature-policy "$INTEGRATION_ROOT"/policy.json config >$OCID_CONFIG
"$OCID_BINARY" --debug --config "$OCID_CONFIG" & OCID_PID=$! "$OCID_BINARY" --debug --config "$OCID_CONFIG" & OCID_PID=$!
wait_until_reachable wait_until_reachable
} }
@ -130,6 +130,18 @@ function cleanup_ctrs() {
fi fi
} }
# Remove every image known to ocid, one ID per line from `ocic image list`.
function cleanup_images() {
	run ocic image list --quiet
	if [ "$status" -eq 0 ] && [ -n "$output" ]; then
		printf '%s\n' "$output" | while IFS= read -r line; do
			ocic image remove --id "$line"
		done
	fi
}
function cleanup_pods() { function cleanup_pods() {
run ocic pod list --quiet run ocic pod list --quiet
if [ "$status" -eq 0 ]; then if [ "$status" -eq 0 ]; then
@ -147,6 +159,7 @@ function cleanup_pods() {
function stop_ocid() { function stop_ocid() {
if [ "$OCID_PID" != "" ]; then if [ "$OCID_PID" != "" ]; then
kill "$OCID_PID" >/dev/null 2>&1 kill "$OCID_PID" >/dev/null 2>&1
wait "$OCID_PID"
rm -f "$OCID_CONFIG" rm -f "$OCID_CONFIG"
fi fi
} }

7
test/policy.json Normal file
View file

@ -0,0 +1,7 @@
{
"default": [
{
"type": "insecureAcceptAnything"
}
]
}

View file

@ -4,13 +4,9 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"os"
"os/exec" "os/exec"
"path/filepath"
"strings" "strings"
"syscall" "syscall"
"github.com/Sirupsen/logrus"
) )
// ExecCmd executes a command with args and returns its output as a string along // ExecCmd executes a command with args and returns its output as a string along
@ -54,74 +50,7 @@ func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
return return
} }
// CreateFakeRootfs creates a fake rootfs for test. // StatusToExitCode converts wait status code to an exit code
func CreateFakeRootfs(dir string, image string) error { func StatusToExitCode(status int) int {
if len(image) <= 9 || image[:9] != "docker://" { return ((status) & 0xff00) >> 8
return fmt.Errorf("CreateFakeRootfs only support docker images currently")
}
rootfs := filepath.Join(dir, "rootfs")
if err := os.MkdirAll(rootfs, 0755); err != nil {
return err
}
// docker export $(docker create image[9:]) | tar -C rootfs -xf -
return dockerExport(image[9:], rootfs)
}
// CreateInfraRootfs creates a rootfs similar to CreateFakeRootfs, but only
// copies a single binary from the host into the rootfs. This is all done
// without Docker, and is only used currently for the pause container which is
// required for all sandboxes.
func CreateInfraRootfs(dir string, src string) error {
rootfs := filepath.Join(dir, "rootfs")
if err := os.MkdirAll(rootfs, 0755); err != nil {
return err
}
dest := filepath.Join(rootfs, filepath.Base(src))
logrus.Debugf("copying infra rootfs binary: %v -> %v", src, dest)
in, err := os.OpenFile(src, os.O_RDONLY, 0755)
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0755)
if err != nil {
return err
}
defer out.Close()
if _, err := io.Copy(out, in); err != nil {
return err
}
return out.Sync()
}
func dockerExport(image string, rootfs string) error {
out, err := ExecCmd("docker", "create", image)
if err != nil {
return err
}
container := out[:strings.Index(out, "\n")]
cmd := fmt.Sprintf("docker export %s | tar -C %s -xf -", container, rootfs)
if _, err := ExecCmd("/bin/bash", "-c", cmd); err != nil {
err1 := dockerRemove(container)
if err1 == nil {
return err
}
return fmt.Errorf("%v; %v", err, err1)
}
return dockerRemove(container)
}
func dockerRemove(container string) error {
_, err := ExecCmd("docker", "rm", container)
return err
} }