diff --git a/.travis.yml b/.travis.yml index 811546a7..8f2a6b9d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,6 +9,10 @@ sudo: required services: - docker +before_install: + - sudo apt-get -qq update + - sudo apt-get -qq install btrfs-tools libdevmapper-dev libgpgme11-dev + install: - make install.tools diff --git a/Dockerfile b/Dockerfile index 84a47ca9..48e46a7d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,6 +23,7 @@ RUN apt-get update && apt-get install -y \ btrfs-tools \ libdevmapper1.02.1 \ libdevmapper-dev \ + libgpgme11-dev \ --no-install-recommends \ && apt-get clean @@ -52,6 +53,10 @@ RUN set -x \ && cp runc /usr/local/bin/runc \ && rm -rf "$GOPATH" +# Make sure we have some policy for pulling images +RUN mkdir -p /etc/containers +COPY test/policy.json /etc/containers/policy.json + WORKDIR /go/src/github.com/kubernetes-incubator/cri-o ADD . /go/src/github.com/kubernetes-incubator/cri-o diff --git a/README.md b/README.md index 42bfce55..986e07ff 100644 --- a/README.md +++ b/README.md @@ -42,9 +42,11 @@ It is currently in active development in the Kubernetes community through the [d ### Build -`glib2-devel` and `glibc-static` packages on Fedora or ` libglib2.0-dev` on Ubuntu or equivalent is required. -In order to enable seccomp support you will need to install `libseccomp` on your platform. +`btrfs-progs-devel`, `device-mapper-devel`, `glib2-devel`, `glibc-devel`, `gpgme-devel`, `libassuan-devel`, `libgpg-error-devel`, and `pkg-config` packages on CentOS/Fedora or `btrfs-tools`, `libassuan-dev`, `libc6-dev`, `libdevmapper-dev`, `libglib2.0-dev`, `libgpg-error-dev`, `libgpgme11-dev`, and `pkg-config` on Ubuntu or equivalent is required. +In order to enable seccomp support you will need to install development files for `libseccomp` on your platform. > e.g. `libseccomp-devel` for CentOS/Fedora, or `libseccomp-dev` for Ubuntu +In order to enable apparmor support you will need to install development files for `libapparmor` on your platform. +> e.g. `libapparmor-dev` for Ubuntu ```bash $ GOPATH=/path/to/gopath diff --git a/cmd/ocid/config.go b/cmd/ocid/config.go index ec3c92f7..db1ccc15 100644 --- a/cmd/ocid/config.go +++ b/cmd/ocid/config.go @@ -12,17 +12,21 @@ var commentedConfigTemplate = template.Must(template.New("config").Parse(` # The "ocid" table contains all of the server options. [ocid] -# root is a path to the "root directory". OCID stores all of its state -# data, including container images, in this directory. +# root is a path to the "root directory". OCID stores all of its data, +# including container images, in this directory. root = "{{ .Root }}" -# sandbox_dir is the directory where ocid will store all of its sandbox -# state and other information. -sandbox_dir = "{{ .SandboxDir }}" +# run is a path to the "run directory". OCID stores all of its state +# in this directory. +runroot = "{{ .RunRoot }}" -# container_dir is the directory where ocid will store all of its -# container state and other information. -container_dir = "{{ .ContainerDir }}" +# storage_driver select which storage driver is used to manage storage +# of images and containers. +storage_driver = "{{ .Storage }}" + +# storage_option is used to pass an option to the storage driver. +storage_option = [ +{{ range $opt := .StorageOptions }}{{ printf "\t%q,\n" $opt }}{{ end }}] # The "ocid.api" table contains settings for the kubelet/gRPC # interface (which is also used by ocic). @@ -67,9 +71,23 @@ cgroup_manager = "{{ .CgroupManager }}" # management of OCI images. 
[ocid.image] -# pause is the path to the statically linked pause container binary, used -# as the entrypoint for infra containers. -pause = "{{ .Pause }}" +# default_transport is the prefix we try prepending to an image name if the +# image name as we receive it can't be parsed as a valid source reference +default_transport = "{{ .DefaultTransport }}" + +# pause_image is the image which we use to instantiate infra containers. +pause_image = "{{ .PauseImage }}" + +# pause_command is the command to run in a pause_image to have a container just +# sit there. If the image contains the necessary information, this value need +# not be specified. +pause_command = "{{ .PauseCommand }}" + +# signature_policy is the name of the file which decides what sort of policy we +# use when deciding whether or not to trust an image that we've pulled. +# Outside of testing situations, it is strongly advised that this be left +# unspecified so that the default system-wide policy will be used. +signature_policy = "{{ .SignaturePolicyPath }}" # The "ocid.network" table contains settings pertaining to the # management of CNI plugins. diff --git a/cmd/ocid/main.go b/cmd/ocid/main.go index 528d592a..a87cdd1d 100644 --- a/cmd/ocid/main.go +++ b/cmd/ocid/main.go @@ -4,7 +4,10 @@ import ( "fmt" "net" "os" + "os/signal" "sort" + "strings" + "syscall" "github.com/Sirupsen/logrus" "github.com/containers/storage/pkg/reexec" @@ -36,17 +39,29 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error { if ctx.GlobalIsSet("conmon") { config.Conmon = ctx.GlobalString("conmon") } - if ctx.GlobalIsSet("containerdir") { - config.ContainerDir = ctx.GlobalString("containerdir") + if ctx.GlobalIsSet("pause-command") { + config.PauseCommand = ctx.GlobalString("pause-command") } - if ctx.GlobalIsSet("pause") { - config.Pause = ctx.GlobalString("pause") + if ctx.GlobalIsSet("pause-image") { + config.PauseImage = ctx.GlobalString("pause-image") + } + if ctx.GlobalIsSet("signature-policy") { + config.SignaturePolicyPath = ctx.GlobalString("signature-policy") } if ctx.GlobalIsSet("root") { config.Root = ctx.GlobalString("root") } - if ctx.GlobalIsSet("sandboxdir") { - config.SandboxDir = ctx.GlobalString("sandboxdir") + if ctx.GlobalIsSet("runroot") { + config.RunRoot = ctx.GlobalString("runroot") + } + if ctx.GlobalIsSet("storage-driver") { + config.Storage = ctx.GlobalString("storage-driver") + } + if ctx.GlobalIsSet("storage-option") { + config.StorageOptions = ctx.GlobalStringSlice("storage-option") + } + if ctx.GlobalIsSet("default-transport") { + config.DefaultTransport = ctx.GlobalString("default-transport") } if ctx.GlobalIsSet("listen") { config.Listen = ctx.GlobalString("listen") @@ -75,6 +90,26 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error { return nil } +func catchShutdown(gserver *grpc.Server, sserver *server.Server, signalled *bool) { + sig := make(chan os.Signal, 10) + signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) + go func() { + for s := range sig { + switch s { + case syscall.SIGINT: + logrus.Debugf("Caught SIGINT") + case syscall.SIGTERM: + logrus.Debugf("Caught SIGTERM") + default: + continue + } + *signalled = true + gserver.GracefulStop() + return + } + }() +} + func main() { if reexec.Init() { return @@ -97,10 +132,6 @@ func main() { Name: "conmon", Usage: "path to the conmon executable", }, - cli.StringFlag{ - Name: "containerdir", - Usage: "ocid container dir", - }, cli.BoolFlag{ Name: "debug", Usage: "enable debug output for logging", @@ -120,20 +151,40 @@ func main() { Usage: 
"set the format used by logs ('text' (default), or 'json')", }, cli.StringFlag{ - Name: "pause", - Usage: "path to the pause executable", + Name: "pause-command", + Usage: "name of the pause command in the pause image", + }, + cli.StringFlag{ + Name: "pause-image", + Usage: "name of the pause image", + }, + cli.StringFlag{ + Name: "signature-policy", + Usage: "path to signature policy file", }, cli.StringFlag{ Name: "root", Usage: "ocid root dir", }, cli.StringFlag{ - Name: "runtime", - Usage: "OCI runtime path", + Name: "runroot", + Usage: "ocid state dir", }, cli.StringFlag{ - Name: "sandboxdir", - Usage: "ocid pod sandbox dir", + Name: "storage-driver", + Usage: "storage driver", + }, + cli.StringSliceFlag{ + Name: "storage-option", + Usage: "storage driver option", + }, + cli.StringFlag{ + Name: "default-transport", + Usage: "default transport", + }, + cli.StringFlag{ + Name: "runtime", + Usage: "OCI runtime path", }, cli.StringFlag{ Name: "seccomp-profile", @@ -236,13 +287,24 @@ func main() { logrus.Fatal(err) } + graceful := false + catchShutdown(s, service, &graceful) runtime.RegisterRuntimeServiceServer(s, service) runtime.RegisterImageServiceServer(s, service) // after the daemon is done setting up we can notify systemd api notifySystem() - if err := s.Serve(lis); err != nil { + err = s.Serve(lis) + if graceful && strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") { + err = nil + } + + if err2 := service.Shutdown(); err2 != nil { + logrus.Infof("error shutting down layer storage: %v", err2) + } + + if err != nil { logrus.Fatal(err) } return nil diff --git a/docs/ocid.8.md b/docs/ocid.8.md index c0bbc353..0f007e07 100644 --- a/docs/ocid.8.md +++ b/docs/ocid.8.md @@ -8,16 +8,20 @@ ocid - Enable OCI Kubernetes Container Runtime daemon **ocid** [**--config**=[*value*]] [**--conmon**=[*value*]] -[**--containerdir**=[*value*]] [**--debug**] +[**--default-transport**=[*value*]] [**--help**|**-h**] [**--listen**=[*value*]] [**--log**=[*value*]] [**--log-format value**] -[**--pause**=[*value*]] +[**--pause-command**=[*value*]] +[**--pause-image**=[*value*]] [**--root**=[*value*]] +[**--runroot**=[*value*]] [**--runtime**=[*value*]] -[**--sandboxdir**=[*value*]] +[**--signature-policy**=[*value*]] +[**--storage-driver**=[*value*]] +[**--storage-option**=[*value*]] [**--selinux**] [**--seccomp-profile**=[*value*]] [**--apparmor-profile**=[*value*]] @@ -43,18 +47,21 @@ ocid is meant to provide an integration path between OCI conformant runtimes and # GLOBAL OPTIONS +**--apparmor_profile**="" + Name of the apparmor profile to be used as the runtime's default (default: "ocid-default") + **--config**="" path to configuration file **--conmon**="" path to the conmon executable (default: "/usr/libexec/ocid/conmon") -**--containerdir**="" - OCID container dir (default: "/var/lib/ocid/containers") - **--debug** Enable debug output for logging +**--default-transport** + A prefix to prepend to image names that can't be pulled as-is. 
+ **--help, -h** Print usage statement @@ -67,32 +74,41 @@ ocid is meant to provide an integration path between OCI conformant runtimes and **--log-format**="" Set the format used by logs ('text' (default), or 'json') (default: "text") -**--pause**="" - Path to the pause executable (default: "/usr/libexec/ocid/pause") +**--pause-command**="" + Path to the pause executable in the pause image (default: "/pause") + +**--pause-image**="" + Image which contains the pause executable (default: "kubernetes/pause") **--root**="" - OCID root dir (default: "/var/lib/ocid") + OCID root dir (default: "/var/lib/containers") + +**--runroot**="" + OCID state dir (default: "/var/run/containers") **--runtime**="" OCI runtime path (default: "/usr/bin/runc") -**--sandboxdir**="" - OCID pod sandbox dir (default: "/var/lib/ocid/sandboxes") - **--selinux**=*true*|*false* Enable selinux support (default: false) -**--seccomp_profile**="" +**--seccomp-profile**="" Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json") -**--apparmor_profile**="" - Name of the apparmor profile to be used as the runtime's default (default: "ocid-default") +**--signature-policy**="" + Path to the signature policy json file (default: "", to use the system-wide default) + +**--storage-driver** + OCI storage driver (default: "devicemapper") + +**--storage-option** + OCI storage driver option (no default) **--cni-config-dir**="" - CNI configuration files directory (defautl: "/etc/cni/net.d/") + CNI configuration files directory (default: "/etc/cni/net.d/") **--cni-plugin-dir**="" - CNI plugin binaries directory (defautl: "/opt/cni/bin/") + CNI plugin binaries directory (default: "/opt/cni/bin/") **--version, -v** Print the version diff --git a/docs/ocid.conf.5.md b/docs/ocid.conf.5.md index 4d554640..2eec29db 100644 --- a/docs/ocid.conf.5.md +++ b/docs/ocid.conf.5.md @@ -29,15 +29,17 @@ No bare options are used. 
The format of TOML can be simplified to: The `ocid` table supports the following options: -**container_dir**="" - OCID container dir (default: "/var/lib/ocid/containers") - **root**="" - OCID root dir (default: "/var/lib/ocid") + OCID root dir (default: "/var/lib/containers") -**sandbox_dir**="" - OCID pod sandbox dir (default: "/var/lib/ocid/sandboxes") +**runroot**="" + OCID state dir (default: "/var/run/containers") +**storage_driver**="" + OCID storage driver (default is "devicemapper") + +**storage_option**=[] + OCID storage driver option list (no default) ## OCID.API TABLE @@ -58,6 +60,9 @@ The `ocid` table supports the following options: **selinux**=*true*|*false* Enable selinux support (default: false) +**signature_policy**="" + Path to the signature policy json file (default: "", to use the system-wide default) + **seccomp_profile**="" Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json") @@ -66,8 +71,14 @@ The `ocid` table supports the following options: ## OCID.IMAGE TABLE -**pause**="" - Path to the pause executable (default: "/usr/libexec/ocid/pause") +**default_transport** + A prefix to prepend to image names that can't be pulled as-is (default: "docker://") + +**pause_command**="" + Path to the pause executable in the pause image (default: "/pause") + +**pause_image**="" + Image which contains the pause executable (default: "kubernetes/pause") ## OCID.NETWORK TABLE diff --git a/oci/oci.go b/oci/oci.go index c155ed79..167756a3 100644 --- a/oci/oci.go +++ b/oci/oci.go @@ -34,11 +34,10 @@ const ( ) // New creates a new Runtime with options provided -func New(runtimePath string, containerDir string, conmonPath string, conmonEnv []string, cgroupManager string) (*Runtime, error) { +func New(runtimePath string, conmonPath string, conmonEnv []string, cgroupManager string) (*Runtime, error) { r := &Runtime{ name: filepath.Base(runtimePath), path: runtimePath, - containerDir: containerDir, conmonPath: conmonPath, conmonEnv: conmonEnv, cgroupManager: cgroupManager, @@ -50,7 +49,6 @@ func New(runtimePath string, containerDir string, conmonPath string, conmonEnv [ type Runtime struct { name string path string - containerDir string conmonPath string conmonEnv []string cgroupManager string @@ -76,11 +74,6 @@ func (r *Runtime) Path() string { return r.path } -// ContainerDir returns the path to the base directory for storing container configurations -func (r *Runtime) ContainerDir() string { - return r.containerDir -} - // Version returns the version of the OCI Runtime func (r *Runtime) Version() (string, error) { runtimeVersion, err := getOCIVersion(r.path, "-v") diff --git a/server/config.go b/server/config.go index b49462c0..56fcdda5 100644 --- a/server/config.go +++ b/server/config.go @@ -3,7 +3,6 @@ package server import ( "bytes" "io/ioutil" - "path/filepath" "github.com/BurntSushi/toml" "github.com/opencontainers/runc/libcontainer/selinux" @@ -11,16 +10,16 @@ import ( // Default paths if none are specified const ( - ocidRoot = "/var/lib/ocid" - conmonPath = "/usr/libexec/ocid/conmon" - pausePath = "/usr/libexec/ocid/pause" - seccompProfilePath = "/etc/ocid/seccomp.json" - cniConfigDir = "/etc/cni/net.d/" - cniBinDir = "/opt/cni/bin/" -) - -const ( + ocidRoot = "/var/lib/ocid" + ocidRunRoot = "/var/run/containers" + conmonPath = "/usr/libexec/ocid/conmon" + pauseImage = "kubernetes/pause" + pauseCommand = "/pause" + defaultTransport = "docker://" + seccompProfilePath = "/etc/ocid/seccomp.json" apparmorProfileName = "ocid-default" + 
cniConfigDir = "/etc/cni/net.d/" + cniBinDir = "/opt/cni/bin/" cgroupManager = "cgroupfs" ) @@ -40,17 +39,20 @@ type Config struct { // RootConfig represents the root of the "ocid" TOML config table. type RootConfig struct { - // Root is a path to the "root directory" where all information not + // Root is a path to the "root directory" where data not // explicitly handled by other options will be stored. Root string `toml:"root"` - // SandboxDir is the directory where ocid will store all of its sandbox - // state and other information. - SandboxDir string `toml:"sandbox_dir"` + // RunRoot is a path to the "run directory" where state information not + // explicitly handled by other options will be stored. + RunRoot string `toml:"runroot"` - // ContainerDir is the directory where ocid will store all of its container - // state and other information. - ContainerDir string `toml:"container_dir"` + // Storage is the name of the storage driver which handles actually + // storing the contents of containers. + Storage string `toml:"storage_driver"` + + // StorageOption is a list of storage driver specific options. + StorageOptions []string `toml:"storage_option"` // LogDir is the default log directory were all logs will go unless kubelet // tells us to put them somewhere else. @@ -98,17 +100,21 @@ type RuntimeConfig struct { // ImageConfig represents the "ocid.image" TOML config table. type ImageConfig struct { - // Pause is the path to the statically linked pause container binary, used - // as the entrypoint for infra containers. - // - // TODO(cyphar): This should be replaced with a path to an OCI image - // bundle, once the OCI image/storage code has been implemented. - Pause string `toml:"pause"` - - // ImageStore is the directory where the ocid image store will be stored. - // TODO: This is currently not really used because we don't have - // containers/storage integrated. - ImageDir string `toml:"image_dir"` + // DefaultTransport is a value we prefix to image names that fail to + // validate source references. + DefaultTransport string `toml:"default_transport"` + // PauseImage is the name of an image which we use to instantiate infra + // containers. + PauseImage string `toml:"pause_image"` + // PauseCommand is the path of the binary we run in an infra + // container that's been instantiated using PauseImage. + PauseCommand string `toml:"pause_command"` + // SignaturePolicyPath is the name of the file which decides what sort + // of policy we use when deciding whether or not to trust an image that + // we've pulled. Outside of testing situations, it is strongly advised + // that this be left unspecified so that the default system-wide policy + // will be used. 
+ SignaturePolicyPath string `toml:"signature_policy"` } // NetworkConfig represents the "ocid.network" TOML config table @@ -191,10 +197,9 @@ func (c *Config) ToFile(path string) error { func DefaultConfig() *Config { return &Config{ RootConfig: RootConfig{ - Root: ocidRoot, - SandboxDir: filepath.Join(ocidRoot, "sandboxes"), - ContainerDir: filepath.Join(ocidRoot, "containers"), - LogDir: "/var/log/ocid/pods", + Root: ocidRoot, + RunRoot: ocidRunRoot, + LogDir: "/var/log/ocid/pods", }, APIConfig: APIConfig{ Listen: "/var/run/ocid.sock", @@ -211,8 +216,10 @@ func DefaultConfig() *Config { CgroupManager: cgroupManager, }, ImageConfig: ImageConfig{ - Pause: pausePath, - ImageDir: filepath.Join(ocidRoot, "store"), + DefaultTransport: defaultTransport, + PauseImage: pauseImage, + PauseCommand: pauseCommand, + SignaturePolicyPath: "", }, NetworkConfig: NetworkConfig{ NetworkDir: cniConfigDir, diff --git a/server/container_create.go b/server/container_create.go index bd219198..e4354607 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -4,7 +4,6 @@ import ( "encoding/json" "errors" "fmt" - "os" "path/filepath" "strings" "syscall" @@ -14,7 +13,6 @@ import ( "github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/server/apparmor" "github.com/kubernetes-incubator/cri-o/server/seccomp" - "github.com/kubernetes-incubator/cri-o/utils" "github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runtime-tools/generate" "golang.org/x/net/context" @@ -30,6 +28,7 @@ const ( // CreateContainer creates a new container in specified PodSandbox func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) { logrus.Debugf("CreateContainerRequest %+v", req) + s.Update() sbID := req.GetPodSandboxId() if sbID == "" { return nil, fmt.Errorf("PodSandboxId should not be empty") @@ -62,30 +61,24 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq return nil, err } - // containerDir is the dir for the container bundle. 
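Circling back to the server/config.go changes above: for anyone carrying an old ocid.conf forward, here is a minimal sketch (illustrative only, not part of the patch) of how the new `[ocid]` keys decode, using the same BurntSushi/toml package that `server/config.go` already imports. The `overlay` driver and the single `storage_option` value are made-up examples, not defaults.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// A trimmed-down mirror of RootConfig with the TOML keys this patch introduces.
type rootConfig struct {
	Root           string   `toml:"root"`
	RunRoot        string   `toml:"runroot"`
	Storage        string   `toml:"storage_driver"`
	StorageOptions []string `toml:"storage_option"`
}

func main() {
	const conf = `
root = "/var/lib/containers"
runroot = "/var/run/containers"
storage_driver = "overlay"
storage_option = ["overlay.override_kernel_check=1"]
`
	var rc rootConfig
	if _, err := toml.Decode(conf, &rc); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", rc)
}
```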
- containerDir := filepath.Join(s.runtime.ContainerDir(), containerID) defer func() { if err != nil { s.releaseContainerName(containerName) - err1 := os.RemoveAll(containerDir) - if err1 != nil { - logrus.Warnf("Failed to cleanup container directory: %v", err1) - } } }() - if _, err = os.Stat(containerDir); err == nil { - return nil, fmt.Errorf("container (%s) already exists", containerDir) - } - - if err = os.MkdirAll(containerDir, 0755); err != nil { - return nil, err - } - - container, err := s.createSandboxContainer(containerID, containerName, sb, containerDir, containerConfig) + container, err := s.createSandboxContainer(ctx, containerID, containerName, sb, req.GetSandboxConfig(), containerConfig) if err != nil { return nil, err } + defer func() { + if err != nil { + err2 := s.storage.DeleteContainer(containerID) + if err2 != nil { + logrus.Warnf("Failed to cleanup container directory: %v", err2) + } + } + }() if err = s.runtime.CreateContainer(container); err != nil { return nil, err @@ -110,23 +103,21 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq return resp, nil } -func (s *Server) createSandboxContainer(containerID string, containerName string, sb *sandbox, containerDir string, containerConfig *pb.ContainerConfig) (*oci.Container, error) { +func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerName string, sb *sandbox, SandboxConfig *pb.PodSandboxConfig, containerConfig *pb.ContainerConfig) (*oci.Container, error) { if sb == nil { return nil, errors.New("createSandboxContainer needs a sandbox") } + + // TODO: factor generating/updating the spec into something other projects can vendor + // creates a spec Generator with the default spec. specgen := generate.New() - // by default, the root path is an empty string. - // here set it to be "rootfs". - specgen.SetRootPath("rootfs") - processArgs := []string{} commands := containerConfig.GetCommand() args := containerConfig.GetArgs() if commands == nil && args == nil { - // TODO: override with image's config in #189 - processArgs = []string{"/bin/sh"} + processArgs = nil } if commands != nil { processArgs = append(processArgs, commands...) @@ -135,8 +126,6 @@ func (s *Server) createSandboxContainer(containerID string, containerName string processArgs = append(processArgs, args...) } - specgen.SetProcessArgs(processArgs) - cwd := containerConfig.GetWorkingDir() if cwd == "" { cwd = "/" @@ -357,17 +346,46 @@ func (s *Server) createSandboxContainer(containerID string, containerName string return nil, err } - if err = specgen.SaveToFile(filepath.Join(containerDir, "config.json"), generate.ExportOptions{}); err != nil { + metaname := metadata.GetName() + attempt := metadata.GetAttempt() + containerInfo, err := s.storage.CreateContainer(s.imageContext, + sb.name, sb.id, + image, "", + containerName, containerID, + metaname, + attempt, + sb.mountLabel, + nil) + if err != nil { return nil, err } - // TODO: copy the rootfs into the bundle. - // Currently, utils.CreateFakeRootfs is used to populate the rootfs. 
- if err = utils.CreateFakeRootfs(containerDir, image); err != nil { + mountPoint, err := s.storage.StartContainer(containerID) + if err != nil { + return nil, fmt.Errorf("failed to mount container %s(%s): %v", containerName, containerID, err) + } + + if processArgs == nil { + if containerInfo.Config != nil && len(containerInfo.Config.Config.Cmd) > 0 { + processArgs = containerInfo.Config.Config.Cmd + } else { + processArgs = []string{"/bin/sh"} + } + } + specgen.SetProcessArgs(processArgs) + + // by default, the root path is an empty string. set it now. + specgen.SetRootPath(mountPoint) + + saveOptions := generate.ExportOptions{} + if err = specgen.SaveToFile(filepath.Join(containerInfo.Dir, "config.json"), saveOptions); err != nil { + return nil, err + } + if err = specgen.SaveToFile(filepath.Join(containerInfo.RunDir, "config.json"), saveOptions); err != nil { return nil, err } - container, err := oci.NewContainer(containerID, containerName, containerDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.GetTty()) + container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.GetTty()) if err != nil { return nil, err } diff --git a/server/container_list.go b/server/container_list.go index 776b85bc..b7a0b5e3 100644 --- a/server/container_list.go +++ b/server/container_list.go @@ -29,6 +29,7 @@ func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool { // ListContainers lists all containers by filters. func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) { logrus.Debugf("ListContainersRequest %+v", req) + s.Update() var ctrs []*pb.Container filter := req.Filter ctrList := s.state.containers.List() diff --git a/server/container_remove.go b/server/container_remove.go index 37c318ff..a4d06f17 100644 --- a/server/container_remove.go +++ b/server/container_remove.go @@ -2,8 +2,6 @@ package server import ( "fmt" - "os" - "path/filepath" "github.com/Sirupsen/logrus" "github.com/kubernetes-incubator/cri-o/oci" @@ -15,6 +13,7 @@ import ( // should be force removed. func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) { logrus.Debugf("RemoveContainerRequest %+v", req) + s.Update() c, err := s.getContainerFromRequest(req) if err != nil { return nil, err @@ -35,9 +34,12 @@ func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerReq return nil, fmt.Errorf("failed to delete container %s: %v", c.ID(), err) } - containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID()) - if err := os.RemoveAll(containerDir); err != nil { - return nil, fmt.Errorf("failed to remove container %s directory: %v", c.ID(), err) + if err := s.storage.StopContainer(c.ID()); err != nil { + return nil, fmt.Errorf("failed to unmount container %s: %v", c.ID(), err) + } + + if err := s.storage.DeleteContainer(c.ID()); err != nil { + return nil, fmt.Errorf("failed to delete storage for container %s: %v", c.ID(), err) } s.releaseContainerName(c.Name()) diff --git a/server/container_start.go b/server/container_start.go index 5e724865..23a33b90 100644 --- a/server/container_start.go +++ b/server/container_start.go @@ -11,12 +11,13 @@ import ( // StartContainer starts the container. 
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) { logrus.Debugf("StartContainerRequest %+v", req) + s.Update() c, err := s.getContainerFromRequest(req) if err != nil { return nil, err } - if err := s.runtime.StartContainer(c); err != nil { + if err = s.runtime.StartContainer(c); err != nil { return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err) } diff --git a/server/container_status.go b/server/container_status.go index fa07c89c..24ff260c 100644 --- a/server/container_status.go +++ b/server/container_status.go @@ -10,6 +10,7 @@ import ( // ContainerStatus returns status of the container. func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) { logrus.Debugf("ContainerStatusRequest %+v", req) + s.Update() c, err := s.getContainerFromRequest(req) if err != nil { return nil, err diff --git a/server/container_stop.go b/server/container_stop.go index 1aba8801..a6457c0e 100644 --- a/server/container_stop.go +++ b/server/container_stop.go @@ -12,6 +12,7 @@ import ( // StopContainer stops a running container with a grace period (i.e., timeout). func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) { logrus.Debugf("StopContainerRequest %+v", req) + s.Update() c, err := s.getContainerFromRequest(req) if err != nil { return nil, err diff --git a/server/image_list.go b/server/image_list.go index 964dbd74..a9cb25d4 100644 --- a/server/image_list.go +++ b/server/image_list.go @@ -8,9 +8,27 @@ import ( // ListImages lists existing images. func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) { - logrus.Debugf("ListImages: %+v", req) - // TODO - // containers/storage will take care of this by looking inside /var/lib/ocid/images - // and listing images. - return &pb.ListImagesResponse{}, nil + logrus.Debugf("ListImagesRequest: %+v", req) + filter := "" + reqFilter := req.GetFilter() + if reqFilter != nil { + filterImage := reqFilter.GetImage() + if filterImage != nil { + filter = filterImage.GetImage() + } + } + results, err := s.images.ListImages(filter) + if err != nil { + return nil, err + } + response := pb.ListImagesResponse{} + for _, result := range results { + response.Images = append(response.Images, &pb.Image{ + Id: sPtr(result.ID), + RepoTags: result.Names, + Size_: result.Size, + }) + } + logrus.Debugf("ListImagesResponse: %+v", response) + return &response, nil } diff --git a/server/image_pull.go b/server/image_pull.go index 3a0aa65c..afed362c 100644 --- a/server/image_pull.go +++ b/server/image_pull.go @@ -1,86 +1,28 @@ package server import ( - "errors" - "io" - "os" - "path/filepath" - "github.com/Sirupsen/logrus" - "github.com/containers/image/directory" - "github.com/containers/image/image" - "github.com/containers/image/transports" + "github.com/containers/image/copy" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" ) // PullImage pulls a image with authentication config. func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) { - logrus.Debugf("PullImage: %+v", req) - img := req.GetImage().GetImage() - if img == "" { - return nil, errors.New("got empty imagespec name") - } - - // TODO(runcom): deal with AuthConfig in req.GetAuth() - - // TODO(mrunalp,runcom): why do we need the SandboxConfig here? 
- // how do we pull in a specified sandbox? - tr, err := transports.ParseImageName(img) - if err != nil { - return nil, err - } - // TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true) - src, err := tr.NewImageSource(nil, nil) - if err != nil { - return nil, err - } - i, err := image.FromSource(src) - if err != nil { - return nil, err - } - blobs := i.LayerInfos() - config := i.ConfigInfo() - if config.Digest != "" { - blobs = append(blobs, config) - } - - if err = os.Mkdir(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()), 0755); err != nil { - return nil, err - } - dir, err := directory.NewReference(filepath.Join(s.config.ImageDir, tr.StringWithinTransport())) - if err != nil { - return nil, err - } - // TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true) - dest, err := dir.NewImageDestination(nil) - if err != nil { - return nil, err - } - // save blobs (layer + config for docker v2s2, layers only for docker v2s1 [the config is in the manifest]) - for _, b := range blobs { - // TODO(runcom,nalin): we need do-then-commit to later purge on error - var r io.ReadCloser - r, _, err = src.GetBlob(b) - if err != nil { - return nil, err - } - if _, err = dest.PutBlob(r, b); err != nil { - r.Close() - return nil, err - } - r.Close() - } - // save manifest - m, _, err := i.Manifest() - if err != nil { - return nil, err - } - if err := dest.PutManifest(m); err != nil { - return nil, err - } - + logrus.Debugf("PullImageRequest: %+v", req) + // TODO(runcom?): deal with AuthConfig in req.GetAuth() // TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://) - - return &pb.PullImageResponse{}, nil + image := "" + img := req.GetImage() + if img != nil { + image = img.GetImage() + } + options := ©.Options{} + _, err := s.images.PullImage(s.imageContext, image, options) + if err != nil { + return nil, err + } + resp := &pb.PullImageResponse{} + logrus.Debugf("PullImageResponse: %+v", resp) + return resp, nil } diff --git a/server/image_remove.go b/server/image_remove.go index 21bf30ff..f68dd03f 100644 --- a/server/image_remove.go +++ b/server/image_remove.go @@ -1,6 +1,8 @@ package server import ( + "fmt" + "github.com/Sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" @@ -8,6 +10,20 @@ import ( // RemoveImage removes the image. func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) { - logrus.Debugf("RemoveImage: %+v", req) - return &pb.RemoveImageResponse{}, nil + logrus.Debugf("RemoveImageRequest: %+v", req) + image := "" + img := req.GetImage() + if img != nil { + image = img.GetImage() + } + if image == "" { + return nil, fmt.Errorf("no image specified") + } + err := s.images.RemoveImage(s.imageContext, image) + if err != nil { + return nil, err + } + resp := &pb.RemoveImageResponse{} + logrus.Debugf("RemoveImageResponse: %+v", resp) + return resp, nil } diff --git a/server/image_status.go b/server/image_status.go index 4ab113d5..cf253d66 100644 --- a/server/image_status.go +++ b/server/image_status.go @@ -1,6 +1,8 @@ package server import ( + "fmt" + "github.com/Sirupsen/logrus" "golang.org/x/net/context" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" @@ -8,9 +10,26 @@ import ( // ImageStatus returns the status of the image. 
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) { - logrus.Debugf("ImageStatus: %+v", req) - // TODO - // containers/storage will take care of this by looking inside /var/lib/ocid/images - // and getting the image status - return &pb.ImageStatusResponse{}, nil + logrus.Debugf("ImageStatusRequest: %+v", req) + image := "" + img := req.GetImage() + if img != nil { + image = img.GetImage() + } + if image == "" { + return nil, fmt.Errorf("no image specified") + } + status, err := s.images.ImageStatus(s.imageContext, image) + if err != nil { + return nil, err + } + resp := &pb.ImageStatusResponse{ + Image: &pb.Image{ + Id: &status.ID, + RepoTags: status.Names, + Size_: status.Size, + }, + } + logrus.Debugf("ImageStatusResponse: %+v", resp) + return resp, nil } diff --git a/server/sandbox.go b/server/sandbox.go index 3348ed53..732f0044 100644 --- a/server/sandbox.go +++ b/server/sandbox.go @@ -145,6 +145,7 @@ const ( podDefaultNamespace = "default" defaultShmSize = 64 * 1024 * 1024 nsRunDir = "/var/run/netns" + podInfraCommand = "/pause" ) var ( @@ -277,7 +278,7 @@ func (s *Server) getPodSandboxFromRequest(req podSandboxRequest) (*sandbox, erro sb := s.getSandbox(sandboxID) if sb == nil { - return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID) + return nil, fmt.Errorf("specified pod sandbox not found: %s", sandboxID) } return sb, nil } diff --git a/server/sandbox_list.go b/server/sandbox_list.go index 3d8ae6a6..c8627018 100644 --- a/server/sandbox_list.go +++ b/server/sandbox_list.go @@ -29,6 +29,7 @@ func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool { // ListPodSandbox returns a list of SandBoxes. func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) { logrus.Debugf("ListPodSandboxRequest %+v", req) + s.Update() var pods []*pb.PodSandbox var podList []*sandbox for _, sb := range s.state.sandboxes { diff --git a/server/sandbox_remove.go b/server/sandbox_remove.go index 00f8c3b8..db7010f5 100644 --- a/server/sandbox_remove.go +++ b/server/sandbox_remove.go @@ -2,8 +2,6 @@ package server import ( "fmt" - "os" - "path/filepath" "syscall" "github.com/Sirupsen/logrus" @@ -17,6 +15,7 @@ import ( // sandbox, they should be force deleted. 
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) { logrus.Debugf("RemovePodSandboxRequest %+v", req) + s.Update() sb, err := s.getPodSandboxFromRequest(req) if err != nil { if err == errSandboxIDEmpty { @@ -46,16 +45,18 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR } if err := s.runtime.DeleteContainer(c); err != nil { - return nil, fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), sb.id, err) + return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err) } if c == podInfraContainer { continue } - containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID()) - if err := os.RemoveAll(containerDir); err != nil { - return nil, fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err) + if err := s.storage.StopContainer(c.ID()); err != nil { + return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err) + } + if err := s.storage.DeleteContainer(c.ID()); err != nil { + return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err) } s.releaseContainerName(c.Name()) @@ -81,10 +82,13 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR } // Remove the files related to the sandbox - podSandboxDir := filepath.Join(s.config.SandboxDir, sb.id) - if err := os.RemoveAll(podSandboxDir); err != nil { - return nil, fmt.Errorf("failed to remove sandbox %s directory: %v", sb.id, err) + if err := s.storage.StopContainer(sb.id); err != nil { + return nil, fmt.Errorf("failed to delete sandbox container in pod sandbox %s: %v", sb.id, err) } + if err := s.storage.RemovePodSandbox(sb.id); err != nil { + return nil, fmt.Errorf("failed to remove pod sandbox %s: %v", sb.id, err) + } + s.releaseContainerName(podInfraContainer.Name()) s.removeContainer(podInfraContainer) sb.infraContainer = nil diff --git a/server/sandbox_run.go b/server/sandbox_run.go index 581a5b86..2e279b6e 100644 --- a/server/sandbox_run.go +++ b/server/sandbox_run.go @@ -9,8 +9,8 @@ import ( "syscall" "github.com/Sirupsen/logrus" + "github.com/containers/storage/storage" "github.com/kubernetes-incubator/cri-o/oci" - "github.com/kubernetes-incubator/cri-o/utils" "github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runtime-tools/generate" "golang.org/x/net/context" @@ -54,6 +54,10 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest if err != nil { return nil, err } + _, containerName, err := s.generateContainerIDandName(name, "infra", attempt) + if err != nil { + return nil, err + } defer func() { if err != nil { @@ -67,39 +71,51 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest defer func() { if err != nil { - if err = s.podIDIndex.Delete(id); err != nil { + if err2 := s.podIDIndex.Delete(id); err2 != nil { logrus.Warnf("couldn't delete pod id %s from idIndex", id) } } }() - podSandboxDir := filepath.Join(s.config.SandboxDir, id) - if _, err = os.Stat(podSandboxDir); err == nil { - return nil, fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir) + podContainer, err := s.storage.CreatePodSandbox(s.imageContext, + name, id, + s.config.PauseImage, "", + containerName, + req.GetConfig().GetMetadata().GetName(), + req.GetConfig().GetMetadata().GetUid(), + namespace, + attempt, + nil) + if err == storage.ErrDuplicateName { + return nil, fmt.Errorf("pod sandbox 
with name %q already exists", name) + } + if err != nil { + return nil, fmt.Errorf("error creating pod sandbox with name %q: %v", name, err) } - defer func() { if err != nil { - if err2 := os.RemoveAll(podSandboxDir); err2 != nil { - logrus.Warnf("couldn't cleanup podSandboxDir %s: %v", podSandboxDir, err2) + if err2 := s.storage.RemovePodSandbox(id); err2 != nil { + logrus.Warnf("couldn't cleanup pod sandbox %q: %v", id, err2) } } }() - if err = os.MkdirAll(podSandboxDir, 0755); err != nil { - return nil, err - } + // TODO: factor generating/updating the spec into something other projects can vendor // creates a spec Generator with the default spec. g := generate.New() - // TODO: Make the `graph/vfs` part of this configurable once the storage - // integration has been merged. - podInfraRootfs := filepath.Join(s.config.Root, "graph/vfs/pause") // setup defaults for the pod sandbox - g.SetRootPath(filepath.Join(podInfraRootfs, "rootfs")) g.SetRootReadonly(true) - g.SetProcessArgs([]string{"/pause"}) + if s.config.PauseCommand == "" { + if podContainer.Config != nil { + g.SetProcessArgs(podContainer.Config.Config.Cmd) + } else { + g.SetProcessArgs([]string{podInfraCommand}) + } + } else { + g.SetProcessArgs([]string{s.config.PauseCommand}) + } // set hostname hostname := req.GetConfig().GetHostname() @@ -117,7 +133,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest dnsServers := req.GetConfig().GetDnsConfig().GetServers() dnsSearches := req.GetConfig().GetDnsConfig().GetSearches() dnsOptions := req.GetConfig().GetDnsConfig().GetOptions() - resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir) + resolvPath := fmt.Sprintf("%s/resolv.conf", podContainer.RunDir) err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath) if err != nil { err1 := removeFile(resolvPath) @@ -165,7 +181,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() { shmPath = "/dev/shm" } else { - shmPath, err = setupShm(podSandboxDir, mountLabel) + shmPath, err = setupShm(podContainer.RunDir, mountLabel) if err != nil { return nil, err } @@ -178,7 +194,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest }() } - containerID, containerName, err := s.generateContainerIDandName(name, "infra", 0) + err = s.setPodSandboxMountLabel(id, mountLabel) if err != nil { return nil, err } @@ -189,14 +205,14 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest } }() - if err = s.ctrIDIndex.Add(containerID); err != nil { + if err = s.ctrIDIndex.Add(id); err != nil { return nil, err } defer func() { if err != nil { - if err = s.ctrIDIndex.Delete(containerID); err != nil { - logrus.Warnf("couldn't delete ctr id %s from idIndex", containerID) + if err2 := s.ctrIDIndex.Delete(id); err2 != nil { + logrus.Warnf("couldn't delete ctr id %s from idIndex", id) } } }() @@ -207,8 +223,9 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest g.AddAnnotation("ocid/log_path", logDir) g.AddAnnotation("ocid/name", name) g.AddAnnotation("ocid/container_type", containerTypeSandbox) + g.AddAnnotation("ocid/sandbox_id", id) g.AddAnnotation("ocid/container_name", containerName) - g.AddAnnotation("ocid/container_id", containerID) + g.AddAnnotation("ocid/container_id", id) g.AddAnnotation("ocid/shm_path", shmPath) sb := &sandbox{ @@ -246,11 +263,11 @@ func (s *Server) RunPodSandbox(ctx context.Context, req 
*pb.RunPodSandboxRequest cgroupParent := req.GetConfig().GetLinux().GetCgroupParent() if cgroupParent != "" { if s.config.CgroupManager == "systemd" { - cgPath := sb.cgroupParent + ":" + "ocid" + ":" + containerID + cgPath := sb.cgroupParent + ":" + "ocid" + ":" + id g.SetLinuxCgroupsPath(cgPath) } else { - g.SetLinuxCgroupsPath(sb.cgroupParent + "/" + containerID) + g.SetLinuxCgroupsPath(sb.cgroupParent + "/" + id) } sb.cgroupParent = cgroupParent @@ -308,23 +325,21 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest } } - err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"), generate.ExportOptions{}) + saveOptions := generate.ExportOptions{} + mountPoint, err := s.storage.StartContainer(id) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to mount container %s in pod sandbox %s(%s): %v", containerName, sb.name, id, err) + } + g.SetRootPath(mountPoint) + err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions) + if err != nil { + return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.name, id, err) + } + if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil { + return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.name, id, err) } - if _, err = os.Stat(podInfraRootfs); err != nil { - if os.IsNotExist(err) { - // TODO: Replace by rootfs creation API when it is ready - if err = utils.CreateInfraRootfs(podInfraRootfs, s.config.Pause); err != nil { - return nil, err - } - } else { - return nil, err - } - } - - container, err := oci.NewContainer(containerID, containerName, podSandboxDir, podSandboxDir, sb.netNs(), labels, annotations, nil, nil, id, false) + container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logDir, sb.netNs(), labels, annotations, nil, nil, id, false) if err != nil { return nil, err } @@ -348,6 +363,19 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest return resp, nil } +func (s *Server) setPodSandboxMountLabel(id, mountLabel string) error { + storageMetadata, err := s.storage.GetContainerMetadata(id) + if err != nil { + return err + } + storageMetadata.SetMountLabel(mountLabel) + err = s.storage.SetContainerMetadata(id, storageMetadata) + if err != nil { + return err + } + return nil +} + func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) { processLabel = "" if selinuxOptions != nil { @@ -375,8 +403,8 @@ func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mo return label.InitLabels(label.DupSecOpt(processLabel)) } -func setupShm(podSandboxDir, mountLabel string) (shmPath string, err error) { - shmPath = filepath.Join(podSandboxDir, "shm") +func setupShm(podSandboxRunDir, mountLabel string) (shmPath string, err error) { + shmPath = filepath.Join(podSandboxRunDir, "shm") if err = os.Mkdir(shmPath, 0700); err != nil { return "", err } diff --git a/server/sandbox_status.go b/server/sandbox_status.go index d3826c3a..7f087fcd 100644 --- a/server/sandbox_status.go +++ b/server/sandbox_status.go @@ -10,6 +10,7 @@ import ( // PodSandboxStatus returns the Status of the PodSandbox. 
func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) { logrus.Debugf("PodSandboxStatusRequest %+v", req) + s.Update() sb, err := s.getPodSandboxFromRequest(req) if err != nil { return nil, err diff --git a/server/sandbox_stop.go b/server/sandbox_stop.go index 47f570c2..fa615acd 100644 --- a/server/sandbox_stop.go +++ b/server/sandbox_stop.go @@ -14,6 +14,7 @@ import ( // sandbox, they should be force terminated. func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) { logrus.Debugf("StopPodSandboxRequest %+v", req) + s.Update() sb, err := s.getPodSandboxFromRequest(req) if err != nil { return nil, err @@ -50,7 +51,7 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque cStatus := s.runtime.ContainerStatus(c) if cStatus.Status != oci.ContainerStateStopped { if err := s.runtime.StopContainer(c); err != nil { - return nil, fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), sb.id, err) + return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.id, err) } } } diff --git a/server/server.go b/server/server.go index b2e2cc3c..1800c1f0 100644 --- a/server/server.go +++ b/server/server.go @@ -5,14 +5,16 @@ import ( "fmt" "io/ioutil" "os" - "path/filepath" "sync" "syscall" "github.com/Sirupsen/logrus" + "github.com/containers/image/types" + sstorage "github.com/containers/storage/storage" "github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/truncindex" "github.com/kubernetes-incubator/cri-o/oci" + "github.com/kubernetes-incubator/cri-o/pkg/storage" "github.com/kubernetes-incubator/cri-o/server/apparmor" "github.com/kubernetes-incubator/cri-o/server/seccomp" "github.com/opencontainers/runc/libcontainer/label" @@ -29,6 +31,9 @@ const ( type Server struct { config Config runtime *oci.Runtime + store sstorage.Store + images storage.ImageServer + storage storage.RuntimeServer stateLock sync.Mutex state *serverState netPlugin ocicni.CNIPlugin @@ -36,6 +41,7 @@ type Server struct { podIDIndex *truncindex.TruncIndex ctrNameIndex *registrar.Registrar ctrIDIndex *truncindex.TruncIndex + imageContext *types.SystemContext seccompEnabled bool seccompProfile seccomp.Seccomp @@ -45,7 +51,7 @@ type Server struct { } func (s *Server) loadContainer(id string) error { - config, err := ioutil.ReadFile(filepath.Join(s.runtime.ContainerDir(), id, "config.json")) + config, err := s.store.GetFromContainerDirectory(id, "config.json") if err != nil { return err } @@ -76,7 +82,10 @@ func (s *Server) loadContainer(id string) error { if v := m.Annotations["ocid/tty"]; v == "true" { tty = true } - containerPath := filepath.Join(s.runtime.ContainerDir(), id) + containerPath, err := s.store.GetContainerRunDirectory(id) + if err != nil { + return err + } var img *pb.ImageSpec image, ok := m.Annotations["ocid/image"] @@ -122,7 +131,7 @@ func configNetNsPath(spec rspec.Spec) (string, error) { } func (s *Server) loadSandbox(id string) error { - config, err := ioutil.ReadFile(filepath.Join(s.config.SandboxDir, id, "config.json")) + config, err := s.store.GetFromContainerDirectory(id, "config.json") if err != nil { return err } @@ -184,7 +193,10 @@ func (s *Server) loadSandbox(id string) error { s.addSandbox(sb) - sandboxPath := filepath.Join(s.config.SandboxDir, id) + sandboxPath, err := s.store.GetContainerRunDirectory(id) + if err != nil { + return err + } if err = label.ReserveLabel(processLabel); err 
!= nil { return err @@ -200,7 +212,7 @@ func (s *Server) loadSandbox(id string) error { } sb.infraContainer = scontainer if err = s.runtime.UpdateStatus(scontainer); err != nil { - logrus.Warnf("error updating status for container %s: %v", scontainer.ID(), err) + logrus.Warnf("error updating status for pod sandbox infra container %s: %v", scontainer.ID(), err) } if err = s.ctrIDIndex.Add(scontainer.ID()); err != nil { return err @@ -212,31 +224,138 @@ func (s *Server) loadSandbox(id string) error { } func (s *Server) restore() { - sandboxDir, err := ioutil.ReadDir(s.config.SandboxDir) + containers, err := s.store.Containers() if err != nil && !os.IsNotExist(err) { - logrus.Warnf("could not read sandbox directory %s: %v", sandboxDir, err) + logrus.Warnf("could not read containers and sandboxes: %v", err) } - for _, v := range sandboxDir { - if !v.IsDir() { + pods := map[string]*storage.RuntimeContainerMetadata{} + podContainers := map[string]*storage.RuntimeContainerMetadata{} + for _, container := range containers { + metadata, err2 := s.storage.GetContainerMetadata(container.ID) + if err2 != nil { + logrus.Warnf("error parsing metadata for %s: %v, ignoring", container.ID, err2) continue } - if err = s.loadSandbox(v.Name()); err != nil { - logrus.Warnf("could not restore sandbox %s: %v", v.Name(), err) + if metadata.Pod { + pods[container.ID] = &metadata + } else { + podContainers[container.ID] = &metadata } } - containerDir, err := ioutil.ReadDir(s.runtime.ContainerDir()) - if err != nil && !os.IsNotExist(err) { - logrus.Warnf("could not read container directory %s: %v", s.runtime.ContainerDir(), err) - } - for _, v := range containerDir { - if !v.IsDir() { - continue + for containerID, metadata := range pods { + if err = s.loadSandbox(containerID); err != nil { + logrus.Warnf("could not restore sandbox %s container %s: %v", metadata.PodID, containerID, err) } - if err := s.loadContainer(v.Name()); err != nil { - logrus.Warnf("could not restore container %s: %v", v.Name(), err) + } + for containerID := range podContainers { + if err := s.loadContainer(containerID); err != nil { + logrus.Warnf("could not restore container %s: %v", containerID, err) + } + } +} +// Update makes changes to the server's state (lists of pods and containers) to +// reflect the list of pods and containers that are stored on disk, possibly +// having been modified by other parties +func (s *Server) Update() { + logrus.Debugf("updating sandbox and container information") + if err := s.update(); err != nil { + logrus.Errorf("error updating sandbox and container information: %v", err) + } +} + +func (s *Server) update() error { + containers, err := s.store.Containers() + if err != nil && !os.IsNotExist(err) { + logrus.Warnf("could not read containers and sandboxes: %v", err) + return err + } + newPods := map[string]*storage.RuntimeContainerMetadata{} + oldPods := map[string]string{} + removedPods := map[string]string{} + newPodContainers := map[string]*storage.RuntimeContainerMetadata{} + oldPodContainers := map[string]string{} + removedPodContainers := map[string]string{} + for _, container := range containers { + if s.hasSandbox(container.ID) { + // FIXME: do we need to reload/update any info about the sandbox? + oldPods[container.ID] = container.ID + oldPodContainers[container.ID] = container.ID + continue + } + if s.getContainer(container.ID) != nil { + // FIXME: do we need to reload/update any info about the container? 
+ oldPodContainers[container.ID] = container.ID + continue + } + // not previously known, so figure out what it is + metadata, err2 := s.storage.GetContainerMetadata(container.ID) + if err2 != nil { + logrus.Errorf("error parsing metadata for %s: %v, ignoring", container.ID, err2) + continue + } + if metadata.Pod { + newPods[container.ID] = &metadata + } else { + newPodContainers[container.ID] = &metadata } } + s.ctrIDIndex.Iterate(func(id string) { + if _, ok := oldPodContainers[id]; !ok { + // this container's ID wasn't in the updated list -> removed + removedPodContainers[id] = id + } + }) + for removedPodContainer := range removedPodContainers { + // forget this container + c := s.getContainer(removedPodContainer) + s.releaseContainerName(c.Name()) + s.removeContainer(c) + if err = s.ctrIDIndex.Delete(c.ID()); err != nil { + return err + } + logrus.Debugf("forgetting removed pod container %s", c.ID()) + } + s.podIDIndex.Iterate(func(id string) { + if _, ok := oldPods[id]; !ok { + // this pod's ID wasn't in the updated list -> removed + removedPods[id] = id + } + }) + for removedPod := range removedPods { + // forget this pod + sb := s.getSandbox(removedPod) + podInfraContainer := sb.infraContainer + s.releaseContainerName(podInfraContainer.Name()) + s.removeContainer(podInfraContainer) + if err = s.ctrIDIndex.Delete(podInfraContainer.ID()); err != nil { + return err + } + sb.infraContainer = nil + s.releasePodName(sb.name) + s.removeSandbox(sb.id) + if err = s.podIDIndex.Delete(sb.id); err != nil { + return err + } + logrus.Debugf("forgetting removed pod %s", sb.id) + } + for sandboxID := range newPods { + // load this pod + if err = s.loadSandbox(sandboxID); err != nil { + logrus.Warnf("could not load new pod sandbox %s: %v, ignoring", sandboxID, err) + } else { + logrus.Debugf("loaded new pod sandbox %s", sandboxID, err) + } + } + for containerID := range newPodContainers { + // load this container + if err = s.loadContainer(containerID); err != nil { + logrus.Warnf("could not load new sandbox container %s: %v, ignoring", containerID, err) + } else { + logrus.Debugf("loaded new pod container %s", containerID, err) + } + } + return nil } func (s *Server) reservePodName(id, name string) (string, error) { @@ -294,17 +413,35 @@ func seccompEnabled() bool { return enabled } +// Shutdown attempts to shut down the server's storage cleanly +func (s *Server) Shutdown() error { + _, err := s.store.Shutdown(false) + return err +} + // New creates a new Server with options provided func New(config *Config) (*Server, error) { - if err := os.MkdirAll(config.ImageDir, 0755); err != nil { + store, err := sstorage.GetStore(sstorage.StoreOptions{ + RunRoot: config.RunRoot, + GraphRoot: config.Root, + GraphDriverName: config.Storage, + GraphDriverOptions: config.StorageOptions, + }) + if err != nil { return nil, err } - if err := os.MkdirAll(config.SandboxDir, 0755); err != nil { + imageService, err := storage.GetImageService(store, config.DefaultTransport) + if err != nil { return nil, err } - r, err := oci.New(config.Runtime, config.ContainerDir, config.Conmon, config.ConmonEnv, config.CgroupManager) + storageRuntimeService := storage.GetRuntimeService(imageService) + if err != nil { + return nil, err + } + + r, err := oci.New(config.Runtime, config.Conmon, config.ConmonEnv, config.CgroupManager) if err != nil { return nil, err } @@ -316,6 +453,9 @@ func New(config *Config) (*Server, error) { } s := &Server{ runtime: r, + store: store, + images: imageService, + storage: storageRuntimeService, 
netPlugin: netPlugin, config: *config, state: &serverState{ @@ -346,6 +486,9 @@ func New(config *Config) (*Server, error) { s.podNameIndex = registrar.NewRegistrar() s.ctrIDIndex = truncindex.NewTruncIndex([]string{}) s.ctrNameIndex = registrar.NewRegistrar() + s.imageContext = &types.SystemContext{ + SignaturePolicyPath: config.ImageConfig.SignaturePolicyPath, + } s.restore() diff --git a/test/helpers.bash b/test/helpers.bash index 8819b77c..f705b85c 100644 --- a/test/helpers.bash +++ b/test/helpers.bash @@ -59,7 +59,7 @@ PATH=$PATH:$TESTDIR # Run ocid using the binary specified by $OCID_BINARY. # This must ONLY be run on engines created with `start_ocid`. function ocid() { - "$OCID_BINARY" "$@" + "$OCID_BINARY" --listen "$OCID_SOCKET" "$@" } # Run ocic using the binary specified by $OCID_BINARY. @@ -112,7 +112,7 @@ function start_ocid() { apparmor="$APPARMOR_PROFILE" fi - "$OCID_BINARY" --conmon "$CONMON_BINARY" --pause "$PAUSE_BINARY" --listen "$OCID_SOCKET" --runtime "$RUNC_BINARY" --root "$TESTDIR/ocid" --sandboxdir "$TESTDIR/sandboxes" --containerdir "$TESTDIR/ocid/containers" --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$OCID_CNI_CONFIG" config >$OCID_CONFIG + "$OCID_BINARY" --conmon "$CONMON_BINARY" --listen "$OCID_SOCKET" --runtime "$RUNC_BINARY" --root "$TESTDIR/ocid" --runroot "$TESTDIR/ocid-run" --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$OCID_CNI_CONFIG" --signature-policy "$INTEGRATION_ROOT"/policy.json config >$OCID_CONFIG "$OCID_BINARY" --debug --config "$OCID_CONFIG" & OCID_PID=$! wait_until_reachable } @@ -130,6 +130,18 @@ function cleanup_ctrs() { fi } +function cleanup_images() { + run ocic image list --quiet + if [ "$status" -eq 0 ]; then + if [ "$output" != "" ]; then + printf '%s\n' "$output" | while IFS= read -r line + do + ocic image remove --id "$line" + done + fi + fi +} + function cleanup_pods() { run ocic pod list --quiet if [ "$status" -eq 0 ]; then @@ -147,6 +159,7 @@ function cleanup_pods() { function stop_ocid() { if [ "$OCID_PID" != "" ]; then kill "$OCID_PID" >/dev/null 2>&1 + wait "$OCID_PID" rm -f "$OCID_CONFIG" fi } diff --git a/test/policy.json b/test/policy.json new file mode 100644 index 00000000..bb26e57f --- /dev/null +++ b/test/policy.json @@ -0,0 +1,7 @@ +{ + "default": [ + { + "type": "insecureAcceptAnything" + } + ] +} diff --git a/utils/utils.go b/utils/utils.go index 5323d290..0db64aaf 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -4,13 +4,9 @@ import ( "bytes" "fmt" "io" - "os" "os/exec" - "path/filepath" "strings" "syscall" - - "github.com/Sirupsen/logrus" ) // ExecCmd executes a command with args and returns its output as a string along @@ -54,74 +50,7 @@ func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { return } -// CreateFakeRootfs creates a fake rootfs for test. -func CreateFakeRootfs(dir string, image string) error { - if len(image) <= 9 || image[:9] != "docker://" { - return fmt.Errorf("CreateFakeRootfs only support docker images currently") - } - - rootfs := filepath.Join(dir, "rootfs") - if err := os.MkdirAll(rootfs, 0755); err != nil { - return err - } - - // docker export $(docker create image[9:]) | tar -C rootfs -xf - - return dockerExport(image[9:], rootfs) -} - -// CreateInfraRootfs creates a rootfs similar to CreateFakeRootfs, but only -// copies a single binary from the host into the rootfs. This is all done -// without Docker, and is only used currently for the pause container which is -// required for all sandboxes. 
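A quick note on the signature-policy wiring above (the `SignaturePolicyPath` handed to `types.SystemContext` and the permissive `test/policy.json`): the sketch below, not part of the patch, shows how such a file is loaded with the `containers/image` signature package; the relative path assumes it is run from the repository root.

```go
package main

import (
	"fmt"

	"github.com/containers/image/signature"
)

func main() {
	// Load the test policy added by this patch and build the policy context
	// that containers/image consults before trusting a pulled image.
	policy, err := signature.NewPolicyFromFile("test/policy.json")
	if err != nil {
		panic(err)
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer func() { _ = pc.Destroy() }()
	fmt.Println("loaded policy: insecureAcceptAnything for all images")
}
```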
-func CreateInfraRootfs(dir string, src string) error { - rootfs := filepath.Join(dir, "rootfs") - if err := os.MkdirAll(rootfs, 0755); err != nil { - return err - } - - dest := filepath.Join(rootfs, filepath.Base(src)) - logrus.Debugf("copying infra rootfs binary: %v -> %v", src, dest) - - in, err := os.OpenFile(src, os.O_RDONLY, 0755) - if err != nil { - return err - } - defer in.Close() - - out, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0755) - if err != nil { - return err - } - defer out.Close() - - if _, err := io.Copy(out, in); err != nil { - return err - } - - return out.Sync() -} - -func dockerExport(image string, rootfs string) error { - out, err := ExecCmd("docker", "create", image) - if err != nil { - return err - } - - container := out[:strings.Index(out, "\n")] - - cmd := fmt.Sprintf("docker export %s | tar -C %s -xf -", container, rootfs) - if _, err := ExecCmd("/bin/bash", "-c", cmd); err != nil { - err1 := dockerRemove(container) - if err1 == nil { - return err - } - return fmt.Errorf("%v; %v", err, err1) - } - - return dockerRemove(container) -} - -func dockerRemove(container string) error { - _, err := ExecCmd("docker", "rm", container) - return err +// StatusToExitCode converts wait status code to an exit code +func StatusToExitCode(status int) int { + return ((status) & 0xff00) >> 8 }
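To close, a self-contained check (illustrative, not part of the patch) that the new `StatusToExitCode` helper agrees with the standard library's decoding of a wait(2) status word; the sample status value is fabricated for the example.

```go
package main

import (
	"fmt"
	"syscall"
)

// Same bit operation as utils.StatusToExitCode: for a normally exited child,
// the exit code sits in bits 8-15 of the wait status.
func statusToExitCode(status int) int {
	return (status & 0xff00) >> 8
}

func main() {
	raw := 0x0300 // what wait4(2) reports for a child that exited with code 3
	fmt.Println(statusToExitCode(raw))                // 3
	fmt.Println(syscall.WaitStatus(raw).ExitStatus()) // 3, the stdlib equivalent
}
```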