Merge pull request #30 from runcom/restore-sandboxes

sandboxes restore on server start
Mrunal Patel 2016-09-19 11:10:26 -07:00 committed by GitHub
commit fd265ef527
16 changed files with 598 additions and 482 deletions
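In brief, the restore added here works by persisting a small metadata.json in each pod sandbox directory at creation time and reading those files back when the server starts. Below is a minimal, self-contained sketch of that round-trip, assuming only the on-disk layout introduced in this change (one subdirectory per sandbox under the sandbox dir); the function names are illustrative, while the JSON field names come from the metadata struct added in server/sandbox.go.

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "path/filepath"
)

// metadata mirrors the struct this commit persists as metadata.json in each
// pod sandbox directory (see server/sandbox.go below).
type metadata struct {
    LogDir        string            `json:"log_dir"`
    ContainerName string            `json:"container_name"`
    Labels        map[string]string `json:"labels"`
}

// restoreSandboxes walks sandboxDir and decodes one metadata.json per
// subdirectory, returning the recovered entries keyed by sandbox name.
// It is a simplified stand-in for Server.restore/loadSandbox; the real code
// also rebuilds the in-memory sandbox and infra-container objects.
func restoreSandboxes(sandboxDir string) (map[string]metadata, error) {
    entries, err := ioutil.ReadDir(sandboxDir)
    if err != nil {
        return nil, err
    }
    out := make(map[string]metadata)
    for _, e := range entries {
        b, err := ioutil.ReadFile(filepath.Join(sandboxDir, e.Name(), "metadata.json"))
        if err != nil {
            return nil, err
        }
        var m metadata
        if err := json.Unmarshal(b, &m); err != nil {
            return nil, err
        }
        out[e.Name()] = m
    }
    return out, nil
}

func main() {
    // "/var/lib/ocid/sandboxes" is an illustrative path; ocid takes the
    // sandbox directory from its sandboxdir flag.
    sandboxes, err := restoreSandboxes("/var/lib/ocid/sandboxes")
    if err != nil {
        fmt.Println("nothing to restore:", err)
        return
    }
    fmt.Printf("restored %d sandbox(es)\n", len(sandboxes))
}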

.tool/check-license (new executable file, 13 lines)

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
ret=0
for file in $(find . -type f -iname '*.go' ! -path './vendor/*'); do
(head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)") || (echo "${file}:missing license header" && ret=1)
done
exit $ret
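For illustration, here is the kind of Go file header this check accepts: head -n3 only needs to match one of Copyright, generated, or GENERATED within the first three lines. The header text and package name below are illustrative, not the project's actual boilerplate.

// Copyright 2016 The ocid Authors.
// Licensed under the Apache License, Version 2.0;
// see the LICENSE file in the repository root for details.

package oci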

.tool/lint (new executable file, 24 lines)

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
if [ ! $(command -v gometalinter) ]; then
go get -u github.com/alecthomas/gometalinter
gometalinter --update --install
fi
for d in $(find . -type d -not -iwholename '*.git*' -a -not -iname '.tool' -a -not -iwholename '*vendor*'); do
gometalinter \
--exclude='error return value not checked.*(Close|Log|Print).*\(errcheck\)$' \
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \
--exclude='duplicate of.*_test.go.*\(dupl\)$' \
--exclude='duplicate of.*main.go.*\(dupl\)$' \
--disable=aligncheck \
--disable=gotype \
--disable=gas \
--cyclo-over=50 \
--tests \
--deadline=10s "${d}"
done


@@ -1,14 +1,22 @@
language: go
go:
- 1.6
- 1.7
sudo: false
sudo: required
before_script:
- export PATH=$HOME/gopath/bin:$PATH
before_install:
- go get github.com/vbatts/git-validation
- make install.tools
- go get -u github.com/alecthomas/gometalinter
- gometalinter --install --update
- go get -t -d ./...
install: true
script:
- $HOME/gopath/bin/git-validation -run DCO,short-subject -v -range ${TRAVIS_COMMIT_RANGE}
- make .gitvalidation
- make lint
- make


@@ -1,6 +1,18 @@
.PHONY: all clean conmon ocid ocic update-deps
EPOCH_TEST_COMMIT ?= 7fc874e05e74faa81e7c423b6514fc5c474c6b34
all: conmon ocid ocic
default: help
help:
@echo "Usage: make <target>"
@echo
@echo " * 'binaries' - Build ocid, conmon and ocic"
@echo " * 'clean' - Clean artifacts"
@echo " * 'lint' - Execute the source code linter"
@echo " * 'update-deps' - Update vendored dependencies"
lint:
@echo "checking lint"
@./.tool/lint
conmon:
make -C $@
@@ -13,6 +25,7 @@ ocic:
clean:
rm -f ocic ocid
rm -f conmon/conmon.o conmon/conmon
update-deps:
@which glide > /dev/null 2>/dev/null || (echo "ERROR: glide not found." && false)
@@ -20,3 +33,41 @@ update-deps:
glide-vc --only-code --no-tests
# see http://sed.sourceforge.net/sed1line.txt
find vendor -type f -exec sed -i -e :a -e '/^\n*$$/{$$d;N;ba' -e '}' "{}" \;
binaries: ocid ocic conmon
.PHONY: .gitvalidation
#
# When this is running in travis, it will only check the travis commit range
.gitvalidation:
@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found. Consider 'make install.tools' target" && false)
ifeq ($(TRAVIS),true)
git-validation -q -run DCO,short-subject,dangling-whitespace
else
git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
endif
.PHONY: install.tools
install.tools: .install.gitvalidation .install.glide .install.glide-vc .install.gometalinter
.install.gitvalidation:
go get github.com/vbatts/git-validation
.install.glide:
go get github.com/Masterminds/glide
.install.glide-vc:
go get github.com/sgotti/glide-vc
.install.gometalinter:
go get github.com/alecthomas/gometalinter
gometalinter --install --update
.PHONY: \
binaries \
conmon \
ocid \
ocic \
clean \
lint


@@ -250,12 +250,10 @@ func main() {
}
}
func PullImage(client pb.ImageServiceClient, image string) error {
_, err := client.PullImage(context.Background(), &pb.PullImageRequest{Image: &pb.ImageSpec{Image: &image}})
if err != nil {
return err
}
return nil
// PullImage sends a PullImageRequest to the server and returns
// the resulting PullImageResponse.
func PullImage(client pb.ImageServiceClient, image string) (*pb.PullImageResponse, error) {
return client.PullImage(context.Background(), &pb.PullImageRequest{Image: &pb.ImageSpec{Image: &image}})
}
// try this with ./ocic pullimage docker://busybox
@@ -271,7 +269,7 @@ var pullImageCommand = cli.Command{
defer conn.Close()
client := pb.NewImageServiceClient(conn)
err = PullImage(client, context.Args().Get(0))
_, err = PullImage(client, context.Args().Get(0))
if err != nil {
return fmt.Errorf("pulling image failed: %v", err)
}


@@ -2,7 +2,6 @@ package main
import (
"fmt"
"log"
"net"
"os"
@@ -80,12 +79,12 @@ func main() {
// Remove the socket if it already exists
if _, err := os.Stat(unixDomainSocket); err == nil {
if err := os.Remove(unixDomainSocket); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
lis, err := net.Listen("unix", unixDomainSocket)
if err != nil {
log.Fatalf("failed to listen: %v", err)
logrus.Fatalf("failed to listen: %v", err)
}
s := grpc.NewServer()
@@ -94,16 +93,18 @@ func main() {
sandboxDir := c.String("sandboxdir")
service, err := server.New(c.String("runtime"), sandboxDir, containerDir)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
runtime.RegisterRuntimeServiceServer(s, service)
runtime.RegisterImageServiceServer(s, service)
s.Serve(lis)
if err := s.Serve(lis); err != nil {
logrus.Fatal(err)
}
return nil
}
if err := app.Run(os.Args); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}


@@ -38,7 +38,7 @@ type Runtime struct {
// syncInfo is used to return data from monitor process to daemon
type syncInfo struct {
Pid int `"json:pid"`
Pid int `json:"pid"`
}
// Name returns the name of the OCI Runtime
@@ -141,7 +141,7 @@ func (r *Runtime) DeleteContainer(c *Container) error {
return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.path, "delete", c.name)
}
// updateStatus refreshes the status of the container.
// UpdateStatus refreshes the status of the container.
func (r *Runtime) UpdateStatus(c *Container) error {
c.stateLock.Lock()
defer c.stateLock.Unlock()
@@ -161,7 +161,7 @@ func (r *Runtime) UpdateStatus(c *Container) error {
return fmt.Errorf("failed to find container exit file: %v", err)
}
st := fi.Sys().(*syscall.Stat_t)
c.state.Finished = time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
c.state.Finished = time.Unix(st.Ctim.Sec, st.Ctim.Nsec)
statusCodeStr, err := ioutil.ReadFile(exitFilePath)
if err != nil {
@@ -196,7 +196,7 @@ type Container struct {
stateLock sync.Mutex
}
// ContainerStatus represents the status of a container.
// ContainerState represents the status of a container.
type ContainerState struct {
specs.State
Created time.Time `json:"created"`


@@ -1,16 +0,0 @@
package server
const (
// According to http://man7.org/linux/man-pages/man5/resolv.conf.5.html:
// "The search list is currently limited to six domains with a total of 256 characters."
maxDNSSearches = 6
)
const (
// by default, cpu.cfs_period_us is set to be 1000000 (i.e., 1s).
defaultCPUCFSPeriod = 1000000
// the upper limit of cpu.cfs_quota_us is 1000000.
maxCPUCFSQuota = 1000000
// the lower limit of cpu.cfs_quota_us is 1000.
minCPUCFSQuota = 1000
)


@@ -13,313 +13,12 @@ import (
"golang.org/x/net/context"
)
// Version returns the runtime name, runtime version and runtime API version
func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) {
version, err := getGPRCVersion()
if err != nil {
return nil, err
}
runtimeVersion, err := s.runtime.Version()
if err != nil {
return nil, err
}
// taking const address
rav := runtimeAPIVersion
runtimeName := s.runtime.Name()
return &pb.VersionResponse{
Version: &version,
RuntimeName: &runtimeName,
RuntimeVersion: &runtimeVersion,
RuntimeApiVersion: &rav,
}, nil
}
// CreatePodSandbox creates a pod-level sandbox.
// The definition of PodSandbox is at https://github.com/kubernetes/kubernetes/pull/25899
func (s *Server) CreatePodSandbox(ctx context.Context, req *pb.CreatePodSandboxRequest) (*pb.CreatePodSandboxResponse, error) {
var err error
if err := os.MkdirAll(s.sandboxDir, 0755); err != nil {
return nil, err
}
// process req.Name
name := req.GetConfig().GetMetadata().GetName()
if name == "" {
return nil, fmt.Errorf("PodSandboxConfig.Name should not be empty")
}
podSandboxDir := filepath.Join(s.sandboxDir, name)
if _, err := os.Stat(podSandboxDir); err == nil {
return nil, fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir)
}
if err := os.MkdirAll(podSandboxDir, 0755); err != nil {
return nil, err
}
defer func() {
if err != nil {
os.RemoveAll(podSandboxDir)
}
}()
// creates a spec Generator with the default spec.
g := generate.New()
// setup defaults for the pod sandbox
g.SetRootPath("/var/lib/ocid/graph/vfs/pause")
g.SetRootReadonly(true)
g.SetProcessArgs([]string{"/pause"})
// process req.Hostname
hostname := req.GetConfig().GetHostname()
if hostname != "" {
g.SetHostname(hostname)
}
// process req.LogDirectory
logDir := req.GetConfig().GetLogDirectory()
if logDir == "" {
logDir = fmt.Sprintf("/var/log/ocid/pods/%s", name)
}
dnsServers := req.GetConfig().GetDnsOptions().GetServers()
dnsSearches := req.GetConfig().GetDnsOptions().GetSearches()
resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir)
err = parseDNSOptions(dnsServers, dnsSearches, resolvPath)
if err != nil {
err1 := removeFile(resolvPath)
if err1 != nil {
err = err1
return nil, fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
}
return nil, err
}
g.AddBindMount(resolvPath, "/etc/resolv.conf", "ro")
labels := req.GetConfig().GetLabels()
s.addSandbox(&sandbox{
name: name,
logDir: logDir,
labels: labels,
containers: oci.NewMemoryStore(),
})
annotations := req.GetConfig().GetAnnotations()
for k, v := range annotations {
g.AddAnnotation(k, v)
}
cgroupParent := req.GetConfig().GetLinux().GetCgroupParent()
if cgroupParent != "" {
g.SetLinuxCgroupsPath(cgroupParent)
}
// set up namespaces
if req.GetConfig().GetLinux().GetNamespaceOptions().GetHostNetwork() {
err = g.RemoveLinuxNamespace("network")
if err != nil {
return nil, err
}
}
if req.GetConfig().GetLinux().GetNamespaceOptions().GetHostPid() {
err = g.RemoveLinuxNamespace("pid")
if err != nil {
return nil, err
}
}
if req.GetConfig().GetLinux().GetNamespaceOptions().GetHostIpc() {
err = g.RemoveLinuxNamespace("ipc")
if err != nil {
return nil, err
}
}
err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"))
if err != nil {
return nil, err
}
containerName := name + "-infra"
container, err := oci.NewContainer(containerName, podSandboxDir, podSandboxDir, labels, name, false)
if err != nil {
return nil, err
}
if err = s.runtime.CreateContainer(container); err != nil {
return nil, err
}
if err = s.runtime.UpdateStatus(container); err != nil {
return nil, err
}
// Setup the network
podNamespace := ""
netnsPath, err := container.NetNsPath()
if err != nil {
return nil, err
}
if err = s.netPlugin.SetUpPod(netnsPath, podNamespace, name, containerName); err != nil {
return nil, fmt.Errorf("failed to create network for container %s in sandbox %s: %v", containerName, name, err)
}
if err = s.runtime.StartContainer(container); err != nil {
return nil, err
}
s.addContainer(container)
if err = s.runtime.UpdateStatus(container); err != nil {
return nil, err
}
return &pb.CreatePodSandboxResponse{PodSandboxId: &name}, nil
}
// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated.
func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
sbName := req.PodSandboxId
if *sbName == "" {
return nil, fmt.Errorf("PodSandboxId should not be empty")
}
sb := s.getSandbox(*sbName)
if sb == nil {
return nil, fmt.Errorf("specified sandbox not found: %s", *sbName)
}
podInfraContainer := *sbName + "-infra"
containersList := sb.containers.List()
for _, c := range containersList {
if podInfraContainer == c.Name() {
podNamespace := ""
netnsPath, err := c.NetNsPath()
if err != nil {
return nil, err
}
if err := s.netPlugin.TearDownPod(netnsPath, podNamespace, *sbName, podInfraContainer); err != nil {
return nil, fmt.Errorf("failed to destroy network for container %s in sandbox %s: %v", c.Name(), *sbName, err)
}
}
if err := s.runtime.StopContainer(c); err != nil {
return nil, fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), *sbName, err)
}
}
return &pb.StopPodSandboxResponse{}, nil
}
// RemovePodSandbox deletes the sandbox. If there are any running containers in the
// sandbox, they should be force deleted.
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
sbName := req.PodSandboxId
if *sbName == "" {
return nil, fmt.Errorf("PodSandboxId should not be empty")
}
sb := s.getSandbox(*sbName)
if sb == nil {
return nil, fmt.Errorf("specified sandbox not found: %s", *sbName)
}
podInfraContainer := *sbName + "-infra"
// Delete all the containers in the sandbox
containersList := sb.containers.List()
for _, c := range containersList {
if err := s.runtime.DeleteContainer(c); err != nil {
return nil, fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), *sbName, err)
}
if podInfraContainer == c.Name() {
continue
}
containerDir := filepath.Join(s.runtime.ContainerDir(), c.Name())
if err := os.RemoveAll(containerDir); err != nil {
return nil, fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err)
}
}
// Remove the files related to the sandbox
podSandboxDir := filepath.Join(s.sandboxDir, *sbName)
if err := os.RemoveAll(podSandboxDir); err != nil {
return nil, fmt.Errorf("failed to remove sandbox %s directory: %v", *sbName, err)
}
return &pb.RemovePodSandboxResponse{}, nil
}
func int64Ptr(i int64) *int64 {
return &i
}
func int32Ptr(i int32) *int32 {
return &i
}
func sPtr(s string) *string {
return &s
}
// PodSandboxStatus returns the Status of the PodSandbox.
func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
sbName := req.PodSandboxId
if *sbName == "" {
return nil, fmt.Errorf("PodSandboxId should not be empty")
}
sb := s.getSandbox(*sbName)
if sb == nil {
return nil, fmt.Errorf("specified sandbox not found: %s", *sbName)
}
podInfraContainerName := *sbName + "-infra"
podInfraContainer := sb.getContainer(podInfraContainerName)
cState := s.runtime.ContainerStatus(podInfraContainer)
created := cState.Created.Unix()
netNsPath, err := podInfraContainer.NetNsPath()
if err != nil {
return nil, err
}
podNamespace := ""
ip, err := s.netPlugin.GetContainerNetworkStatus(netNsPath, podNamespace, *sbName, podInfraContainerName)
if err != nil {
// ignore the error on network status
ip = ""
}
return &pb.PodSandboxStatusResponse{
Status: &pb.PodSandboxStatus{
Id: sbName,
CreatedAt: int64Ptr(created),
Linux: &pb.LinuxPodSandboxStatus{
Namespaces: &pb.Namespace{
Network: sPtr(netNsPath),
},
},
Network: &pb.PodSandboxNetworkStatus{Ip: &ip},
},
}, nil
}
// ListPodSandbox returns a list of SandBox.
func (s *Server) ListPodSandbox(context.Context, *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
return nil, nil
}
// CreateContainer creates a new container in specified PodSandbox
func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (*pb.CreateContainerResponse, error) {
// The id of the PodSandbox
podSandboxId := req.GetPodSandboxId()
if !s.hasSandbox(podSandboxId) {
return nil, fmt.Errorf("the pod sandbox (%s) does not exist", podSandboxId)
podSandboxID := req.GetPodSandboxId()
if !s.hasSandbox(podSandboxID) {
return nil, fmt.Errorf("the pod sandbox (%s) does not exist", podSandboxID)
}
// The config of the container
@@ -344,16 +43,27 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
return nil, err
}
imageSpec := containerConfig.GetImage()
if imageSpec == nil {
return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image is nil")
container, err := s.createSandboxContainer(name, podSandboxID, req.GetSandboxConfig(), containerDir, containerConfig)
if err != nil {
return nil, err
}
image := imageSpec.GetImage()
if image == "" {
return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image.Image is empty")
if err := s.runtime.CreateContainer(container); err != nil {
return nil, err
}
if err := s.runtime.UpdateStatus(container); err != nil {
return nil, err
}
s.addContainer(container)
return &pb.CreateContainerResponse{
ContainerId: &name,
}, nil
}
func (s *Server) createSandboxContainer(name, podSandboxID string, SandboxConfig *pb.PodSandboxConfig, containerDir string, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
// creates a spec Generator with the default spec.
specgen := generate.New()
@@ -404,8 +114,7 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
}
//TODO(hmeng): how to use this info? Do we need to handle relabel a FS with Selinux?
selinuxRelabel := mount.GetSelinuxRelabel()
fmt.Printf("selinuxRelabel: %v\n", selinuxRelabel)
//selinuxRelabel := mount.GetSelinuxRelabel()
specgen.AddBindMount(src, dest, options)
@@ -420,6 +129,8 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
}
}
specgen.AddAnnotation("pod_sandbox_id", podSandboxID)
if containerConfig.GetPrivileged() {
specgen.SetupPrivileged(true)
}
@@ -525,12 +236,8 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
}
}
// The config of the PodSandbox
sandboxConfig := req.GetSandboxConfig()
fmt.Printf("sandboxConfig: %v\n", sandboxConfig)
// Join the namespace paths for the pod sandbox container.
podContainerName := podSandboxId + "-infra"
podContainerName := podSandboxID + "-infra"
podInfraContainer := s.state.containers.Get(podContainerName)
podInfraState := s.runtime.ContainerStatus(podInfraContainer)
@@ -542,37 +249,37 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
"network": "net",
} {
nsPath := fmt.Sprintf("/proc/%d/ns/%s", podInfraState.Pid, nsFile)
specgen.AddOrReplaceLinuxNamespace(nsType, nsPath)
if err := specgen.AddOrReplaceLinuxNamespace(nsType, nsPath); err != nil {
return nil, err
}
}
if err := specgen.SaveToFile(filepath.Join(containerDir, "config.json")); err != nil {
return nil, err
}
imageSpec := containerConfig.GetImage()
if imageSpec == nil {
return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image is nil")
}
image := imageSpec.GetImage()
if image == "" {
return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image.Image is empty")
}
// TODO: copy the rootfs into the bundle.
// Currently, utils.CreateFakeRootfs is used to populate the rootfs.
if err := utils.CreateFakeRootfs(containerDir, image); err != nil {
return nil, err
}
container, err := oci.NewContainer(name, containerDir, logPath, labels, podSandboxId, containerConfig.GetTty())
container, err := oci.NewContainer(name, containerDir, logPath, labels, podSandboxID, containerConfig.GetTty())
if err != nil {
return nil, err
}
if err := s.runtime.CreateContainer(container); err != nil {
return nil, err
}
if err := s.runtime.UpdateStatus(container); err != nil {
return nil, err
}
s.addContainer(container)
return &pb.CreateContainerResponse{
ContainerId: &name,
}, nil
return container, nil
}
// StartContainer starts the container.


@@ -2,6 +2,7 @@ package server
import (
"errors"
"io"
"os"
"path/filepath"
@@ -54,7 +55,7 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
return nil, err
}
if err := os.Mkdir(filepath.Join(imageStore, tr.StringWithinTransport()), 0755); err != nil {
if err = os.Mkdir(filepath.Join(imageStore, tr.StringWithinTransport()), 0755); err != nil {
return nil, err
}
dir, err := directory.NewReference(filepath.Join(imageStore, tr.StringWithinTransport()))
@@ -69,11 +70,12 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
// save blobs (layer + config for docker v2s2, layers only for docker v2s1 [the config is in the manifest])
for _, b := range blobs {
// TODO(runcom,nalin): we need do-then-commit to later purge on error
r, _, err := src.GetBlob(b)
var r io.ReadCloser
r, _, err = src.GetBlob(b)
if err != nil {
return nil, err
}
if _, _, err := dest.PutBlob(r, b, -1); err != nil {
if _, _, err = dest.PutBlob(r, b, -1); err != nil {
r.Close()
return nil, err
}

server/sandbox.go (new file, 319 lines)

@@ -0,0 +1,319 @@
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/kubernetes-incubator/ocid/oci"
pb "github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"github.com/opencontainers/ocitools/generate"
"golang.org/x/net/context"
)
type sandbox struct {
name string
logDir string
labels map[string]string
containers oci.Store
}
type metadata struct {
LogDir string `json:"log_dir"`
ContainerName string `json:"container_name"`
Labels map[string]string `json:"labels"`
}
func (s *sandbox) addContainer(c *oci.Container) {
s.containers.Add(c.Name(), c)
}
func (s *sandbox) getContainer(name string) *oci.Container {
return s.containers.Get(name)
}
func (s *sandbox) removeContainer(c *oci.Container) {
s.containers.Delete(c.Name())
}
// CreatePodSandbox creates a pod-level sandbox.
// The definition of PodSandbox is at https://github.com/kubernetes/kubernetes/pull/25899
func (s *Server) CreatePodSandbox(ctx context.Context, req *pb.CreatePodSandboxRequest) (*pb.CreatePodSandboxResponse, error) {
// process req.Name
name := req.GetConfig().GetMetadata().GetName()
if name == "" {
return nil, fmt.Errorf("PodSandboxConfig.Name should not be empty")
}
podSandboxDir := filepath.Join(s.sandboxDir, name)
if _, err := os.Stat(podSandboxDir); err == nil {
return nil, fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir)
}
if err := os.MkdirAll(podSandboxDir, 0755); err != nil {
return nil, err
}
var err error
defer func() {
if err != nil {
if err2 := os.RemoveAll(podSandboxDir); err2 != nil {
logrus.Warnf("couldn't cleanup podSandboxDir %s: %v", podSandboxDir, err2)
}
}
}()
// creates a spec Generator with the default spec.
g := generate.New()
// setup defaults for the pod sandbox
g.SetRootPath("/var/lib/ocid/graph/vfs/pause")
g.SetRootReadonly(true)
g.SetProcessArgs([]string{"/pause"})
// process req.Hostname
hostname := req.GetConfig().GetHostname()
if hostname != "" {
g.SetHostname(hostname)
}
// process req.LogDirectory
logDir := req.GetConfig().GetLogDirectory()
if logDir == "" {
logDir = fmt.Sprintf("/var/log/ocid/pods/%s", name)
}
dnsServers := req.GetConfig().GetDnsOptions().GetServers()
dnsSearches := req.GetConfig().GetDnsOptions().GetSearches()
resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir)
err = parseDNSOptions(dnsServers, dnsSearches, resolvPath)
if err != nil {
err1 := removeFile(resolvPath)
if err1 != nil {
err = err1
return nil, fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
}
return nil, err
}
g.AddBindMount(resolvPath, "/etc/resolv.conf", "ro")
labels := req.GetConfig().GetLabels()
s.addSandbox(&sandbox{
name: name,
logDir: logDir,
labels: labels,
containers: oci.NewMemoryStore(),
})
annotations := req.GetConfig().GetAnnotations()
for k, v := range annotations {
g.AddAnnotation(k, v)
}
cgroupParent := req.GetConfig().GetLinux().GetCgroupParent()
if cgroupParent != "" {
g.SetLinuxCgroupsPath(cgroupParent)
}
// set up namespaces
if req.GetConfig().GetLinux().GetNamespaceOptions().GetHostNetwork() {
err = g.RemoveLinuxNamespace("network")
if err != nil {
return nil, err
}
}
if req.GetConfig().GetLinux().GetNamespaceOptions().GetHostPid() {
err = g.RemoveLinuxNamespace("pid")
if err != nil {
return nil, err
}
}
if req.GetConfig().GetLinux().GetNamespaceOptions().GetHostIpc() {
err = g.RemoveLinuxNamespace("ipc")
if err != nil {
return nil, err
}
}
err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"))
if err != nil {
return nil, err
}
containerName := name + "-infra"
container, err := oci.NewContainer(containerName, podSandboxDir, podSandboxDir, labels, name, false)
if err != nil {
return nil, err
}
if err = s.runtime.CreateContainer(container); err != nil {
return nil, err
}
if err = s.runtime.UpdateStatus(container); err != nil {
return nil, err
}
// Setup the network
podNamespace := ""
netnsPath, err := container.NetNsPath()
if err != nil {
return nil, err
}
if err = s.netPlugin.SetUpPod(netnsPath, podNamespace, name, containerName); err != nil {
return nil, fmt.Errorf("failed to create network for container %s in sandbox %s: %v", containerName, name, err)
}
if err = s.runtime.StartContainer(container); err != nil {
return nil, err
}
s.addContainer(container)
meta := &metadata{
LogDir: logDir,
ContainerName: containerName,
Labels: labels,
}
b, err := json.Marshal(meta)
if err != nil {
return nil, err
}
// TODO: eventually we would track all containers in this pod so on server start
// we can repopulate the structs in memory properly...
// e.g. each container can write itself in podSandboxDir
err = ioutil.WriteFile(filepath.Join(podSandboxDir, "metadata.json"), b, 0644)
if err != nil {
return nil, err
}
if err = s.runtime.UpdateStatus(container); err != nil {
return nil, err
}
return &pb.CreatePodSandboxResponse{PodSandboxId: &name}, nil
}
// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated.
func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
sbName := req.PodSandboxId
if *sbName == "" {
return nil, fmt.Errorf("PodSandboxId should not be empty")
}
sb := s.getSandbox(*sbName)
if sb == nil {
return nil, fmt.Errorf("specified sandbox not found: %s", *sbName)
}
podInfraContainer := *sbName + "-infra"
for _, c := range sb.containers.List() {
if podInfraContainer == c.Name() {
podNamespace := ""
netnsPath, err := c.NetNsPath()
if err != nil {
return nil, err
}
if err := s.netPlugin.TearDownPod(netnsPath, podNamespace, *sbName, podInfraContainer); err != nil {
return nil, fmt.Errorf("failed to destroy network for container %s in sandbox %s: %v", c.Name(), *sbName, err)
}
}
if err := s.runtime.StopContainer(c); err != nil {
return nil, fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), *sbName, err)
}
}
return &pb.StopPodSandboxResponse{}, nil
}
// RemovePodSandbox deletes the sandbox. If there are any running containers in the
// sandbox, they should be force deleted.
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
sbName := req.PodSandboxId
if *sbName == "" {
return nil, fmt.Errorf("PodSandboxId should not be empty")
}
sb := s.getSandbox(*sbName)
if sb == nil {
return nil, fmt.Errorf("specified sandbox not found: %s", *sbName)
}
podInfraContainer := *sbName + "-infra"
// Delete all the containers in the sandbox
for _, c := range sb.containers.List() {
if err := s.runtime.DeleteContainer(c); err != nil {
return nil, fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), *sbName, err)
}
if podInfraContainer == c.Name() {
continue
}
containerDir := filepath.Join(s.runtime.ContainerDir(), c.Name())
if err := os.RemoveAll(containerDir); err != nil {
return nil, fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err)
}
}
// Remove the files related to the sandbox
podSandboxDir := filepath.Join(s.sandboxDir, *sbName)
if err := os.RemoveAll(podSandboxDir); err != nil {
return nil, fmt.Errorf("failed to remove sandbox %s directory: %v", *sbName, err)
}
return &pb.RemovePodSandboxResponse{}, nil
}
// PodSandboxStatus returns the Status of the PodSandbox.
func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
sbName := req.PodSandboxId
if *sbName == "" {
return nil, fmt.Errorf("PodSandboxId should not be empty")
}
sb := s.getSandbox(*sbName)
if sb == nil {
return nil, fmt.Errorf("specified sandbox not found: %s", *sbName)
}
podInfraContainerName := *sbName + "-infra"
podInfraContainer := sb.getContainer(podInfraContainerName)
cState := s.runtime.ContainerStatus(podInfraContainer)
created := cState.Created.Unix()
netNsPath, err := podInfraContainer.NetNsPath()
if err != nil {
return nil, err
}
podNamespace := ""
ip, err := s.netPlugin.GetContainerNetworkStatus(netNsPath, podNamespace, *sbName, podInfraContainerName)
if err != nil {
// ignore the error on network status
ip = ""
}
return &pb.PodSandboxStatusResponse{
Status: &pb.PodSandboxStatus{
Id: sbName,
CreatedAt: int64Ptr(created),
Linux: &pb.LinuxPodSandboxStatus{
Namespaces: &pb.Namespace{
Network: sPtr(netNsPath),
},
},
Network: &pb.PodSandboxNetworkStatus{Ip: &ip},
},
}, nil
}
// ListPodSandbox returns a list of SandBox.
func (s *Server) ListPodSandbox(context.Context, *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
return nil, nil
}


@@ -1,10 +1,14 @@
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/Sirupsen/logrus"
"github.com/kubernetes-incubator/ocid/oci"
"github.com/kubernetes-incubator/ocid/utils"
"github.com/rajatchopra/ocicni"
@@ -24,6 +28,46 @@ type Server struct {
netPlugin ocicni.CNIPlugin
}
func (s *Server) loadSandbox(id string) error {
metaJSON, err := ioutil.ReadFile(filepath.Join(s.sandboxDir, id, "metadata.json"))
if err != nil {
return err
}
var m metadata
if err = json.Unmarshal(metaJSON, &m); err != nil {
return err
}
s.addSandbox(&sandbox{
name: id,
logDir: m.LogDir,
labels: m.Labels,
containers: oci.NewMemoryStore(),
})
sandboxPath := filepath.Join(s.sandboxDir, id)
scontainer, err := oci.NewContainer(m.ContainerName, sandboxPath, sandboxPath, m.Labels, id, false)
if err != nil {
return err
}
s.addContainer(scontainer)
if err = s.runtime.UpdateStatus(scontainer); err != nil {
logrus.Warnf("error updating status for container %s: %v", scontainer, err)
}
return nil
}
func (s *Server) restore() error {
dir, err := ioutil.ReadDir(s.sandboxDir)
if err != nil {
return err
}
for _, v := range dir {
if err := s.loadSandbox(v.Name()); err != nil {
return err
}
}
return nil
}
// New creates a new Server with options provided
func New(runtimePath, sandboxDir, containerDir string) (*Server, error) {
// TODO: This will go away later when we have wrapper process or systemd acting as
@@ -38,6 +82,10 @@ func New(runtimePath, sandboxDir, containerDir string) (*Server, error) {
return nil, err
}
if err := os.MkdirAll(sandboxDir, 0755); err != nil {
return nil, err
}
r, err := oci.New(runtimePath, containerDir)
if err != nil {
return nil, err
@@ -48,7 +96,7 @@ func New(runtimePath, sandboxDir, containerDir string) (*Server, error) {
if err != nil {
return nil, err
}
return &Server{
s := &Server{
runtime: r,
netPlugin: netPlugin,
sandboxDir: sandboxDir,
@@ -56,7 +104,13 @@ func New(runtimePath, sandboxDir, containerDir string) (*Server, error) {
sandboxes: sandboxes,
containers: containers,
},
}, nil
}
if err := s.restore(); err != nil {
logrus.Warnf("couldn't restore: %v", err)
}
logrus.Debugf("sandboxes: %v", s.state.sandboxes)
logrus.Debugf("containers: %v", s.state.containers)
return s, nil
}
type serverState struct {
@@ -64,25 +118,6 @@ type serverState struct {
containers oci.Store
}
type sandbox struct {
name string
logDir string
labels map[string]string
containers oci.Store
}
func (s *sandbox) addContainer(c *oci.Container) {
s.containers.Add(c.Name(), c)
}
func (s *sandbox) getContainer(name string) *oci.Container {
return s.containers.Get(name)
}
func (s *sandbox) removeContainer(c *oci.Container) {
s.containers.Delete(c.Name())
}
func (s *Server) addSandbox(sb *sandbox) {
s.stateLock.Lock()
s.state.sandboxes[sb.name] = sb


@@ -10,9 +10,26 @@ import (
"strings"
"github.com/kubernetes-incubator/ocid/utils"
"github.com/opencontainers/ocitools/generate"
)
const (
// According to http://man7.org/linux/man-pages/man5/resolv.conf.5.html:
// "The search list is currently limited to six domains with a total of 256 characters."
maxDNSSearches = 6
)
func int64Ptr(i int64) *int64 {
return &i
}
func int32Ptr(i int32) *int32 {
return &i
}
func sPtr(s string) *string {
return &s
}
func getGPRCVersion() (string, error) {
_, file, _, ok := runtime.Caller(0)
if !ok {
@@ -44,11 +61,9 @@ func copyFile(src, dest string) error {
}
defer out.Close()
if _, err := io.Copy(out, in); err != nil {
_, err = io.Copy(out, in)
return err
}
return nil
}
func removeFile(path string) error {
if _, err := os.Stat(path); err == nil {
@@ -94,62 +109,3 @@ func parseDNSOptions(servers, searches []string, path string) error {
return nil
}
// kubernetes compute resources - CPU: http://kubernetes.io/docs/user-guide/compute-resources/#meaning-of-cpu
func setResourcesCPU(limits, requests, defaultCores float64, g generate.Generator) error {
if requests > limits {
return fmt.Errorf("CPU.Requests should not be greater than CPU.Limits")
}
cores := defaultCores
if limits != 0 || requests != 0 {
if limits > requests {
cores = limits
} else {
cores = requests
}
}
period := uint64(defaultCPUCFSPeriod)
quota := uint64(float64(period) * cores)
if quota < minCPUCFSQuota {
quota = minCPUCFSQuota
}
// adjust quota and period for the case where multiple CPUs are requested
// so that cpu.cfs_quota_us <= maxCPUCFSQuota.
for quota > maxCPUCFSQuota {
quota /= 10
period /= 10
}
g.SetLinuxResourcesCPUPeriod(period)
g.SetLinuxResourcesCPUQuota(quota)
return nil
}
// kubernetes compute resources - Memory: http://kubernetes.io/docs/user-guide/compute-resources/#meaning-of-memory
func setResourcesMemory(limits, requests, defaultMem float64, g generate.Generator) error {
if requests > limits {
return fmt.Errorf("Memory.Requests should not be greater than Memory.Limits")
}
if limits != 0 {
if requests == 0 {
requests = limits
}
} else {
if requests == 0 {
// set the default values of limits and requests
requests = defaultMem
limits = defaultMem
} else {
limits = requests
}
}
g.SetLinuxResourcesMemoryLimit(uint64(limits))
g.SetLinuxResourcesMemoryReservation(uint64(requests))
return nil
}
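
The last hunk above shows the signature of parseDNSOptions(servers, searches []string, path string) error, which writes the pod's DNS configuration to a resolv.conf inside the sandbox directory (CreatePodSandbox then bind-mounts it at /etc/resolv.conf), and the maxDNSSearches limit now lives alongside it. Below is a minimal sketch of that kind of rendering under standard resolv.conf nameserver/search syntax; writeResolvConf is an illustrative name, and truncating an over-long search list is an assumption, since the helper's body is not part of this diff.

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "strings"
)

// maxDNSSearches mirrors the constant moved into server/utils.go:
// resolv.conf(5) limits the search list to six domains.
const maxDNSSearches = 6

// writeResolvConf is a hypothetical stand-in for parseDNSOptions: it renders
// the pod's DNS servers and search domains into resolv.conf syntax. This
// sketch simply truncates an over-long search list; the real helper may
// handle the limit differently.
func writeResolvConf(servers, searches []string, path string) error {
    if len(searches) > maxDNSSearches {
        searches = searches[:maxDNSSearches]
    }
    var buf bytes.Buffer
    if len(searches) > 0 {
        fmt.Fprintf(&buf, "search %s\n", strings.Join(searches, " "))
    }
    for _, s := range servers {
        fmt.Fprintf(&buf, "nameserver %s\n", s)
    }
    return ioutil.WriteFile(path, buf.Bytes(), 0644)
}

func main() {
    if err := writeResolvConf([]string{"10.0.0.2"}, []string{"cluster.local"}, "/tmp/resolv.conf"); err != nil {
        fmt.Println(err)
    }
}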

server/version.go (new file, 30 lines)

@@ -0,0 +1,30 @@
package server
import (
pb "github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"golang.org/x/net/context"
)
// Version returns the runtime name, runtime version and runtime API version
func (s *Server) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) {
version, err := getGPRCVersion()
if err != nil {
return nil, err
}
runtimeVersion, err := s.runtime.Version()
if err != nil {
return nil, err
}
// taking const address
rav := runtimeAPIVersion
runtimeName := s.runtime.Name()
return &pb.VersionResponse{
Version: &version,
RuntimeName: &runtimeName,
RuntimeVersion: &runtimeVersion,
RuntimeApiVersion: &rav,
}, nil
}


@@ -1,5 +1,7 @@
{
"name": "container1",
"metadata": {
"name": "container1"
},
"image": {
"image": "docker://redis:latest"
},
@@ -28,22 +30,6 @@
"value": "test/file1"
}
],
"mounts": [
{
"name": "mount1",
"container_path": "/dir1",
"host_path": "/tmp/dir1",
"readonly": false,
"selinux_relabel": true
},
{
"name": "mount2",
"container_path": "/dir2",
"host_path": "/tmp/dir2",
"readonly": true,
"selinux_relabel": true
}
],
"labels": {
"type": "small",
"batch": "no"


@@ -3,6 +3,7 @@ package utils
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"os/signal"
@@ -13,8 +14,11 @@ import (
"github.com/Sirupsen/logrus"
)
const PR_SET_CHILD_SUBREAPER = 36
// PRSetChildSubreaper is the value of PR_SET_CHILD_SUBREAPER in prctl(2)
const PRSetChildSubreaper = 36
// ExecCmd executes a command with args and returns its output as a string along
// with an error, if any
func ExecCmd(name string, args ...string) (string, error) {
cmd := exec.Command(name, args...)
var stdout bytes.Buffer
@@ -31,7 +35,7 @@ func ExecCmd(name string, args ...string) (string, error) {
}
// ExecCmdWithStdStreams execute a command with the specified standard streams.
func ExecCmdWithStdStreams(stdin, stdout, stderr *os.File, name string, args ...string) error {
func ExecCmdWithStdStreams(stdin io.Reader, stdout, stderr io.Writer, name string, args ...string) error {
cmd := exec.Command(name, args...)
cmd.Stdin = stdin
cmd.Stdout = stdout
@@ -47,7 +51,7 @@ func ExecCmdWithStdStreams(stdin, stdout, stderr *os.File, name string, args ...
// SetSubreaper sets the value i as the subreaper setting for the calling process
func SetSubreaper(i int) error {
return Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
return Prctl(PRSetChildSubreaper, uintptr(i), 0, 0, 0)
}
// Prctl is a way to make the prctl linux syscall
@@ -95,11 +99,9 @@ func dockerExport(image string, rootfs string) error {
}
func dockerRemove(container string) error {
if _, err := ExecCmd("docker", "rm", container); err != nil {
_, err := ExecCmd("docker", "rm", container)
return err
}
return nil
}
// StartReaper starts a goroutine to reap processes
func StartReaper() {