Merge pull request #814 from runcom/cache-ip-sandbox

cache sandbox's IP address
Mrunal Patel authored on 2017-09-06 09:51:03 -07:00, committed by GitHub
commit ac12018973
16 changed files with 112 additions and 74 deletions


@@ -76,6 +76,23 @@ RUN set -x \
&& cp bin/* /opt/cni/bin/ \
&& rm -rf "$GOPATH"
# Install custom CNI bridge test plugin
# XXX: this plugin is meant to be a replacement for the old "plugin_test_args.bash".
# We need it in testing because sandbox_run now gathers the IP address, and the mock
# plugin wasn't able to properly set up the net ns.
# The bridge is based on the same commit as the one above.
#ENV CNI_COMMIT 6bfe036c38c8e1410f1acaa4b2ee16f1851472e4
ENV CNI_TEST_BRANCH custom-bridge
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/runcom/plugins.git "$GOPATH/src/github.com/containernetworking/plugins" \
&& cd "$GOPATH/src/github.com/containernetworking/plugins" \
&& git checkout -q "$CNI_TEST_BRANCH" \
&& ./build.sh \
&& mkdir -p /opt/cni/bin \
&& cp bin/bridge /opt/cni/bin/bridge-custom \
&& rm -rf "$GOPATH"
# Install crictl
ENV CRICTL_COMMIT 16e6fe4d7199c5689db4630a9330e6a8a12cecd1
RUN set -x \
@@ -87,8 +104,6 @@ RUN set -x \
&& cp "$GOPATH"/bin/crictl /usr/bin/ \
&& rm -rf "$GOPATH"
COPY test/plugin_test_args.bash /opt/cni/bin/plugin_test_args.bash
# Make sure we have some policy for pulling images
RUN mkdir -p /etc/containers
COPY test/policy.json /etc/containers/policy.json
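
To unpack the XXX comment above: the old mock plugin (the plugin_test_args.bash script deleted at the bottom of this diff) only echoed a canned CNI result and never created an interface inside the sandbox's network namespace, so once sandbox_run starts resolving a real IP the tests need a plugin that actually wires up the netns. The sketch below is only an illustration of the kind of lookup that comes up empty against the mock plugin; it is not CRI-O's code path, and the helper name is made up.

package nettest

import (
	"fmt"
	"net"

	"github.com/containernetworking/plugins/pkg/ns"
)

// podIPv4 is a hypothetical helper: it enters the sandbox's network namespace
// and returns the first non-loopback IPv4 address it finds. With the old mock
// plugin no interface ever existed in the netns, so a lookup like this has
// nothing to return; the custom bridge plugin creates a real veth/bridge pair.
func podIPv4(netnsPath string) (string, error) {
	var ip string
	err := ns.WithNetNSPath(netnsPath, func(_ ns.NetNS) error {
		ifaces, err := net.Interfaces()
		if err != nil {
			return err
		}
		for _, iface := range ifaces {
			if iface.Flags&net.FlagLoopback != 0 {
				continue
			}
			addrs, err := iface.Addrs()
			if err != nil {
				return err
			}
			for _, a := range addrs {
				if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP.To4() != nil {
					ip = ipnet.IP.String()
					return nil
				}
			}
		}
		return fmt.Errorf("no IPv4 address found in %s", netnsPath)
	})
	return ip, err
}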


@@ -46,7 +46,7 @@ func logsCmd(c *cli.Context) error {
if len(args) != 1 {
return errors.Errorf("'kpod logs' requires exactly one container name/ID")
}
container := args[0]
container := c.Args().First()
var opts libkpod.LogOptions
opts.Details = c.Bool("details")
opts.Follow = c.Bool("follow")


@@ -151,8 +151,6 @@
repo: https://github.com/containernetworking/plugins
dest: /root/src/github.com/containernetworking/plugins
version: "{{ cni_commit }}"
async: 600
poll: 10
- name: Git fetch the PR
shell: "git fetch origin +refs/pull/{{ pullrequest }}/head:refs/remotes/origin/pr/{{ pullrequest }}"
args:
@@ -225,10 +223,18 @@
regexp: 'execute time bats --tap \$TESTS'
state: present
when: xunit
- name: Copy plugin args so tests don't hang
shell: "cp test/plugin_test_args.bash /opt/cni/bin/"
- name: git clone cni test repo
git:
repo: https://github.com/runcom/plugins
dest: /root/src/github.com/containernetworking/plugins
version: "custom-bridge"
force: yes
- name: Build cni test networking
shell: ./build.sh
args:
chdir: /root/src/github.com/kubernetes-incubator/cri-o/
chdir: /root/src/github.com/containernetworking/plugins
- name: Copy custom-bridge to /opt/cni/bin
shell: cp /root/src/github.com/containernetworking/plugins/bin/bridge /opt/cni/bin/bridge-custom
# k8s builds with go1.8.x; rhel and fedora don't have it yet
- name: install Golang upstream in Fedora/RHEL
shell: |


@@ -294,6 +294,8 @@ func (c *ContainerServer) LoadSandbox(id string) error {
return err
}
ip := m.Annotations[annotations.IP]
processLabel, mountLabel, err := label.InitLabels(label.DupSecOpt(m.Process.SelinuxLabel))
if err != nil {
return err
@@ -311,6 +313,7 @@ func (c *ContainerServer) LoadSandbox(id string) error {
if err != nil {
return err
}
sb.AddIP(ip)
// We add a netNS only if we can load a permanent one.
// Otherwise, the sandbox will live in the host namespace.


@@ -154,6 +154,8 @@ type Sandbox struct {
hostname string
portMappings []*hostport.PortMapping
stopped bool
// ipv4 or ipv6 cache
ip string
}
const (
@@ -202,6 +204,16 @@ func New(id, namespace, name, kubeName, logDir string, labels, annotations map[s
return sb, nil
}
// AddIP stores the ip in the sandbox
func (s *Sandbox) AddIP(ip string) {
s.ip = ip
}
// IP returns the ip of the sandbox
func (s *Sandbox) IP() string {
return s.ip
}
// ID returns the id of the sandbox
func (s *Sandbox) ID() string {
return s.id
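
The new ip field is a simple write-once cache: RunPodSandbox calls AddIP once the network is up, LoadSandbox repopulates it from the io.kubernetes.cri-o.IP annotation after a crio restart, and PodSandboxStatus reads it back without touching the CNI plugin. A minimal sketch of that round-trip, using a trimmed-down Sandbox and a plain map standing in for the runtime spec's annotations (both simplifications, not the full types):

package sandboxcache

// IPAnnotation mirrors annotations.IP from this diff.
const IPAnnotation = "io.kubernetes.cri-o.IP"

// Sandbox is trimmed down to the field this PR adds.
type Sandbox struct {
	ip string // ipv4 or ipv6 cache
}

// AddIP stores the ip in the sandbox.
func (s *Sandbox) AddIP(ip string) { s.ip = ip }

// IP returns the cached ip of the sandbox.
func (s *Sandbox) IP() string { return s.ip }

// persist records the cached IP the way RunPodSandbox does: as a spec annotation
// that ends up in the sandbox's config.json.
func persist(s *Sandbox, specAnnotations map[string]string) {
	specAnnotations[IPAnnotation] = s.IP()
}

// restore is what LoadSandbox does after a restart: read the annotation back
// into the in-memory cache instead of querying the network plugin again.
func restore(s *Sandbox, specAnnotations map[string]string) {
	s.AddIP(specAnnotations[IPAnnotation])
}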


@@ -202,6 +202,10 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) error {
if err != nil {
logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
} else {
// XXX: this defer does nothing because the cgroup can't be deleted while
// it still contains the conmon pid in its tasks file.
// We need to remove this defer and delete the cgroup once conmon exits,
// which may require a conmon monitor.
defer control.Delete()
if err := control.Add(cgroups.Process{Pid: cmd.Process.Pid}); err != nil {
logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)


@@ -19,6 +19,9 @@ const (
// HostName is the container host name annotation
HostName = "io.kubernetes.cri-o.HostName"
// IP is the container ipv4 or ipv6 address
IP = "io.kubernetes.cri-o.IP"
// Image is the container image ID annotation
Image = "io.kubernetes.cri-o.Image"


@@ -396,6 +396,11 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
specgen.AddAnnotation(k, v)
}
}
if labels != nil {
for k, v := range labels {
specgen.AddAnnotation(k, v)
}
}
// set this container's apparmor profile if it is set by sandbox
if s.appArmorEnabled {
@@ -622,6 +627,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
specgen.AddAnnotation(annotations.ImageName, imageName)
specgen.AddAnnotation(annotations.ImageRef, imageRef)
specgen.AddAnnotation(annotations.IP, sb.IP())
// bind mount the pod shm
specgen.AddBindMount(sb.ShmPath(), "/dev/shm", []string{"rw"})


@@ -4,13 +4,13 @@ import (
"encoding/json"
"fmt"
"net/http"
"path/filepath"
"github.com/go-zoo/bone"
)
// ContainerInfo stores information about containers
type ContainerInfo struct {
Name string `json:"name"`
Pid int `json:"pid"`
Image string `json:"image"`
CreatedTime int64 `json:"created_time"`
@@ -19,6 +19,7 @@ type ContainerInfo struct {
LogPath string `json:"log_path"`
Root string `json:"root"`
Sandbox string `json:"sandbox"`
IP string `json:"ip_address"`
}
// CrioInfo stores information about the crio daemon
@@ -62,16 +63,22 @@ func (s *Server) GetInfoMux() *bone.Mux {
http.Error(w, fmt.Sprintf("container %s state is nil", containerID), http.StatusNotFound)
return
}
sb := s.getSandbox(ctr.Sandbox())
if sb == nil {
http.Error(w, fmt.Sprintf("can't find the sandbox for container id, sandbox id %s: %s", containerID, ctr.Sandbox()), http.StatusNotFound)
return
}
ci := ContainerInfo{
Name: ctr.Name(),
Pid: ctrState.Pid,
Image: ctr.Image(),
CreatedTime: ctrState.Created.UnixNano(),
Labels: ctr.Labels(),
Annotations: ctr.Annotations(),
Root: ctr.MountPoint(),
LogPath: filepath.Dir(ctr.LogPath()),
LogPath: ctr.LogPath(),
Sandbox: ctr.Sandbox(),
IP: sb.IP(),
}
js, err := json.Marshal(ci)
if err != nil {
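
With the sandbox lookup added above, the /containers/<id> info endpoint now reports the pod-level IP alongside the per-container data, and log_path carries the full log file path instead of just its directory. A rough illustration of the resulting JSON, using an abridged struct and invented values (not real output from crio):

package main

import (
	"encoding/json"
	"fmt"
)

// containerInfo is an abridged copy of the server's ContainerInfo, keeping only
// the fields this diff touches (ip_address is new; log_path is now the full
// file path rather than its parent directory).
type containerInfo struct {
	Name    string `json:"name"`
	Sandbox string `json:"sandbox"`
	LogPath string `json:"log_path"`
	IP      string `json:"ip_address"`
}

func main() {
	// All values below are invented for illustration only.
	ci := containerInfo{
		Name:    "k8s_nginx_mypod_default",
		Sandbox: "f00dfeed1234",
		LogPath: "/var/log/pods/mypod/nginx/0.log",
		IP:      "10.88.0.3",
	}
	out, err := json.MarshalIndent(ci, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Prints, roughly:
	// {
	//   "name": "k8s_nginx_mypod_default",
	//   "sandbox": "f00dfeed1234",
	//   "log_path": "/var/log/pods/mypod/nginx/0.log",
	//   "ip_address": "10.88.0.3"
	// }
}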


@@ -369,6 +369,9 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
for k, v := range kubeAnnotations {
g.AddAnnotation(k, v)
}
for k, v := range labels {
g.AddAnnotation(k, v)
}
// extract linux sysctls from annotations and pass down to oci runtime
safe, unsafe, err := SysctlsFromPodAnnotations(kubeAnnotations)
@@ -449,13 +452,6 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
}
g.AddAnnotation(annotations.MountPoint, mountPoint)
g.SetRootPath(mountPoint)
err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
if err != nil {
return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
}
if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil {
return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
}
container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logPath, sb.NetNs(), labels, kubeAnnotations, "", "", "", nil, id, false, false, false, sb.Privileged(), sb.Trusted(), podContainer.Dir, created, podContainer.Config.Config.StopSignal)
if err != nil {
@@ -465,18 +461,18 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
sb.SetInfraContainer(container)
var ip string
// setup the network
if !hostNetwork {
if err = s.netPlugin.SetUpPod(netNsPath, namespace, kubeName, id); err != nil {
return nil, fmt.Errorf("failed to create network for container %s in sandbox %s: %v", containerName, id, err)
}
if len(portMappings) != 0 {
ip, err := s.netPlugin.GetContainerNetworkStatus(netNsPath, namespace, id, containerName)
if err != nil {
return nil, fmt.Errorf("failed to get network status for container %s in sandbox %s: %v", containerName, id, err)
}
if ip, err = s.netPlugin.GetContainerNetworkStatus(netNsPath, namespace, id, kubeName); err != nil {
return nil, fmt.Errorf("failed to get network status for container %s in sandbox %s: %v", containerName, id, err)
}
if len(portMappings) != 0 {
ip4 := net.ParseIP(ip).To4()
if ip4 == nil {
return nil, fmt.Errorf("failed to get valid ipv4 address for container %s in sandbox %s", containerName, id)
@@ -492,6 +488,19 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
}
}
} else {
ip = s.BindAddress()
}
g.AddAnnotation(annotations.IP, ip)
sb.AddIP(ip)
err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
if err != nil {
return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
}
if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil {
return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
}
if err = s.runContainer(container, sb.CgroupParent()); err != nil {
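
This hunk is the heart of the change: the IP is resolved exactly once at pod start (from the CNI plugin for normal pods, from the server's bind address for host-network pods), cached on the sandbox, written into the io.kubernetes.cri-o.IP annotation, and only then is config.json saved, which is why the two SaveToFile calls moved below the networking block. A condensed sketch of that ordering, using hypothetical interfaces rather than the real ocicni plugin or Server types:

package runflow

import "fmt"

// netPlugin and sandbox are stand-ins for the real CNI plugin and crio Sandbox.
type netPlugin interface {
	SetUpPod(netnsPath, namespace, kubeName, id string) error
	GetContainerNetworkStatus(netnsPath, namespace, id, name string) (string, error)
}

type sandbox interface {
	AddIP(ip string)
}

// resolveSandboxIP mirrors the ordering introduced by this PR: set up the pod
// network (unless host networking), fetch the IP once, fall back to the host's
// bind address, and cache it before the runtime config is written out.
func resolveSandboxIP(plugin netPlugin, sb sandbox, hostNetwork bool,
	netnsPath, namespace, kubeName, id, bindAddress string) (string, error) {
	var ip string
	if !hostNetwork {
		if err := plugin.SetUpPod(netnsPath, namespace, kubeName, id); err != nil {
			return "", fmt.Errorf("failed to set up pod network: %v", err)
		}
		var err error
		if ip, err = plugin.GetContainerNetworkStatus(netnsPath, namespace, id, kubeName); err != nil {
			return "", fmt.Errorf("failed to get network status: %v", err)
		}
	} else {
		// Host-network pods share the host's addresses, so reuse the
		// server's cached bind address instead of asking CNI.
		ip = bindAddress
	}
	sb.AddIP(ip) // cache for PodSandboxStatus; also stored as annotations.IP
	return ip, nil
}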


@@ -18,16 +18,6 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
podInfraContainer := sb.InfraContainer()
cState := s.Runtime().ContainerStatus(podInfraContainer)
netNsPath, err := podInfraContainer.NetNsPath()
if err != nil {
return nil, err
}
ip, err := s.netPlugin.GetContainerNetworkStatus(netNsPath, sb.Namespace(), sb.KubeName(), sb.ID())
if err != nil {
// ignore the error on network status
ip = ""
}
rStatus := pb.PodSandboxState_SANDBOX_NOTREADY
if cState.Status == oci.ContainerStateRunning {
rStatus = pb.PodSandboxState_SANDBOX_READY
@@ -38,7 +28,7 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
Status: &pb.PodSandboxStatus{
Id: sandboxID,
CreatedAt: podInfraContainer.CreatedAt().UnixNano(),
Network: &pb.PodSandboxNetworkStatus{Ip: ip},
Network: &pb.PodSandboxNetworkStatus{Ip: sb.IP()},
State: rStatus,
Labels: sb.Labels(),
Annotations: sb.Annotations(),


@@ -66,6 +66,8 @@ type Server struct {
appArmorProfile string
stream streamService
bindAddress string
}
// GetExec returns exec stream request
@@ -233,6 +235,7 @@ func New(config *Config) (*Server, error) {
return nil, err
}
}
s.bindAddress = bindAddress.String()
_, err = net.LookupPort("tcp", config.StreamPort)
if err != nil {
@@ -289,6 +292,11 @@ func (s *Server) getInfraContainer(id string) *oci.Container {
return s.ContainerServer.GetInfraContainer(id)
}
// BindAddress is used to retrieve the host's IP
func (s *Server) BindAddress() string {
return s.bindAddress
}
// GetSandboxContainer returns the infra container for a given sandbox
func (s *Server) GetSandboxContainer(id string) *oci.Container {
return s.ContainerServer.GetSandboxContainer(id)
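
The server now caches the resolved stream bind address so RunPodSandbox can hand it out as the IP of host-network pods. The snippet below shows just one way a host address could be picked when nothing better is available, purely as an illustrative assumption; the actual resolution of bindAddress is outside this hunk:

package bindaddr

import (
	"fmt"
	"net"
)

// defaultHostIPv4 returns the first global unicast IPv4 address on the host.
// It is only a stand-in for whatever address ends up cached in
// Server.bindAddress; it is not the code this PR uses.
func defaultHostIPv4() (string, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return "", err
	}
	for _, a := range addrs {
		if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() && ipnet.IP.To4() != nil {
			return ipnet.IP.String(), nil
		}
	}
	return "", fmt.Errorf("no global unicast IPv4 address found")
}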


@@ -407,8 +407,18 @@ function prepare_plugin_test_args_network_conf() {
cat >$CRIO_CNI_CONFIG/10-plugin-test-args.conf <<-EOF
{
"cniVersion": "0.2.0",
"name": "crionet",
"type": "plugin_test_args.bash"
"name": "crionet_test_args",
"type": "bridge-custom",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "$1",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
}
EOF


@@ -5,7 +5,7 @@ load helpers
IMAGE="alpine:latest"
ROOT="$TESTDIR/crio"
RUNROOT="$TESTDIR/crio-run"
KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS"
KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT ${STORAGE_OPTS}"
function teardown() {
cleanup_test


@@ -85,6 +85,8 @@ load helpers
[ "$FOUND_K8S_POD_NAMESPACE" = "redhat.test.crio" ]
[ "$FOUND_K8S_POD_NAME" = "podsandbox1" ]
rm -rf /tmp/plugin_test_args.out
cleanup_pods
stop_crio
}


@@ -1,37 +0,0 @@
#!/bin/bash
if [[ -z "${CNI_ARGS}" ]]; then
exit 1
fi
IFS=';' read -ra array <<< "${CNI_ARGS}"
for arg in "${array[@]}"; do
IFS='=' read -ra item <<< "${arg}"
if [[ "${item[0]}" = "K8S_POD_NAMESPACE" ]]; then
K8S_POD_NAMESPACE="${item[1]}"
elif [[ "${item[0]}" = "K8S_POD_NAME" ]]; then
K8S_POD_NAME="${item[1]}"
fi
done
if [[ -z "${CNI_CONTAINERID}" ]]; then
exit 1
elif [[ -z "${K8S_POD_NAMESPACE}" ]]; then
exit 1
elif [[ -z "${K8S_POD_NAME}" ]]; then
exit 1
fi
echo "FOUND_CNI_CONTAINERID=${CNI_CONTAINERID}" >> /tmp/plugin_test_args.out
echo "FOUND_K8S_POD_NAMESPACE=${K8S_POD_NAMESPACE}" >> /tmp/plugin_test_args.out
echo "FOUND_K8S_POD_NAME=${K8S_POD_NAME}" >> /tmp/plugin_test_args.out
cat <<-EOF
{
"cniVersion": "0.2.0",
"ip4": {
"ip": "1.1.1.1/24"
}
}
EOF