Merge pull request #814 from runcom/cache-ip-sandbox
cache sandbox's IP address
commit ac12018973
16 changed files with 112 additions and 74 deletions
Dockerfile | 19
@@ -76,6 +76,23 @@ RUN set -x \
     && cp bin/* /opt/cni/bin/ \
     && rm -rf "$GOPATH"
+
+# Install custom CNI bridge test plugin
+# XXX: this plugin is meant to be a replacement for the old "test_plugin_args.bash"
+# we need this in testing because sandbox_run now gather IP address and the mock
+# plugin wasn't able to properly setup the net ns.
+# The bridge is based on the same commit as the one above.
+#ENV CNI_COMMIT 6bfe036c38c8e1410f1acaa4b2ee16f1851472e4
+ENV CNI_TEST_BRANCH custom-bridge
+RUN set -x \
+    && export GOPATH="$(mktemp -d)" \
+    && git clone https://github.com/runcom/plugins.git "$GOPATH/src/github.com/containernetworking/plugins" \
+    && cd "$GOPATH/src/github.com/containernetworking/plugins" \
+    && git checkout -q "$CNI_TEST_BRANCH" \
+    && ./build.sh \
+    && mkdir -p /opt/cni/bin \
+    && cp bin/bridge /opt/cni/bin/bridge-custom \
+    && rm -rf "$GOPATH"
 
 # Install crictl
 ENV CRICTL_COMMIT 16e6fe4d7199c5689db4630a9330e6a8a12cecd1
 RUN set -x \
@@ -87,8 +104,6 @@ RUN set -x \
     && cp "$GOPATH"/bin/crictl /usr/bin/ \
     && rm -rf "$GOPATH"
 
-COPY test/plugin_test_args.bash /opt/cni/bin/plugin_test_args.bash
-
 # Make sure we have some policy for pulling images
 RUN mkdir -p /etc/containers
 COPY test/policy.json /etc/containers/policy.json
@@ -46,7 +46,7 @@ func logsCmd(c *cli.Context) error {
     if len(args) != 1 {
         return errors.Errorf("'kpod logs' requires exactly one container name/ID")
     }
-    container := args[0]
+    container := c.Args().First()
     var opts libkpod.LogOptions
     opts.Details = c.Bool("details")
     opts.Follow = c.Bool("follow")
@@ -151,8 +151,6 @@
     repo: https://github.com/containernetworking/plugins
     dest: /root/src/github.com/containernetworking/plugins
     version: "{{ cni_commit }}"
-  async: 600
-  poll: 10
 - name: Git fetch the PR
   shell: "git fetch origin +refs/pull/{{ pullrequest }}/head:refs/remotes/origin/pr/{{ pullrequest }}"
   args:
@@ -225,10 +223,18 @@
     regexp: 'execute time bats --tap \$TESTS'
     state: present
   when: xunit
-- name: Copy plugin args so tests dont hang
-  shell: "cp test/plugin_test_args.bash /opt/cni/bin/"
+- name: git clone cni test repo
+  git:
+    repo: https://github.com/runcom/plugins
+    dest: /root/src/github.com/containernetworking/plugins
+    version: "custom-bridge"
+    force: yes
+- name: Build cni test networking
+  shell: ./build.sh
   args:
-    chdir: /root/src/github.com/kubernetes-incubator/cri-o/
+    chdir: /root/src/github.com/containernetworking/plugins
+- name: cp custom-bridge to opt bin
+  shell: cp /root/src/github.com/containernetworking/plugins/bin/bridge /opt/cni/bin/bridge-custom
 # k8s builds with go1.8.x, rhel, fedora don't have it yet
 - name: install Golang upstream in Fedora/RHEL
   shell: |
@@ -294,6 +294,8 @@ func (c *ContainerServer) LoadSandbox(id string) error {
         return err
     }
 
+    ip := m.Annotations[annotations.IP]
+
     processLabel, mountLabel, err := label.InitLabels(label.DupSecOpt(m.Process.SelinuxLabel))
     if err != nil {
         return err
@@ -311,6 +313,7 @@ func (c *ContainerServer) LoadSandbox(id string) error {
     if err != nil {
         return err
     }
+    sb.AddIP(ip)
 
     // We add a netNS only if we can load a permanent one.
     // Otherwise, the sandbox will live in the host namespace.
@@ -154,6 +154,8 @@ type Sandbox struct {
     hostname string
     portMappings []*hostport.PortMapping
     stopped bool
+    // ipv4 or ipv6 cache
+    ip string
 }
 
 const (
@@ -202,6 +204,16 @@ func New(id, namespace, name, kubeName, logDir string, labels, annotations map[s
     return sb, nil
 }
 
+// AddIP stores the ip in the sandbox
+func (s *Sandbox) AddIP(ip string) {
+    s.ip = ip
+}
+
+// IP returns the ip of the sandbox
+func (s *Sandbox) IP() string {
+    return s.ip
+}
+
 // ID returns the id of the sandbox
 func (s *Sandbox) ID() string {
     return s.id
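The new getter/setter pair above is the heart of the change: the address is computed once and then served from memory. Below is a minimal, runnable Go sketch of how that cache is meant to be used; the type, field values, and the main function are illustrative stand-ins, not the real cri-o Sandbox.

package main

import "fmt"

// sandbox stands in for cri-o's Sandbox type; only the new field is shown.
type sandbox struct {
    id string
    // ipv4 or ipv6 cache, filled once when the pod network is set up
    ip string
}

// AddIP stores the ip in the sandbox (mirrors the new Sandbox.AddIP).
func (s *sandbox) AddIP(ip string) { s.ip = ip }

// IP returns the cached ip (mirrors the new Sandbox.IP).
func (s *sandbox) IP() string { return s.ip }

func main() {
    sb := &sandbox{id: "podsandbox1"}

    // RunPodSandbox queries the CNI plugin once and caches the result...
    sb.AddIP("10.88.0.5")

    // ...so PodSandboxStatus can answer without another CNI round trip.
    fmt.Printf("sandbox %s has ip %s\n", sb.id, sb.IP())
}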
@@ -202,6 +202,10 @@ func (r *Runtime) CreateContainer(c *Container, cgroupParent string) error {
         if err != nil {
             logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
         } else {
+            // XXX: this defer does nothing as the cgroup can't be deleted cause
+            // it contains the conmon pid in tasks
+            // we need to remove this defer and delete the cgroup once conmon exits
+            // maybe need a conmon monitor?
             defer control.Delete()
             if err := control.Add(cgroups.Process{Pid: cmd.Process.Pid}); err != nil {
                 logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
@@ -19,6 +19,9 @@ const (
     // HostName is the container host name annotation
     HostName = "io.kubernetes.cri-o.HostName"
 
+    // IP is the container ipv4 or ipv6 address
+    IP = "io.kubernetes.cri-o.IP"
+
     // Image is the container image ID annotation
     Image = "io.kubernetes.cri-o.Image"
 
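Caching alone would not survive a cri-o restart, which is why the address is also written into the sandbox's OCI config under the new annotation key and read back by LoadSandbox. A rough sketch of that round trip follows, using a plain map where the real code uses the runtime spec's annotations; apart from the key, every name and value is made up.

package main

import "fmt"

// IP mirrors the annotation key introduced above.
const IP = "io.kubernetes.cri-o.IP"

func main() {
    // At creation time the resolved address is recorded on the spec, roughly
    // what specgen.AddAnnotation(annotations.IP, ip) does in the server.
    annotationsMap := map[string]string{}
    annotationsMap[IP] = "10.88.0.5"

    // After a daemon restart, LoadSandbox only has the serialized spec, so it
    // reads the annotation back and re-populates the in-memory cache via AddIP.
    restored := annotationsMap[IP]
    fmt.Println("restored sandbox ip:", restored)
}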
@@ -396,6 +396,11 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
             specgen.AddAnnotation(k, v)
         }
     }
+    if labels != nil {
+        for k, v := range labels {
+            specgen.AddAnnotation(k, v)
+        }
+    }
 
     // set this container's apparmor profile if it is set by sandbox
     if s.appArmorEnabled {
@@ -622,6 +627,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
 
     specgen.AddAnnotation(annotations.ImageName, imageName)
     specgen.AddAnnotation(annotations.ImageRef, imageRef)
+    specgen.AddAnnotation(annotations.IP, sb.IP())
 
     // bind mount the pod shm
     specgen.AddBindMount(sb.ShmPath(), "/dev/shm", []string{"rw"})
@@ -4,13 +4,13 @@ import (
     "encoding/json"
     "fmt"
     "net/http"
-    "path/filepath"
 
     "github.com/go-zoo/bone"
 )
 
 // ContainerInfo stores information about containers
 type ContainerInfo struct {
+    Name string `json:"name"`
     Pid int `json:"pid"`
     Image string `json:"image"`
     CreatedTime int64 `json:"created_time"`
@@ -19,6 +19,7 @@ type ContainerInfo struct {
     LogPath string `json:"log_path"`
     Root string `json:"root"`
     Sandbox string `json:"sandbox"`
+    IP string `json:"ip_address"`
 }
 
 // CrioInfo stores information about the crio daemon
@@ -62,16 +63,22 @@ func (s *Server) GetInfoMux() *bone.Mux {
             http.Error(w, fmt.Sprintf("container %s state is nil", containerID), http.StatusNotFound)
             return
         }
+        sb := s.getSandbox(ctr.Sandbox())
+        if sb == nil {
+            http.Error(w, fmt.Sprintf("can't find the sandbox for container id, sandbox id %s: %s", containerID, ctr.Sandbox()), http.StatusNotFound)
+            return
+        }
         ci := ContainerInfo{
+            Name: ctr.Name(),
             Pid: ctrState.Pid,
             Image: ctr.Image(),
             CreatedTime: ctrState.Created.UnixNano(),
             Labels: ctr.Labels(),
             Annotations: ctr.Annotations(),
             Root: ctr.MountPoint(),
-            LogPath: filepath.Dir(ctr.LogPath()),
+            LogPath: ctr.LogPath(),
             Sandbox: ctr.Sandbox(),
+            IP: sb.IP(),
         }
         js, err := json.Marshal(ci)
         if err != nil {
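With the Name and IP fields added, the info endpoint served by GetInfoMux now reports the container name and the pod's IP alongside the existing data. A small illustration of the resulting JSON shape is below; the struct is trimmed to a few fields and every value is invented.

package main

import (
    "encoding/json"
    "fmt"
)

// containerInfo is a trimmed copy of ContainerInfo showing only a few fields.
type containerInfo struct {
    Name    string `json:"name"`
    Pid     int    `json:"pid"`
    Sandbox string `json:"sandbox"`
    IP      string `json:"ip_address"`
}

func main() {
    ci := containerInfo{
        Name:    "k8s_podsandbox1-redis",
        Pid:     4242,
        Sandbox: "podsandbox1-id",
        IP:      "10.88.0.5",
    }
    js, err := json.Marshal(ci)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(js))
    // prints: {"name":"k8s_podsandbox1-redis","pid":4242,"sandbox":"podsandbox1-id","ip_address":"10.88.0.5"}
}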
@@ -369,6 +369,9 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
     for k, v := range kubeAnnotations {
         g.AddAnnotation(k, v)
     }
+    for k, v := range labels {
+        g.AddAnnotation(k, v)
+    }
 
     // extract linux sysctls from annotations and pass down to oci runtime
     safe, unsafe, err := SysctlsFromPodAnnotations(kubeAnnotations)
@@ -449,13 +452,6 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
     }
     g.AddAnnotation(annotations.MountPoint, mountPoint)
     g.SetRootPath(mountPoint)
-    err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
-    if err != nil {
-        return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
-    }
-    if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil {
-        return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
-    }
 
     container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logPath, sb.NetNs(), labels, kubeAnnotations, "", "", "", nil, id, false, false, false, sb.Privileged(), sb.Trusted(), podContainer.Dir, created, podContainer.Config.Config.StopSignal)
     if err != nil {
@@ -465,18 +461,18 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 
     sb.SetInfraContainer(container)
 
+    var ip string
     // setup the network
     if !hostNetwork {
         if err = s.netPlugin.SetUpPod(netNsPath, namespace, kubeName, id); err != nil {
             return nil, fmt.Errorf("failed to create network for container %s in sandbox %s: %v", containerName, id, err)
         }
 
-        if len(portMappings) != 0 {
-            ip, err := s.netPlugin.GetContainerNetworkStatus(netNsPath, namespace, id, containerName)
-            if err != nil {
-                return nil, fmt.Errorf("failed to get network status for container %s in sandbox %s: %v", containerName, id, err)
-            }
+        if ip, err = s.netPlugin.GetContainerNetworkStatus(netNsPath, namespace, id, kubeName); err != nil {
+            return nil, fmt.Errorf("failed to get network status for container %s in sandbox %s: %v", containerName, id, err)
+        }
 
+        if len(portMappings) != 0 {
             ip4 := net.ParseIP(ip).To4()
             if ip4 == nil {
                 return nil, fmt.Errorf("failed to get valid ipv4 address for container %s in sandbox %s", containerName, id)
@@ -492,6 +488,19 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
             }
 
         }
+    } else {
+        ip = s.BindAddress()
+    }
+
+    g.AddAnnotation(annotations.IP, ip)
+    sb.AddIP(ip)
+
+    err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
+    if err != nil {
+        return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
+    }
+    if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil {
+        return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.Name(), id, err)
     }
 
     if err = s.runContainer(container, sb.CgroupParent()); err != nil {
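Taken together, the RunPodSandbox hunks resolve the address exactly once: pods with their own network namespace always ask the CNI plugin (previously this happened only when port mappings were present), host-network pods fall back to the daemon's bind address, and the result is both annotated and cached. The condensed sketch below shows that decision; the plugin type and function are placeholders, not cri-o's API, and the addresses are invented.

package main

import (
    "errors"
    "fmt"
)

// netPlugin is a stand-in for the CNI plugin wrapper used by the server.
type netPlugin struct{}

func (p *netPlugin) GetContainerNetworkStatus(netNsPath, namespace, id, name string) (string, error) {
    if netNsPath == "" {
        return "", errors.New("no network namespace")
    }
    return "10.88.0.5", nil // pretend the plugin reported this address
}

// resolveSandboxIP condenses the new flow: query the plugin for pods with
// their own netns, otherwise fall back to the host's bind address.
func resolveSandboxIP(hostNetwork bool, plugin *netPlugin, netNsPath, namespace, id, name, bindAddress string) (string, error) {
    if hostNetwork {
        return bindAddress, nil
    }
    return plugin.GetContainerNetworkStatus(netNsPath, namespace, id, name)
}

func main() {
    plugin := &netPlugin{}

    ip, err := resolveSandboxIP(false, plugin, "/var/run/netns/abc", "default", "sandbox-id", "podsandbox1", "192.168.1.10")
    fmt.Println(ip, err) // 10.88.0.5 <nil>

    ip, err = resolveSandboxIP(true, plugin, "", "default", "sandbox-id", "podsandbox1", "192.168.1.10")
    fmt.Println(ip, err) // 192.168.1.10 <nil>
}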
@@ -18,16 +18,6 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
     podInfraContainer := sb.InfraContainer()
     cState := s.Runtime().ContainerStatus(podInfraContainer)
 
-    netNsPath, err := podInfraContainer.NetNsPath()
-    if err != nil {
-        return nil, err
-    }
-    ip, err := s.netPlugin.GetContainerNetworkStatus(netNsPath, sb.Namespace(), sb.KubeName(), sb.ID())
-    if err != nil {
-        // ignore the error on network status
-        ip = ""
-    }
-
     rStatus := pb.PodSandboxState_SANDBOX_NOTREADY
     if cState.Status == oci.ContainerStateRunning {
         rStatus = pb.PodSandboxState_SANDBOX_READY
@@ -38,7 +28,7 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
         Status: &pb.PodSandboxStatus{
             Id: sandboxID,
             CreatedAt: podInfraContainer.CreatedAt().UnixNano(),
-            Network: &pb.PodSandboxNetworkStatus{Ip: ip},
+            Network: &pb.PodSandboxNetworkStatus{Ip: sb.IP()},
             State: rStatus,
             Labels: sb.Labels(),
             Annotations: sb.Annotations(),
@@ -66,6 +66,8 @@ type Server struct {
     appArmorProfile string
 
     stream streamService
+
+    bindAddress string
 }
 
 // GetExec returns exec stream request
@@ -233,6 +235,7 @@ func New(config *Config) (*Server, error) {
             return nil, err
         }
     }
+    s.bindAddress = bindAddress.String()
 
     _, err = net.LookupPort("tcp", config.StreamPort)
     if err != nil {
@@ -289,6 +292,11 @@ func (s *Server) getInfraContainer(id string) *oci.Container {
     return s.ContainerServer.GetInfraContainer(id)
 }
 
+// BindAddress is used to retrieve host's IP
+func (s *Server) BindAddress() string {
+    return s.bindAddress
+}
+
 // GetSandboxContainer returns the infra container for a given sandbox
 func (s *Server) GetSandboxContainer(id string) *oci.Container {
     return s.ContainerServer.GetSandboxContainer(id)
@@ -407,8 +407,18 @@ function prepare_plugin_test_args_network_conf() {
     cat >$CRIO_CNI_CONFIG/10-plugin-test-args.conf <<-EOF
 {
     "cniVersion": "0.2.0",
-    "name": "crionet",
-    "type": "plugin_test_args.bash"
+    "name": "crionet_test_args",
+    "type": "bridge-custom",
+    "bridge": "cni0",
+    "isGateway": true,
+    "ipMasq": true,
+    "ipam": {
+        "type": "host-local",
+        "subnet": "$1",
+        "routes": [
+            { "dst": "0.0.0.0/0" }
+        ]
+    }
 }
 EOF
 
@@ -5,7 +5,7 @@ load helpers
 IMAGE="alpine:latest"
 ROOT="$TESTDIR/crio"
 RUNROOT="$TESTDIR/crio-run"
-KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT $STORAGE_OPTS"
+KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT ${STORAGE_OPTS}"
 
 function teardown() {
     cleanup_test
@@ -85,6 +85,8 @@ load helpers
     [ "$FOUND_K8S_POD_NAMESPACE" = "redhat.test.crio" ]
     [ "$FOUND_K8S_POD_NAME" = "podsandbox1" ]
 
+    rm -rf /tmp/plugin_test_args.out
+
     cleanup_pods
     stop_crio
 }
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-if [[ -z "${CNI_ARGS}" ]]; then
-    exit 1
-fi
-
-IFS=';' read -ra array <<< "${CNI_ARGS}"
-for arg in "${array[@]}"; do
-    IFS='=' read -ra item <<< "${arg}"
-    if [[ "${item[0]}" = "K8S_POD_NAMESPACE" ]]; then
-        K8S_POD_NAMESPACE="${item[1]}"
-    elif [[ "${item[0]}" = "K8S_POD_NAME" ]]; then
-        K8S_POD_NAME="${item[1]}"
-    fi
-done
-
-if [[ -z "${CNI_CONTAINERID}" ]]; then
-    exit 1
-elif [[ -z "${K8S_POD_NAMESPACE}" ]]; then
-    exit 1
-elif [[ -z "${K8S_POD_NAME}" ]]; then
-    exit 1
-fi
-
-echo "FOUND_CNI_CONTAINERID=${CNI_CONTAINERID}" >> /tmp/plugin_test_args.out
-echo "FOUND_K8S_POD_NAMESPACE=${K8S_POD_NAMESPACE}" >> /tmp/plugin_test_args.out
-echo "FOUND_K8S_POD_NAME=${K8S_POD_NAME}" >> /tmp/plugin_test_args.out
-
-cat <<-EOF
-{
-    "cniVersion": "0.2.0",
-    "ip4": {
-        "ip": "1.1.1.1/24"
-    }
-}
-EOF