server: adhere to CRI for sandbox stop/remove

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
This commit is contained in:
Antonio Murdaca 2017-05-21 15:47:01 +02:00
parent 00444753e7
commit 9f68cb4507
No known key found for this signature in database
GPG key ID: B2BEAD150DE936B9
9 changed files with 226 additions and 30 deletions

View file

@ -27,6 +27,9 @@ func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerReq
if err := s.runtime.StopContainer(c, -1); err != nil { if err := s.runtime.StopContainer(c, -1); err != nil {
return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err) return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
} }
if err := s.storageRuntimeServer.StopContainer(c.ID()); err != nil {
return nil, fmt.Errorf("failed to unmount container %s: %v", c.ID(), err)
}
} }
if err := s.runtime.DeleteContainer(c); err != nil { if err := s.runtime.DeleteContainer(c); err != nil {
@ -35,10 +38,6 @@ func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerReq
s.removeContainer(c) s.removeContainer(c)
if err := s.storageRuntimeServer.StopContainer(c.ID()); err != nil {
return nil, fmt.Errorf("failed to unmount container %s: %v", c.ID(), err)
}
if err := s.storageRuntimeServer.DeleteContainer(c.ID()); err != nil { if err := s.storageRuntimeServer.DeleteContainer(c.ID()); err != nil {
return nil, fmt.Errorf("failed to delete storage for container %s: %v", c.ID(), err) return nil, fmt.Errorf("failed to delete storage for container %s: %v", c.ID(), err)
} }

View file

@ -25,6 +25,9 @@ func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest
if err := s.runtime.StopContainer(c, req.Timeout); err != nil { if err := s.runtime.StopContainer(c, req.Timeout); err != nil {
return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err) return nil, fmt.Errorf("failed to stop container %s: %v", c.ID(), err)
} }
if err := s.storageRuntimeServer.StopContainer(c.ID()); err != nil {
return nil, fmt.Errorf("failed to unmount container %s: %v", c.ID(), err)
}
} }
s.containerStateToDisk(c) s.containerStateToDisk(c)

View file

@ -10,7 +10,9 @@ import (
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containernetworking/cni/pkg/ns" "github.com/containernetworking/cni/pkg/ns"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink"
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
"k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/fields"
@ -238,9 +240,19 @@ func (s *sandbox) netNsRemove() error {
} }
if s.netns.restored { if s.netns.restored {
if err := unix.Unmount(s.netns.ns.Path(), unix.MNT_DETACH); err != nil { // we got namespaces in the form of
// /var/run/netns/cni-0d08effa-06eb-a963-f51a-e2b0eceffc5d
// but /var/run on most systems is symlinked to /run so we first resolve
// the symlink and then try and see if it's mounted
fp, err := symlink.FollowSymlinkInScope(s.netns.ns.Path(), "/")
if err != nil {
return err return err
} }
if mounted, err := mount.Mounted(fp); err == nil && mounted {
if err := unix.Unmount(fp, unix.MNT_DETACH); err != nil {
return err
}
}
if err := os.RemoveAll(s.netns.ns.Path()); err != nil { if err := os.RemoveAll(s.netns.ns.Path()); err != nil {
return err return err

View file

@ -2,14 +2,11 @@ package server
import ( import (
"fmt" "fmt"
"syscall"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/storage" "github.com/containers/storage"
"github.com/docker/docker/pkg/mount"
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
pkgstorage "github.com/kubernetes-incubator/cri-o/pkg/storage" pkgstorage "github.com/kubernetes-incubator/cri-o/pkg/storage"
"github.com/opencontainers/selinux/go-selinux/label"
"golang.org/x/net/context" "golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
@ -74,27 +71,10 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
} }
} }
if err := label.ReleaseLabel(sb.processLabel); err != nil {
return nil, err
}
// unmount the shm for the pod
if sb.shmPath != "/dev/shm" {
if mounted, err := mount.Mounted(sb.shmPath); err == nil && mounted {
if err := syscall.Unmount(sb.shmPath, syscall.MNT_DETACH); err != nil {
return nil, err
}
}
}
if err := sb.netNsRemove(); err != nil {
return nil, fmt.Errorf("failed to remove networking namespace for sandbox %s: %v", sb.id, err)
}
s.removeContainer(podInfraContainer) s.removeContainer(podInfraContainer)
// Remove the files related to the sandbox // Remove the files related to the sandbox
if err := s.storageRuntimeServer.StopContainer(sb.id); err != nil { if err := s.storageRuntimeServer.StopContainer(sb.id); err != nil && err != storage.ErrContainerUnknown {
logrus.Warnf("failed to stop sandbox container in pod sandbox %s: %v", sb.id, err) logrus.Warnf("failed to stop sandbox container in pod sandbox %s: %v", sb.id, err)
} }
if err := s.storageRuntimeServer.RemovePodSandbox(sb.id); err != nil && err != pkgstorage.ErrInvalidSandboxID { if err := s.storageRuntimeServer.RemovePodSandbox(sb.id); err != nil && err != pkgstorage.ErrInvalidSandboxID {

View file

@ -5,8 +5,13 @@ import (
"os" "os"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/storage"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/symlink"
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
"github.com/opencontainers/selinux/go-selinux/label"
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/sys/unix"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
@ -61,10 +66,41 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
if err := s.runtime.StopContainer(c, -1); err != nil { if err := s.runtime.StopContainer(c, -1); err != nil {
return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.id, err) return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
} }
if c.ID() == podInfraContainer.ID() {
continue
}
if err := s.storageRuntimeServer.StopContainer(c.ID()); err != nil && err != storage.ErrContainerUnknown {
// assume the container is already unmounted
logrus.Warnf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
}
} }
s.containerStateToDisk(c) s.containerStateToDisk(c)
} }
if err := label.ReleaseLabel(sb.processLabel); err != nil {
return nil, err
}
// unmount the shm for the pod
if sb.shmPath != "/dev/shm" {
// we got the shm mount in the form of
// /var/run/containers/storage/overlay-containers/CID/userdata/shm
// but /var/run on most systems is symlinked to /run so we first resolve
// the symlink and then try and see if it's mounted
fp, err := symlink.FollowSymlinkInScope(sb.shmPath, "/")
if err != nil {
return nil, err
}
if mounted, err := mount.Mounted(fp); err == nil && mounted {
if err := unix.Unmount(fp, unix.MNT_DETACH); err != nil {
return nil, err
}
}
}
if err := s.storageRuntimeServer.StopContainer(sb.id); err != nil && err != storage.ErrContainerUnknown {
logrus.Warnf("failed to stop sandbox container in pod sandbox %s: %v", sb.id, err)
}
resp := &pb.StopPodSandboxResponse{} resp := &pb.StopPodSandboxResponse{}
logrus.Debugf("StopPodSandboxResponse: %+v", resp) logrus.Debugf("StopPodSandboxResponse: %+v", resp)
return resp, nil return resp, nil

View file

@ -367,12 +367,21 @@ function teardown() {
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
[[ "$output" != "" ]] [[ "$output" != "" ]]
[[ "$output" =~ "$ctr3_id" ]] [[ "$output" =~ "$ctr3_id" ]]
run crioctl pod stop --id "$pod1_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod1_id" run crioctl pod remove --id "$pod1_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
run crioctl pod stop --id "$pod2_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod2_id" run crioctl pod remove --id "$pod2_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
run crioctl pod stop --id "$pod3_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod3_id" run crioctl pod remove --id "$pod3_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
@ -421,6 +430,9 @@ function teardown() {
[[ "$output" =~ "$ctr1_id" ]] [[ "$output" =~ "$ctr1_id" ]]
[[ "$output" =~ "$ctr2_id" ]] [[ "$output" =~ "$ctr2_id" ]]
[[ "$output" =~ "$ctr3_id" ]] [[ "$output" =~ "$ctr3_id" ]]
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod_id" run crioctl pod remove --id "$pod_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
@ -501,6 +513,9 @@ function teardown() {
run crioctl ctr execsync --id "$ctr_id" --timeout 1 sleep 10 run crioctl ctr execsync --id "$ctr_id" --timeout 1 sleep 10
echo "$output" echo "$output"
[[ "$output" =~ "command timed out" ]] [[ "$output" =~ "command timed out" ]]
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod_id" run crioctl pod remove --id "$pod_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
@ -526,6 +541,9 @@ function teardown() {
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
[[ "$output" =~ "/dev/mynull" ]] [[ "$output" =~ "/dev/mynull" ]]
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod_id" run crioctl pod remove --id "$pod_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
@ -606,7 +624,9 @@ function teardown() {
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
[[ "$output" == *"$(printf "Stderr:\nthis goes to stderr")"* ]] [[ "$output" == *"$(printf "Stderr:\nthis goes to stderr")"* ]]
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod_id" run crioctl pod remove --id "$pod_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]

View file

@ -247,7 +247,7 @@ function cleanup_ctrs() {
if [ "$output" != "" ]; then if [ "$output" != "" ]; then
printf '%s\n' "$output" | while IFS= read -r line printf '%s\n' "$output" | while IFS= read -r line
do do
crioctl ctr stop --id "$line" || true crioctl ctr stop --id "$line"
crioctl ctr remove --id "$line" crioctl ctr remove --id "$line"
done done
fi fi
@ -272,7 +272,7 @@ function cleanup_pods() {
if [ "$output" != "" ]; then if [ "$output" != "" ]; then
printf '%s\n' "$output" | while IFS= read -r line printf '%s\n' "$output" | while IFS= read -r line
do do
crioctl pod stop --id "$line" || true crioctl pod stop --id "$line"
crioctl pod remove --id "$line" crioctl pod remove --id "$line"
done done
fi fi

View file

@ -16,7 +16,6 @@ function teardown() {
run crioctl pod stop --id "$id" run crioctl pod stop --id "$id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
echo "$output"
run crioctl pod remove --id "$id" run crioctl pod remove --id "$id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
@ -48,6 +47,9 @@ function teardown() {
run crioctl ctr start --id "$ctr_id" run crioctl ctr start --id "$ctr_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod_id" run crioctl pod remove --id "$pod_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
@ -155,12 +157,21 @@ function teardown() {
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
[[ "$output" == "" ]] [[ "$output" == "" ]]
run crioctl pod stop --id "$pod1_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod1_id" run crioctl pod remove --id "$pod1_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
run crioctl pod stop --id "$pod2_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod2_id" run crioctl pod remove --id "$pod2_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
run crioctl pod stop --id "$pod3_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod3_id" run crioctl pod remove --id "$pod3_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
@ -256,6 +267,9 @@ function teardown() {
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
pod_id="$output" pod_id="$output"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod_id" run crioctl pod remove --id "$pod_id"
echo "$output" echo "$output"
[ "$status" -eq 0 ] [ "$status" -eq 0 ]

View file

@ -78,6 +78,131 @@ function teardown() {
stop_crio stop_crio
} }
@test "crio restore with bad state and pod stopped" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
stop_crio
# simulate reboot with runc state going away
for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done
start_crio
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_pods
stop_crio
}
@test "crio restore with bad state and ctr stopped" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run crioctl ctr stop --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
stop_crio
# simulate reboot with runc state going away
for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done
start_crio
run crioctl ctr stop --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_crio
}
@test "crio restore with bad state and ctr removed" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
run crioctl ctr stop --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl ctr remove --id "$ctr_id"
echo "$output"
[ "$status" -eq 0 ]
stop_crio
# simulate reboot with runc state going away
for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done
start_crio
run crioctl ctr stop --id "$ctr_id"
echo "$output"
[ "$status" -eq 1 ]
[[ "${output}" =~ "not found" ]]
cleanup_ctrs
cleanup_pods
stop_crio
}
@test "crio restore with bad state and pod removed" {
start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json
echo "$output"
[ "$status" -eq 0 ]
pod_id="$output"
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
stop_crio
# simulate reboot with runc state going away
for i in $("$RUNTIME" list -q | xargs); do "$RUNTIME" delete -f $i; done
start_crio
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_pods
stop_crio
}
@test "crio restore with bad state" { @test "crio restore with bad state" {
start_crio start_crio
run crioctl pod run --config "$TESTDATA"/sandbox_config.json run crioctl pod run --config "$TESTDATA"/sandbox_config.json
@ -129,6 +254,13 @@ function teardown() {
[[ "${output}" =~ "CONTAINER_EXITED" ]] [[ "${output}" =~ "CONTAINER_EXITED" ]]
[[ "${output}" =~ "Exit Code: 255" ]] [[ "${output}" =~ "Exit Code: 255" ]]
run crioctl pod stop --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
run crioctl pod remove --id "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
cleanup_ctrs cleanup_ctrs
cleanup_pods cleanup_pods
stop_crio stop_crio