Vendor: Update k8s version

Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>
Author: Michał Żyłowski, 2017-02-03 14:41:32 +01:00
Parent: dfa93414c5
Commit: 52baf68d50
3756 changed files with 113013 additions and 92675 deletions


@ -71,7 +71,10 @@ go_test(
"volume_manager_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
tags = [
"automanaged",
"integration",
],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
@ -114,6 +117,15 @@ go_test(
],
)
genrule(
name = "gen_e2e_node.test",
testonly = 1,
srcs = [":go_default_test"],
outs = ["e2e_node.test"],
cmd = "srcs=($(SRCS)); cp $$(dirname $${srcs[0]})/go_default_test $@;",
output_to_bindir = 1,
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),


@ -1,4 +1,6 @@
assignees:
- vishh
- timstclair
- Random-Liu
approvers:
- Random-Liu
- timstclair
- vishh
reviewers:
- sig-node-reviewers


@ -147,7 +147,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.
f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
} else {
// Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
w, err := f.PodClient().Watch(v1.SingleObject(v1.ObjectMeta{Name: pod.Name}))
w, err := f.PodClient().Watch(metav1.SingleObject(metav1.ObjectMeta{Name: pod.Name}))
framework.ExpectNoError(err)
_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
switch e.Type {
@ -171,7 +171,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.
func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)),
Annotations: map[string]string{
apparmor.ContainerAnnotationKeyPrefix + "test": profile,
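
As context for the hunk above: after this vendor bump the object metadata and list-options types come from the apimachinery meta/v1 package, so a single-pod watch is built with metav1.SingleObject. The sketch below is illustrative rather than part of this commit; it reuses the import paths visible elsewhere in this diff, and the helper name and the bare channel loop (instead of the watch.Until call the test itself uses) are assumptions.

```go
package e2e_node

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForPodNotPending is an illustrative helper, not part of this commit.
// metav1.SingleObject turns an ObjectMeta into ListOptions that select exactly
// the named object, so the watch only delivers events for this one pod.
func waitForPodNotPending(f *framework.Framework, name string) error {
	w, err := f.PodClient().Watch(metav1.SingleObject(metav1.ObjectMeta{Name: name}))
	if err != nil {
		return err
	}
	defer w.Stop()
	// The real test bounds this wait with watch.Until and framework.PodStartTimeout;
	// a plain channel read keeps the sketch free of extra imports.
	for e := range w.ResultChan() {
		if pod, ok := e.Object.(*v1.Pod); ok && pod.Status.Phase != v1.PodPending {
			return nil
		}
	}
	return fmt.Errorf("watch on pod %q closed before it left Pending", name)
}
```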


@ -17,6 +17,7 @@ limitations under the License.
package e2e_node
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
@ -68,7 +69,7 @@ func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *v1.Pod {
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
@ -106,7 +107,7 @@ func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *v1.Pod {
cgroupFsName = cm.ConvertCgroupNameToSystemd(cm.CgroupName(cgroupName), true)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
@ -166,7 +167,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
)
By("Creating a Guaranteed pod in Namespace", func() {
guaranteedPod = f.PodClient().Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
@ -191,7 +192,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
Expect(f.PodClient().Delete(guaranteedPod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
Expect(f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("pod" + podUID))
f.PodClient().Create(pod)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
@ -210,7 +211,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
)
By("Creating a BestEffort pod in Namespace", func() {
bestEffortPod = f.PodClient().Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
@ -235,7 +236,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
Expect(f.PodClient().Delete(bestEffortPod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
Expect(f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("BestEffort/pod" + podUID))
f.PodClient().Create(pod)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
@ -254,7 +255,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
)
By("Creating a Burstable pod in Namespace", func() {
burstablePod = f.PodClient().Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
@ -279,7 +280,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
Expect(f.PodClient().Delete(burstablePod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
Expect(f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("Burstable/pod" + podUID))
f.PodClient().Create(pod)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
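
For the deletions above, only the options type changes: the pods are still removed through the framework's pod client, but DeleteOptions now comes from metav1. A minimal sketch of that call, with an illustrative helper name and a one-second grace period chosen to match the hunk:

```go
package e2e_node

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// deleteWithShortGrace is an illustrative helper, not part of this commit.
// GracePeriodSeconds takes a pointer, so the value is bound to a local first.
func deleteWithShortGrace(f *framework.Framework, podName string) error {
	gp := int64(1)
	return f.PodClient().Delete(podName, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
}
```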


@ -46,7 +46,7 @@ func (cc *ConformanceContainer) Create() {
imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: s})
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: cc.podName,
},
Spec: v1.PodSpec{
@ -63,7 +63,7 @@ func (cc *ConformanceContainer) Create() {
}
func (cc *ConformanceContainer) Delete() error {
return cc.PodClient.Delete(cc.podName, v1.NewDeleteOptions(0))
return cc.PodClient.Delete(cc.podName, metav1.NewDeleteOptions(0))
}
func (cc *ConformanceContainer) IsReady() (bool, error) {


@ -26,6 +26,7 @@ import (
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
@ -96,7 +97,7 @@ var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
podClient := f.PodClient()
podName := "besteffort" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
@ -140,7 +141,7 @@ var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
podClient := f.PodClient()
podName := "guaranteed" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
@ -181,7 +182,7 @@ var _ = framework.KubeDescribe("Kubelet Container Manager [Serial]", func() {
podClient := f.PodClient()
podName := "burstable" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
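
The pattern repeated throughout these node e2e hunks is that only ObjectMeta moves to the apimachinery package, while Pod, PodSpec and Container stay in pkg/api/v1. Below is a self-contained sketch of a pod built that way, reusing the busybox image already used elsewhere in this suite; the helper name and command are illustrative.

```go
package e2e_node

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/uuid"
)

// newBusyboxPod is an illustrative sketch, not part of this commit: the
// ObjectMeta literal is the only piece that switches to metav1.
func newBusyboxPod() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "busybox-" + string(uuid.NewUUID()),
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{
				{
					Name:    "busybox",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: []string{"sh", "-c", "echo running; sleep 60"},
				},
			},
		},
	}
}
```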


@ -492,12 +492,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
obj, err := f.ClientSet.Core().Pods(ns).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
return f.ClientSet.Core().Pods(ns).Watch(options)
},
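
The informer's list and watch functions above now receive metav1.ListOptions. The label-selector construction they share is sketched below as a standalone helper; the helper name is an assumption, while the selector expression and the Core().Pods(...).List call mirror the hunk.

```go
package e2e_node

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// listPodsByType is an illustrative helper, not part of this commit. It builds
// the same "type" label selector the informer uses and lists matching pods.
func listPodsByType(f *framework.Framework, ns, podType string) (*v1.PodList, error) {
	opts := metav1.ListOptions{
		LabelSelector: labels.SelectorFromSet(labels.Set{"type": podType}).String(),
	}
	return f.ClientSet.Core().Pods(ns).List(opts)
}
```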


@ -74,7 +74,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
verifyPodName = "verify" + string(uuid.NewUUID())
createIdlePod(idlePodName, podClient)
podClient.Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: busyPodName,
},
Spec: v1.PodSpec{
@ -97,9 +97,9 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
if !isImageSupported() || !evictionOptionIsSet() { // Skip the after each
return
}
podClient.DeleteSync(busyPodName, &v1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(idlePodName, &v1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(verifyPodName, &v1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(busyPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(idlePodName, &metav1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(verifyPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
// Wait for 2 container gc loop to ensure that the containers are deleted. The containers
// created in this test consume a lot of disk, we don't want them to trigger disk eviction
@ -188,7 +188,7 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
func createIdlePod(podName string, podClient *framework.PodClient) {
podClient.Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{


@ -31,6 +31,7 @@ import (
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
commontest "k8s.io/kubernetes/test/e2e/common"
@ -252,7 +253,7 @@ func updateTestContext() error {
// getNode gets node object from the apiserver.
func getNode(c *clientset.Clientset) (*v1.Node, error) {
nodes, err := c.Nodes().List(v1.ListOptions{})
nodes, err := c.Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.")
if nodes == nil {
return nil, fmt.Errorf("the node list is nil.")
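
Since node e2e suites run against a single node, the helper above simply grabs a node from the apiserver; the compilable sketch below restates it with plain error returns instead of Gomega assertions. The helper name and error wording are illustrative.

```go
package e2e_node

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// firstNode restates the getNode helper above in a compilable form: node list
// calls on the generated clientset now take metav1.ListOptions. The error
// handling is a simplification of the original assertions.
func firstNode(c *clientset.Clientset) (*v1.Node, error) {
	nodes, err := c.Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	if nodes == nil || len(nodes.Items) == 0 {
		return nil, fmt.Errorf("no nodes registered with the apiserver")
	}
	return &nodes.Items[0], nil
}
```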


@ -231,7 +231,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
AfterEach(func() {
for _, pod := range test.testPods {
By(fmt.Sprintf("Deleting Pod %v", pod.podName))
f.PodClient().DeleteSync(pod.podName, &v1.DeleteOptions{}, defaultRuntimeRequestTimeoutDuration)
f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, defaultRuntimeRequestTimeoutDuration)
}
By("Making sure all containers get cleaned up")
@ -309,7 +309,7 @@ func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
})
}
pods = append(pods, &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: spec.podName},
ObjectMeta: metav1.ObjectMeta{Name: spec.podName},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyAlways,
Containers: containers,


@ -35,7 +35,7 @@ var _ = framework.KubeDescribe("ImageID", func() {
It("should be set to the manifest digest (from RepoDigests) when available", func() {
podDesc := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-with-repodigest",
},
Spec: v1.PodSpec{


@ -20,6 +20,7 @@ import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
@ -45,7 +46,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
{
evictionPriority: 1, // This pod should be evicted before the normal memory usage pod
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "container-inode-hog-pod"},
ObjectMeta: metav1.ObjectMeta{Name: "container-inode-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
@ -65,7 +66,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
{
evictionPriority: 1, // This pod should be evicted before the normal memory usage pod
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "volume-inode-hog-pod"},
ObjectMeta: metav1.ObjectMeta{Name: "volume-inode-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
@ -91,7 +92,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
{
evictionPriority: 0, // This pod should never be evicted
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "normal-memory-usage-pod"},
ObjectMeta: metav1.ObjectMeta{Name: "normal-memory-usage-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
@ -160,7 +161,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
Eventually(func() error {
// Gather current information
updatedPodList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
updatedPodList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
updatedPods := updatedPodList.Items
for _, p := range updatedPods {
framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
@ -250,7 +251,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
By("making sure we can start a new pod after the test")
podName := "test-admit-pod"
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
@ -269,7 +270,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
By("deleting pods")
for _, spec := range podTestSpecs {
By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
f.PodClient().DeleteSync(spec.pod.Name, &v1.DeleteOptions{}, podDisappearTimeout)
f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, podDisappearTimeout)
}
if CurrentGinkgoTestDescription().Failed {
@ -287,7 +288,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
// Returns TRUE if a disk pressure condition due to inodes exists on the node, FALSE otherwise
func hasInodePressure(f *framework.Framework, testCondition string) (bool, error) {
nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "getting node list")
if len(nodeList.Items) != 1 {
return false, fmt.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)


@ -33,7 +33,7 @@ TIMEOUT=${TIMEOUT:-"45m"}
mkdir -p ${ARTIFACTS}
go run test/e2e_node/runner/remote/run_remote.go conformance \
--logtostderr --vmodule=*=4 --ssh-env="gce" \
--logtostderr --vmodule=*=4 --ssh-env="gce" --ssh-user="$GCE_USER" \
--zone="$GCE_ZONE" --project="$GCE_PROJECT" --hosts="$GCE_HOSTS" \
--images="$GCE_IMAGES" --image-project="$GCE_IMAGE_PROJECT" \
--image-config-file="$GCE_IMAGE_CONFIG_PATH" --cleanup="$CLEANUP" \


@ -4,4 +4,5 @@ GCE_ZONE=us-central1-f
GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--focus="\[Flaky\]"'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'


@ -40,7 +40,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
podName := "busybox-scheduling-" + string(uuid.NewUUID())
It("it should print the output to logs [Conformance]", func() {
podClient.CreateSync(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
@ -74,7 +74,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
BeforeEach(func() {
podName = "bin-false" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
})
It("should be possible to delete", func() {
err := podClient.Delete(podName, &v1.DeleteOptions{})
err := podClient.Delete(podName, &metav1.DeleteOptions{})
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
})
})
@ -121,7 +121,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
It("it should not write to root filesystem [Conformance]", func() {
isReadOnly := true
podClient.CreateSync(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{


@ -20,6 +20,7 @@ import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
@ -59,7 +60,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
podClient.WaitForSuccess(podCheckHook.Name, postStartWaitTimeout)
}
By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), podWaitTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
By("create the hook check pod")
podClient.Create(podCheckHook)
@ -109,7 +110,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
Context("when it is http hook", func() {
var targetIP string
podHandleHookRequest := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-handle-http-request",
},
Spec: v1.PodSpec{
@ -143,7 +144,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
}
By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), podWaitTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
By("check prestop hook")
Eventually(func() error {
@ -154,7 +155,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
}
It("should execute poststart http hook properly [Conformance]", func() {
podWithHook := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-with-poststart-http-hook",
},
Spec: v1.PodSpec{
@ -179,7 +180,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
})
It("should execute prestop http hook properly [Conformance]", func() {
podWithHook := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-with-prestop-http-hook",
},
Spec: v1.PodSpec{
@ -208,7 +209,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
func getExecHookTestPod(name string, cmd []string) *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
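
The DeleteSync calls above use metav1.NewDeleteOptions, which is simply a constructor for a DeleteOptions with the given grace period. A minimal sketch, assuming the same framework pod client; the helper name and the 15-second grace period mirror the hunk, and the timeout is whatever the caller already uses.

```go
package e2e_node

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// deleteAndWait is an illustrative helper, not part of this commit.
// metav1.NewDeleteOptions(15) builds a DeleteOptions whose GracePeriodSeconds
// points at 15, so it pairs naturally with DeleteSync.
func deleteAndWait(f *framework.Framework, podName string, timeout time.Duration) {
	f.PodClient().DeleteSync(podName, metav1.NewDeleteOptions(15), timeout)
}
```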


@ -47,7 +47,7 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
logDir := kubelet.ContainerLogsDir
logPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: logPodName,
},
Spec: v1.PodSpec{
@ -75,7 +75,7 @@ var _ = framework.KubeDescribe("ContainerLogPath", func() {
expectedlogFile := logDir + "/" + logPodName + "_" + ns + "_" + logContName + "-" + logConID.ID + ".log"
checkPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: checkPodName,
},
Spec: v1.PodSpec{


@ -51,7 +51,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
// Wait for the memory pressure condition to disappear from the node status before continuing.
By("waiting for the memory pressure condition on the node to disappear before ending the test.")
Eventually(func() error {
nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("tried to get node list but got error: %v", err)
}
@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
// This is the final check to try to prevent interference with subsequent tests.
podName := "admit-best-effort-pod"
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
@ -175,7 +175,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
// see the eviction manager reporting a pressure condition for a while without the besteffort failing,
// and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we
// will have more reason to believe the phase is out of date.
nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
glog.Errorf("tried to get node list but got error: %v", err)
}
@ -245,7 +245,7 @@ func createMemhogPod(f *framework.Framework, genName string, ctnName string, res
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: genName,
},
Spec: v1.PodSpec{


@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
uid := pod.UID
By("delete the mirror pod with grace period 30s")
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, v1.NewDeleteOptions(30))
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(30))
Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to be recreated")
@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
uid := pod.UID
By("delete the mirror pod with grace period 0s")
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(0))
Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to be recreated")


@ -34,6 +34,7 @@ import (
cadvisorclient "github.com/google/cadvisor/client/v2"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/opencontainers/runc/libcontainer/cgroups"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@ -294,7 +295,7 @@ func formatCPUSummary(summary framework.ContainersCPUSummary) string {
// createCadvisorPod creates a standalone cadvisor pod for fine-grain resource monitoring.
func getCadvisorPod() *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: cadvisorPodName,
},
Spec: v1.PodSpec{
@ -373,7 +374,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
go func(pod *v1.Pod) {
defer wg.Done()
err := f.PodClient().Delete(pod.ObjectMeta.Name, v1.NewDeleteOptions(30))
err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred())
Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
@ -395,7 +396,7 @@ func newTestPods(numPods int, imageName, podType string) []*v1.Pod {
}
pods = append(pods,
&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: labels,
},
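
deletePodsSync above fans the deletions out into goroutines and waits for every pod to disappear before returning. The sketch below shows that shape end to end; the polling interval and overall timeout passed to WaitForPodToDisappear are assumptions, since the hunk cuts off before those arguments.

```go
package e2e_node

import (
	"sync"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// deletePodsInParallel is an illustrative sketch, not part of this commit.
// Each pod is deleted with a 30s grace period in its own goroutine, and the
// WaitGroup blocks until every pod has disappeared from the apiserver.
func deletePodsInParallel(f *framework.Framework, pods []*v1.Pod) {
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		go func(pod *v1.Pod) {
			defer wg.Done()
			if err := f.PodClient().Delete(pod.Name, metav1.NewDeleteOptions(30)); err != nil {
				framework.Logf("failed to delete pod %q: %v", pod.Name, err)
				return
			}
			// Interval and timeout below are assumed values, not taken from the diff.
			framework.ExpectNoError(framework.WaitForPodToDisappear(
				f.ClientSet, f.Namespace.Name, pod.Name, labels.Everything(),
				30*time.Second, 10*time.Minute))
		}(pod)
	}
	wg.Wait()
}
```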


@ -21,6 +21,7 @@ package e2e_node
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"fmt"
@ -35,7 +36,7 @@ import (
// If the timeout is hit, it returns the list of currently running pods.
func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (runningPods []*v1.Pod) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
podList, err := f.PodClient().List(v1.ListOptions{})
podList, err := f.PodClient().List(metav1.ListOptions{})
if err != nil {
framework.Logf("Failed to list pods on node: %v", err)
continue


@ -28,6 +28,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
const (
@ -128,46 +129,112 @@ while true; do sleep 1; done
}
})
It("should report termination message if TerminationMessagePath is set [Conformance]", func() {
name := "termination-message-container"
terminationMessage := "DONE"
terminationMessagePath := "/dev/termination-log"
priv := true
c := ConformanceContainer{
PodClient: f.PodClient(),
Container: v1.Container{
rootUser := int64(0)
nonRootUser := int64(10000)
for _, testCase := range []struct {
name string
container v1.Container
phase v1.PodPhase
message gomegatypes.GomegaMatcher
}{
{
name: "if TerminationMessagePath is set [Conformance]",
container: v1.Container{
Image: "gcr.io/google_containers/busybox:1.24",
Name: name,
Command: []string{"/bin/sh", "-c"},
Args: []string{fmt.Sprintf("/bin/echo -n %s > %s", terminationMessage, terminationMessagePath)},
TerminationMessagePath: terminationMessagePath,
Args: []string{"/bin/echo -n DONE > /dev/termination-log"},
TerminationMessagePath: "/dev/termination-log",
SecurityContext: &v1.SecurityContext{
Privileged: &priv,
RunAsUser: &rootUser,
},
},
RestartPolicy: v1.RestartPolicyNever,
}
phase: v1.PodSucceeded,
message: Equal("DONE"),
},
By("create the container")
c.Create()
defer c.Delete()
{
name: "if TerminationMessagePath is set as non-root user and at a non-default path [Conformance]",
container: v1.Container{
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE > /dev/termination-custom-log"},
TerminationMessagePath: "/dev/termination-custom-log",
SecurityContext: &v1.SecurityContext{
RunAsUser: &nonRootUser,
},
},
phase: v1.PodSucceeded,
message: Equal("DONE"),
},
By("wait for the container to succeed")
Eventually(c.GetPhase, retryTimeout, pollInterval).Should(Equal(v1.PodSucceeded))
{
name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [Conformance]",
container: v1.Container{
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE; /bin/false"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodFailed,
message: Equal("DONE\n"),
},
By("get the container status")
status, err := c.GetStatus()
Expect(err).NotTo(HaveOccurred())
{
name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set",
container: v1.Container{
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo DONE; /bin/true"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodSucceeded,
message: Equal(""),
},
By("the container should be terminated")
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
{
name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [Conformance]",
container: v1.Container{
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n OK > /dev/termination-log; /bin/echo DONE; /bin/true"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodSucceeded,
message: Equal("OK"),
},
} {
It(fmt.Sprintf("should report termination message %s", testCase.name), func() {
testCase.container.Name = "termination-message-container"
c := ConformanceContainer{
PodClient: f.PodClient(),
Container: testCase.container,
RestartPolicy: v1.RestartPolicyNever,
}
By("the termination message should be set")
Expect(status.State.Terminated.Message).Should(Equal(terminationMessage))
By("create the container")
c.Create()
defer c.Delete()
By("delete the container")
Expect(c.Delete()).To(Succeed())
})
By(fmt.Sprintf("wait for the container to reach %s", testCase.phase))
Eventually(c.GetPhase, retryTimeout, pollInterval).Should(Equal(testCase.phase))
By("get the container status")
status, err := c.GetStatus()
Expect(err).NotTo(HaveOccurred())
By("the container should be terminated")
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
By("the termination message should be set")
Expect(status.State.Terminated.Message).Should(testCase.message)
By("delete the container")
Expect(c.Delete()).To(Succeed())
})
}
})
Context("when running a container with a new image", func() {


@ -26,7 +26,6 @@ go_library(
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/typed/dynamic:go_default_library",
"//pkg/controller/namespace:go_default_library",
"//pkg/util/config:go_default_library",
@ -39,6 +38,7 @@ go_library(
"//vendor:github.com/coreos/pkg/capnslog",
"//vendor:github.com/golang/glog",
"//vendor:github.com/kardianos/osext",
"//vendor:k8s.io/client-go/rest",
],
)


@ -19,10 +19,10 @@ package services
import (
"time"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
"k8s.io/kubernetes/test/e2e/framework"


@ -35,7 +35,7 @@ var _ = framework.KubeDescribe("SimpleMount", func() {
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-mount-pod",
},
Spec: v1.PodSpec{


@ -221,7 +221,7 @@ func createSummaryTestPods(f *framework.Framework, names ...string) {
pods := make([]*v1.Pod, 0, len(names))
for _, name := range names {
pods = append(pods, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{


@ -251,7 +251,7 @@ func makeKubeletConfigMap(nodeName string, kubeCfg *componentconfig.KubeletConfi
framework.ExpectNoError(err)
cmap := &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("kubelet-%s", nodeName),
},
Data: map[string]string{
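
Dynamic kubelet configuration stores the serialized KubeletConfiguration in a ConfigMap named after the node, and as with the pods above only the ObjectMeta literal moves to metav1. A short sketch, with the data key and payload parameter as assumptions since the hunk cuts off inside the Data map:

```go
package e2e_node

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// newKubeletConfigMap is an illustrative sketch, not part of this commit.
// The ConfigMap type stays in pkg/api/v1; only its metadata type changes.
func newKubeletConfigMap(nodeName, serializedConfig string) *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("kubelet-%s", nodeName),
		},
		Data: map[string]string{
			// Key name is an assumed placeholder; the hunk does not show it.
			"kubelet.config": serializedConfig,
		},
	}
}
```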


@ -19,6 +19,7 @@ package e2e_node
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@ -41,7 +42,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
By("Creating a pod with a memory backed volume that exits success without restart", func() {
volumeName = "memory-volume"
memoryBackedPod = f.PodClient().Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
@ -80,7 +81,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
// need to create a new verification pod on each pass since updates
// to the HostPath volume aren't propagated to the pod
pod := f.PodClient().Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
@ -113,7 +114,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
})
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
gp := int64(1)
f.PodClient().Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})
f.PodClient().Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
if err == nil {
break
}