Vendor: Update k8s version
Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>
parent dfa93414c5
commit 52baf68d50
3756 changed files with 113013 additions and 92675 deletions
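
The bulk of the diff below is one mechanical migration across the vendored e2e tests: ObjectMeta, ListOptions, DeleteOptions, NewDeleteOptions, SingleObject and the well-known namespace constants move from k8s.io/kubernetes/pkg/api (and pkg/api/v1) to k8s.io/apimachinery/pkg/apis/meta/v1. A minimal before/after sketch of that pattern, illustrative only and not taken from this commit (package and function names here are invented):

package migration

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// examplePod builds a Pod the post-update way: object metadata now comes from metav1.
func examplePod(name, ns string) *v1.Pod {
	return &v1.Pod{
		// Before this update: ObjectMeta: v1.ObjectMeta{Name: name, Namespace: ns}
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "c", Image: "gcr.io/google_containers/busybox:1.24"}},
		},
	}
}

// exampleListOptions shows the same move for the options structs used with the clientsets.
func exampleListOptions(selector string) metav1.ListOptions {
	// Before this update: v1.ListOptions{LabelSelector: selector}
	return metav1.ListOptions{LabelSelector: selector}
}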
2  vendor/k8s.io/kubernetes/test/BUILD (generated, vendored)

@@ -14,10 +14,12 @@ filegroup(
srcs = [
":package-srcs",
"//test/e2e:all-srcs",
"//test/e2e_federation:all-srcs",
"//test/e2e_node:all-srcs",
"//test/fixtures:all-srcs",
"//test/images:all-srcs",
"//test/integration:all-srcs",
"//test/kubemark:all-srcs",
"//test/list:all-srcs",
"//test/soak/cauldron:all-srcs",
"//test/soak/serve_hostnames:all-srcs",

18  vendor/k8s.io/kubernetes/test/OWNERS (generated, vendored)

@@ -1,4 +1,20 @@
assignees:
reviewers:
- deads2k
- fejta
- ixdy
- krousey
- liggitt
- madhusudancs
- smarterclayton
- spxtr
- sttts
approvers:
- deads2k
- fejta
- ixdy
- krousey
- liggitt
- madhusudancs
- smarterclayton
- spxtr
- sttts

37  vendor/k8s.io/kubernetes/test/e2e/BUILD (generated, vendored)

@@ -38,17 +38,6 @@ go_library(
"example_cluster_dns.go",
"example_k8petstore.go",
"examples.go",
"federated-ingress.go",
"federated-namespace.go",
"federated-secret.go",
"federated-service.go",
"federation-apiserver.go",
"federation-authn.go",
"federation-daemonset.go",
"federation-deployment.go",
"federation-event.go",
"federation-replicaset.go",
"federation-util.go",
"firewall.go",
"garbage_collector.go",
"generated_clientset.go",

@@ -80,7 +69,6 @@ go_library(
"pd.go",
"persistent_volumes.go",
"persistent_volumes-disruptive.go",
"petset.go",
"pod_gc.go",
"pods.go",
"portforward.go",

@@ -100,6 +88,7 @@ go_library(
"service_latency.go",
"serviceloadbalancers.go",
"ssh.go",
"statefulset.go",
"third-party.go",
"ubernetes_lite.go",
"util_iperf.go",

@@ -109,9 +98,6 @@ go_library(
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_clientset:go_default_library",
"//federation/client/clientset_generated/federation_clientset/typed/core/v1:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/annotations:go_default_library",
"//pkg/api/resource:go_default_library",

@@ -125,7 +111,7 @@ go_library(
"//pkg/apis/batch/v2alpha1:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/rbac/v1alpha1:go_default_library",
"//pkg/apis/rbac/v1beta1:go_default_library",
"//pkg/apis/storage/util:go_default_library",
"//pkg/apis/storage/v1beta1:go_default_library",
"//pkg/apis/storage/v1beta1/util:go_default_library",

@@ -134,10 +120,6 @@ go_library(
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/transport:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/client/unversioned/clientcmd/api:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",

@@ -145,10 +127,10 @@ go_library(
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/endpoint:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/petset:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/controller/statefulset:go_default_library",
"//pkg/genericapiserver/registry/generic/registry:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/kubelet/api/v1alpha1/stats:go_default_library",

@@ -156,12 +138,9 @@ go_library(
"//pkg/master/ports:go_default_library",
"//pkg/metrics:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/registry/generic/registry:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//pkg/util:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/logs:go_default_library",
"//pkg/util/system:go_default_library",

@@ -174,6 +153,7 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e_federation:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/aws/aws-sdk-go/aws",

@@ -197,6 +177,7 @@ go_library(
"//vendor:gopkg.in/inf.v0",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",

@@ -208,11 +189,15 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/util/yaml",
"//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/apiserver/pkg/authentication/serviceaccount",
"//vendor:k8s.io/client-go/kubernetes",
"//vendor:k8s.io/client-go/pkg/api/v1",
"//vendor:k8s.io/client-go/pkg/apis/extensions/v1beta1",
"//vendor:k8s.io/client-go/pkg/apis/policy/v1beta1",
"//vendor:k8s.io/client-go/pkg/util/intstr",
"//vendor:k8s.io/client-go/rest",
"//vendor:k8s.io/client-go/transport",
"//vendor:k8s.io/client-go/util/flowcontrol",
],
)

@@ -228,12 +213,12 @@ go_test(
"integration",
],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/metrics:go_default_library",
"//test/e2e/framework:go_default_library",
"//vendor:github.com/onsi/ginkgo",
"//vendor:github.com/onsi/gomega",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
],
)

3  vendor/k8s.io/kubernetes/test/e2e/addon_update.go (generated, vendored)

@@ -26,7 +26,6 @@ import (
"golang.org/x/crypto/ssh"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"

@@ -206,7 +205,7 @@ spec:
const (
addonTestPollInterval = 3 * time.Second
addonTestPollTimeout = 5 * time.Minute
defaultNsName = v1.NamespaceDefault
defaultNsName = metav1.NamespaceDefault
addonNsName = "kube-system"
)

9  vendor/k8s.io/kubernetes/test/e2e/apparmor.go (generated, vendored)

@@ -19,6 +19,7 @@ package e2e
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/security/apparmor"

@@ -52,7 +53,7 @@ elif ! touch %[2]s; then
exit 2
fi`, deniedPath, allowedPath)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "test-apparmor",
Annotations: map[string]string{
apparmor.ContainerAnnotationKeyPrefix + "test": profile,

@@ -99,7 +100,7 @@ profile %s flags=(attach_disconnected) {
`, profileName, deniedPath, allowedPath)
cm := &api.ConfigMap{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "apparmor-profiles",
Namespace: f.Namespace.Name,
},

@@ -114,13 +115,13 @@ func createAppArmorProfileLoader(f *framework.Framework) (*extensions.DaemonSet,
True := true
// Copied from https://github.com/kubernetes/contrib/blob/master/apparmor/loader/example-configmap.yaml
loader := &extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "apparmor-loader",
Namespace: f.Namespace.Name,
},
Spec: extensions.DaemonSetSpec{
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": "apparmor-loader"},
},
Spec: api.PodSpec{

4  vendor/k8s.io/kubernetes/test/e2e/autoscaling_utils.go (generated, vendored)

@@ -330,7 +330,7 @@ func (rc *ResourceConsumer) CleanUp() {
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := c.Core().Services(ns).Create(&v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{

@@ -384,7 +384,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
By(fmt.Sprintf("Running controller"))
controllerName := name + "-ctrl"
_, err = c.Core().Services(ns).Create(&v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: controllerName,
},
Spec: v1.ServiceSpec{

4  vendor/k8s.io/kubernetes/test/e2e/cadvisor.go (generated, vendored)

@@ -20,7 +20,7 @@ import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"

@@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Cadvisor", func() {
func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) {
// It should be OK to list unschedulable Nodes here.
By("getting list of nodes")
nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
nodeList, err := c.Core().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
var errors []error

16  vendor/k8s.io/kubernetes/test/e2e/cluster_logging_es.go (generated, vendored)

@@ -26,8 +26,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"

@@ -55,7 +53,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
By("Running synthetic logger")
createSynthLogger(f, expectedLinesCount)
defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
defer f.PodClient().Delete(synthLoggerPodName, &metav1.DeleteOptions{})
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))

@@ -89,7 +87,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
func checkElasticsearchReadiness(f *framework.Framework) error {
// Check for the existence of the Elasticsearch service.
By("Checking the Elasticsearch service exists.")
s := f.ClientSet.Core().Services(api.NamespaceSystem)
s := f.ClientSet.Core().Services(metav1.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created.
var err error

@@ -104,8 +102,8 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
// Wait for the Elasticsearch pods to enter the running state.
By("Checking to make sure the Elasticsearch pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "elasticsearch-logging"}))
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)

@@ -128,7 +126,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
defer cancel()
// Query against the root URL for Elasticsearch.
response := proxyRequest.Namespace(api.NamespaceSystem).
response := proxyRequest.Namespace(metav1.NamespaceSystem).
Context(ctx).
Name("elasticsearch-logging").
Do()

@@ -168,7 +166,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
body, err = proxyRequest.Namespace(api.NamespaceSystem).
body, err = proxyRequest.Namespace(metav1.NamespaceSystem).
Context(ctx).
Name("elasticsearch-logging").
Suffix("_cluster/health").

@@ -219,7 +217,7 @@ func getMissingLinesCountElasticsearch(f *framework.Framework, expectedCount int
// Ask Elasticsearch to return all the log lines that were tagged with the
// pod name. Ask for ten times as many log lines because duplication is possible.
body, err := proxyRequest.Namespace(api.NamespaceSystem).
body, err := proxyRequest.Namespace(metav1.NamespaceSystem).
Context(ctx).
Name("elasticsearch-logging").
Suffix("_search").

4  vendor/k8s.io/kubernetes/test/e2e/cluster_logging_gcl.go (generated, vendored)

@@ -23,8 +23,8 @@ import (
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"

@@ -41,7 +41,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
It("should check that logs from containers are ingested in GCL", func() {
By("Running synthetic logger")
createSynthLogger(f, expectedLinesCount)
defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
defer f.PodClient().Delete(synthLoggerPodName, &metav1.DeleteOptions{})
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))

9  vendor/k8s.io/kubernetes/test/e2e/cluster_logging_utils.go (generated, vendored)

@@ -23,7 +23,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)

@@ -44,7 +43,7 @@ const (
func createSynthLogger(f *framework.Framework, linesCount int) {
f.PodClient().Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: synthLoggerPodName,
Namespace: f.Namespace.Name,
},

@@ -74,13 +73,13 @@ func reportLogsFromFluentdPod(f *framework.Framework) error {
}
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"}))
options := v1.ListOptions{LabelSelector: label.String()}
fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
options := metav1.ListOptions{LabelSelector: label.String()}
fluentdPods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options)
for _, fluentdPod := range fluentdPods.Items {
if fluentdPod.Spec.NodeName == synthLoggerNodeName {
containerName := fluentdPod.Spec.Containers[0].Name
logs, err := framework.GetPodLogs(f.ClientSet, api.NamespaceSystem, fluentdPod.Name, containerName)
logs, err := framework.GetPodLogs(f.ClientSet, metav1.NamespaceSystem, fluentdPod.Name, containerName)
if err != nil {
return fmt.Errorf("Failed to get logs from fluentd pod %s due to %v", fluentdPod.Name, err)
}

8  vendor/k8s.io/kubernetes/test/e2e/cluster_size_autoscaling.go (generated, vendored)

@@ -27,10 +27,10 @@ import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"

@@ -107,7 +107,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
EventsLoop:
for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
By("Waiting for NotTriggerScaleUp event")
events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(v1.ListOptions{})
events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {

@@ -565,7 +565,7 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
// WaitForClusterSize waits until the cluster size matches the given function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {

@@ -592,7 +592,7 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time
func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
var notready []string
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
pods, err := c.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
pods, err := c.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to get pods: %v", err)
}

203  vendor/k8s.io/kubernetes/test/e2e/common/configmap.go (generated, vendored)

@@ -19,8 +19,10 @@ package common
import (
"fmt"
"os"
"path"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"

@@ -85,7 +87,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
containerName := "configmap-volume-test"
configMap := &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},

@@ -101,7 +103,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{

@@ -153,6 +155,189 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})
It("optional updates should be reflected in volume [Conformance] [Volume]", func() {
// We may have to wait or a full sync period to elapse before the
// Kubelet projects the update into the volume and the container picks
// it up. This timeout is based on the default Kubelet sync period (1
// minute) plus additional time for fudge factor.
const podLogTimeout = 300 * time.Second
trueVal := true
volumeMountPath := "/etc/configmap-volumes"
deleteName := "cm-test-opt-del-" + string(uuid.NewUUID())
deleteContainerName := "delcm-volume-test"
deleteVolumeName := "deletecm-volume"
deleteConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: deleteName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
updateName := "cm-test-opt-upd-" + string(uuid.NewUUID())
updateContainerName := "updcm-volume-test"
updateVolumeName := "updatecm-volume"
updateConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: updateName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "createcm-volume-test"
createVolumeName := "createcm-volume"
createConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: createName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: deleteVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: deleteName,
},
Optional: &trueVal,
},
},
},
{
Name: updateVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: updateName,
},
Optional: &trueVal,
},
},
},
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: createName,
},
Optional: &trueVal,
},
},
},
},
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
MountPath: path.Join(volumeMountPath, "delete"),
ReadOnly: true,
},
},
},
{
Name: updateContainerName,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
MountPath: path.Join(volumeMountPath, "update"),
ReadOnly: true,
},
},
},
{
Name: createContainerName,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
pollCreateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
By("waiting to observe update in volume")
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1"))
})
It("should be consumable via environment variable [Conformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)

@@ -163,7 +348,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{

@@ -206,7 +391,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{

@@ -253,7 +438,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{

@@ -311,7 +496,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},

@@ -325,7 +510,7 @@ func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
func newEnvFromConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},

@@ -352,7 +537,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{

@@ -426,7 +611,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{

16  vendor/k8s.io/kubernetes/test/e2e/common/container_probe.go (generated, vendored)

@@ -96,7 +96,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
It("should be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},

@@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
It("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},

@@ -150,7 +150,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
It("should be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},

@@ -179,7 +179,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
// Slow by design (5 min)
It("should have monotonically increasing restart count [Conformance] [Slow]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},

@@ -207,7 +207,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},

@@ -238,7 +238,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
// TODO: enable this test once the default exec handler supports timeout.
Skip("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},

@@ -297,7 +297,7 @@ func getRestartCount(p *v1.Pod) int {
func makePodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
ObjectMeta: metav1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
Spec: v1.PodSpec{
Containers: []v1.Container{
{

@@ -353,7 +353,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)

3  vendor/k8s.io/kubernetes/test/e2e/common/docker_containers.go (generated, vendored)

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"

@@ -71,7 +72,7 @@ func entrypointTestPod() *v1.Pod {
podName := "client-containers-" + string(uuid.NewUUID())
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{

5  vendor/k8s.io/kubernetes/test/e2e/common/downward_api.go (generated, vendored)

@@ -19,6 +19,7 @@ package common
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"

@@ -153,7 +154,7 @@ var _ = framework.KubeDescribe("Downward API", func() {
fmt.Sprintf("MEMORY_LIMIT=[1-9]"),
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},

@@ -176,7 +177,7 @@ var _ = framework.KubeDescribe("Downward API", func() {
func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},

2  vendor/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go (generated, vendored)

@@ -333,7 +333,7 @@ func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[stri
func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
Annotations: annotations,

2  vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go (generated, vendored)

@@ -317,7 +317,7 @@ func testPodWithVolume(image, path string, source *v1.EmptyDirVolumeSource) *v1.
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{

7  vendor/k8s.io/kubernetes/test/e2e/common/expansion.go (generated, vendored)

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"

@@ -32,7 +33,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
It("should allow composing env vars into new env vars [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},

@@ -72,7 +73,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
It("should allow substituting values in a container's command [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},

@@ -102,7 +103,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
It("should allow substituting values in a container's args [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},

2  vendor/k8s.io/kubernetes/test/e2e/common/host_path.go (generated, vendored)

@@ -139,7 +139,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{

17  vendor/k8s.io/kubernetes/test/e2e/common/init_container.go (generated, vendored)

@@ -21,6 +21,7 @@ import (
"strconv"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"

@@ -47,7 +48,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",

@@ -81,7 +82,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)

@@ -112,7 +113,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",

@@ -150,7 +151,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)

@@ -182,7 +183,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",

@@ -220,7 +221,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)

@@ -303,7 +304,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",

@@ -344,7 +345,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)

5  vendor/k8s.io/kubernetes/test/e2e/common/kubelet_etc_hosts.go (generated, vendored)

@@ -22,6 +22,7 @@ import (
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)

@@ -130,7 +131,7 @@ func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerNam
func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{

@@ -186,7 +187,7 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{

38  vendor/k8s.io/kubernetes/test/e2e/common/pods.go (generated, vendored)

@@ -129,7 +129,7 @@ var _ = framework.KubeDescribe("Pods", func() {
It("should get a host IP [Conformance]", func() {
name := "pod-hostip-" + string(uuid.NewUUID())
testHostIP(podClient, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{

@@ -148,7 +148,7 @@ var _ = framework.KubeDescribe("Pods", func() {
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",

@@ -167,11 +167,11 @@ var _ = framework.KubeDescribe("Pods", func() {
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := v1.ListOptions{LabelSelector: selector.String()}
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
options = v1.ListOptions{
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}

@@ -183,7 +183,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))

@@ -207,7 +207,7 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.Logf("running pod: %#v", pod)
By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, v1.NewDeleteOptions(30))
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
By("verifying the kubelet observed the termination notice")

@@ -255,7 +255,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))

@@ -266,7 +266,7 @@ var _ = framework.KubeDescribe("Pods", func() {
name := "pod-update-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",

@@ -288,7 +288,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := v1.ListOptions{LabelSelector: selector.String()}
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))

@@ -303,7 +303,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the updated pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))

@@ -315,7 +315,7 @@ var _ = framework.KubeDescribe("Pods", func() {
name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",

@@ -337,7 +337,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := v1.ListOptions{LabelSelector: selector.String()}
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))

@@ -356,7 +356,7 @@ var _ = framework.KubeDescribe("Pods", func() {
// This pod serves its hostname via HTTP.
serverName := "server-envvars-" + string(uuid.NewUUID())
serverPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: serverName,
Labels: map[string]string{"name": serverName},
},

@@ -381,7 +381,7 @@ var _ = framework.KubeDescribe("Pods", func() {
// allow overriding the prefix in the service manifest.
svcName := "fooservice"
svc := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
Labels: map[string]string{
"name": svcName,

@@ -404,7 +404,7 @@ var _ = framework.KubeDescribe("Pods", func() {
podName := "client-envvars-" + string(uuid.NewUUID())
const containerName = "env3cont"
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},

@@ -444,7 +444,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod")
name := "pod-exec-websocket-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{

@@ -514,7 +514,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod")
name := "pod-logs-websocket-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{

@@ -568,7 +568,7 @@ var _ = framework.KubeDescribe("Pods", func() {
podName := "pod-back-off-image"
containerName := "back-off"
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "back-off-image"},
},

@@ -609,7 +609,7 @@ var _ = framework.KubeDescribe("Pods", func() {
podName := "back-off-cap"
containerName := "back-off-cap"
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "liveness"},
},

3  vendor/k8s.io/kubernetes/test/e2e/common/privileged.go (generated, vendored)

@@ -21,6 +21,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)

@@ -83,7 +84,7 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
const image = "gcr.io/google_containers/busybox:1.24"
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: c.privilegedPod,
Namespace: c.f.Namespace.Name,
},

245  vendor/k8s.io/kubernetes/test/e2e/common/secrets.go (generated, vendored)

@@ -19,12 +19,16 @@ package common
import (
"fmt"
"os"
"path"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Secrets", func() {

@@ -96,7 +100,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{

@@ -149,6 +153,183 @@ var _ = framework.KubeDescribe("Secrets", func() {
})
})
It("optional updates should be reflected in volume [Conformance] [Volume]", func() {
// We may have to wait or a full sync period to elapse before the
// Kubelet projects the update into the volume and the container picks
// it up. This timeout is based on the default Kubelet sync period (1
// minute) plus additional time for fudge factor.
const podLogTimeout = 300 * time.Second
trueVal := true
volumeMountPath := "/etc/secret-volumes"
deleteName := "s-test-opt-del-" + string(uuid.NewUUID())
deleteContainerName := "dels-volume-test"
deleteVolumeName := "deletes-volume"
deleteSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: deleteName,
},
Data: map[string][]byte{
"data-1": []byte("value-1"),
},
}
updateName := "s-test-opt-upd-" + string(uuid.NewUUID())
updateContainerName := "upds-volume-test"
updateVolumeName := "updates-volume"
updateSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: updateName,
},
Data: map[string][]byte{
"data-1": []byte("value-1"),
},
}
createName := "s-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "creates-volume-test"
createVolumeName := "creates-volume"
createSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: createName,
},
Data: map[string][]byte{
"data-1": []byte("value-1"),
},
}
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: deleteVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: deleteName,
Optional: &trueVal,
},
},
},
{
Name: updateVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: updateName,
Optional: &trueVal,
},
},
},
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: createName,
Optional: &trueVal,
},
},
},
},
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
MountPath: path.Join(volumeMountPath, "delete"),
ReadOnly: true,
},
},
},
{
Name: updateContainerName,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
MountPath: path.Join(volumeMountPath, "update"),
ReadOnly: true,
},
},
},
{
Name: createContainerName,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
pollCreateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
|
||||
_, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Update(updateSecret)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
|
||||
if createSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
|
||||
}
|
||||
|
||||
By("waiting to observe update in volume")
|
||||
|
||||
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
|
||||
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
|
||||
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
|
||||
})
|
||||
|
||||
It("should be consumable from pods in env vars [Conformance]", func() {
|
||||
name := "secret-test-" + string(uuid.NewUUID())
|
||||
secret := secretForTest(f.Namespace.Name, name)
|
||||
|
@ -160,7 +341,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
|
|||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -192,11 +373,65 @@ var _ = framework.KubeDescribe("Secrets", func() {
|
|||
"SECRET_DATA=value-1",
|
||||
})
|
||||
})
|
||||
|
||||
It("should be consumable via the environment [Conformance]", func() {
|
||||
name := "secret-test-" + string(uuid.NewUUID())
|
||||
secret := newEnvFromSecret(f.Namespace.Name, name)
|
||||
By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
|
||||
var err error
|
||||
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "env-test",
|
||||
Image: "gcr.io/google_containers/busybox:1.24",
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{
|
||||
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
|
||||
},
|
||||
{
|
||||
Prefix: "p_",
|
||||
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume secrets", pod, 0, []string{
|
||||
"data_1=value-1", "data_2=value-2", "data_3=value-3",
|
||||
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func newEnvFromSecret(namespace, name string) *v1.Secret {
|
||||
return &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data_1": []byte("value-1\n"),
|
||||
"data_2": []byte("value-2\n"),
|
||||
"data_3": []byte("value-3\n"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func secretForTest(namespace, name string) *v1.Secret {
|
||||
return &v1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
|
@ -222,7 +457,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
|
|||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-secrets-" + string(uuid.NewUUID()),
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
|
@ -294,7 +529,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
|
|||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
|
2
vendor/k8s.io/kubernetes/test/e2e/common/sysctl.go
generated
vendored
|
@ -38,7 +38,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
|
|||
testPod := func() *v1.Pod {
|
||||
podName := "sysctl-" + string(uuid.NewUUID())
|
||||
pod := v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
|
|
5
vendor/k8s.io/kubernetes/test/e2e/common/util.go
generated
vendored
|
@ -23,8 +23,9 @@ import (
|
|||
type Suite string
|
||||
|
||||
const (
|
||||
E2E Suite = "e2e"
|
||||
NodeE2E Suite = "node e2e"
|
||||
E2E Suite = "e2e"
|
||||
NodeE2E Suite = "node e2e"
|
||||
FederationE2E Suite = "federation e2e"
|
||||
)
|
||||
|
||||
var CurrentSuite Suite
|
||||
|
|
8
vendor/k8s.io/kubernetes/test/e2e/common/volumes.go
generated
vendored
|
@ -122,7 +122,7 @@ func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *v1.Pod
|
|||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.prefix + "-server",
|
||||
Labels: map[string]string{
|
||||
"role": config.prefix + "-server",
|
||||
|
@ -198,7 +198,7 @@ func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume v1
|
|||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.prefix + "-client",
|
||||
Labels: map[string]string{
|
||||
"role": config.prefix + "-client",
|
||||
|
@ -274,7 +274,7 @@ func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
|
|||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.prefix + "-injector",
|
||||
Labels: map[string]string{
|
||||
"role": config.prefix + "-injector",
|
||||
|
@ -421,7 +421,7 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
|
|||
Kind: "Endpoints",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.prefix + "-server",
|
||||
},
|
||||
Subsets: []v1.EndpointSubset{
|
||||
|
|
19
vendor/k8s.io/kubernetes/test/e2e/cronjob.go
generated
vendored
|
@ -27,7 +27,6 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
|
||||
batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
|
||||
|
@ -68,7 +67,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
|
|||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring at least two running jobs exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
|
||||
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
activeJobs := filterActiveJobs(jobs)
|
||||
Expect(len(activeJobs) >= 2).To(BeTrue())
|
||||
|
@ -91,7 +90,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
|
|||
Expect(err).To(HaveOccurred())
|
||||
|
||||
By("Ensuring no job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
|
||||
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(jobs.Items).To(HaveLen(0))
|
||||
|
||||
|
@ -117,7 +116,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
|
|||
Expect(cronJob.Status.Active).Should(HaveLen(1))
|
||||
|
||||
By("Ensuring exaclty one running job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
|
||||
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
activeJobs := filterActiveJobs(jobs)
|
||||
Expect(activeJobs).To(HaveLen(1))
|
||||
|
@ -148,7 +147,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
|
|||
Expect(cronJob.Status.Active).Should(HaveLen(1))
|
||||
|
||||
By("Ensuring exaclty one running job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
|
||||
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
activeJobs := filterActiveJobs(jobs)
|
||||
Expect(activeJobs).To(HaveLen(1))
|
||||
|
@ -205,7 +204,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
|
|||
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
timeout := 1 * time.Minute
|
||||
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
|
||||
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job was deleted")
|
||||
|
@ -232,7 +231,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPo
|
|||
parallelism := int32(1)
|
||||
completions := int32(1)
|
||||
sj := &batch.CronJob{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: batch.CronJobSpec{
|
||||
|
@ -322,7 +321,7 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo
|
|||
// Wait for a job to be replaced with a new one.
|
||||
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
|
||||
jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -339,7 +338,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
|
|||
// waitForJobsAtLeast waits for at least a number of jobs to appear.
|
||||
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
|
||||
jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -350,7 +349,7 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
|
|||
// waitForAnyFinishedJob waits for any completed job to appear.
|
||||
func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
|
||||
jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
|
7
vendor/k8s.io/kubernetes/test/e2e/daemon_restart.go
generated
vendored
|
@ -21,6 +21,7 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
@ -170,7 +171,7 @@ func replacePods(pods []*v1.Pod, store cache.Store) {
|
|||
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
|
||||
// and a list of nodenames across which these containers restarted.
|
||||
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
|
||||
options := v1.ListOptions{LabelSelector: labelSelector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: labelSelector.String()}
|
||||
pods, err := c.Core().Pods(ns).List(options)
|
||||
framework.ExpectNoError(err)
|
||||
failedContainers := 0
|
||||
|
@ -220,12 +221,12 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
|
|||
tracker = newPodTracker()
|
||||
newPods, controller = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.LabelSelector = labelSelector.String()
|
||||
obj, err := f.ClientSet.Core().Pods(ns).List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labelSelector.String()
|
||||
return f.ClientSet.Core().Pods(ns).Watch(options)
|
||||
},
|
||||
|
|
22
vendor/k8s.io/kubernetes/test/e2e/daemon_set.go
generated
vendored
|
@ -59,12 +59,12 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
|
|||
var f *framework.Framework
|
||||
|
||||
AfterEach(func() {
|
||||
if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(v1.ListOptions{}); err == nil {
|
||||
if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...), daemonsets))
|
||||
} else {
|
||||
framework.Logf("unable to dump daemonsets: %v", err)
|
||||
}
|
||||
if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{}); err == nil {
|
||||
if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...), pods))
|
||||
} else {
|
||||
framework.Logf("unable to dump pods: %v", err)
|
||||
|
@ -94,12 +94,12 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
|
|||
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: dsName,
|
||||
},
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -136,7 +136,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
|
|||
podClient := c.Core().Pods(ns)
|
||||
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(podList.Items)).To(BeNumerically(">", 0))
|
||||
|
@ -153,13 +153,13 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
|
|||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon with a node selector %s", dsName)
|
||||
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: dsName,
|
||||
},
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: complexLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: complexLabel,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -226,13 +226,13 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
|
|||
},
|
||||
}
|
||||
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: dsName,
|
||||
},
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: complexLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: complexLabel,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -345,7 +345,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
|
|||
func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
selector := labels.Set(selector).AsSelector()
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(options)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
|
@ -374,7 +374,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, n
|
|||
|
||||
func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
|
||||
nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
nodeNames := make([]string, 0)
|
||||
for _, node := range nodeList.Items {
|
||||
|
|
4
vendor/k8s.io/kubernetes/test/e2e/dashboard.go
generated
vendored
|
@ -21,9 +21,9 @@ import (
|
|||
"net/http"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
|
@ -35,7 +35,7 @@ var _ = framework.KubeDescribe("Kubernetes Dashboard", func() {
|
|||
const (
|
||||
uiServiceName = "kubernetes-dashboard"
|
||||
uiAppName = uiServiceName
|
||||
uiNamespace = api.NamespaceSystem
|
||||
uiNamespace = metav1.NamespaceSystem
|
||||
|
||||
serverStartTimeout = 1 * time.Minute
|
||||
)
|
||||
|
|
18
vendor/k8s.io/kubernetes/test/e2e/density.go
generated
vendored
|
@ -26,6 +26,7 @@ import (
|
|||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
@ -39,7 +40,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
utiluuid "k8s.io/kubernetes/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/util/workqueue"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
@ -171,7 +171,7 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
|
|||
|
||||
func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
|
||||
label := labels.SelectorFromSet(labels.Set(observedLabels))
|
||||
podStore := testutils.NewPodStore(c, v1.NamespaceAll, label, fields.Everything())
|
||||
podStore := testutils.NewPodStore(c, metav1.NamespaceAll, label, fields.Everything())
|
||||
defer podStore.Stop()
|
||||
ticker := time.NewTicker(period)
|
||||
defer ticker.Stop()
|
||||
|
@ -228,12 +228,12 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
|
|||
|
||||
// Print some data about Pod to Node allocation
|
||||
By("Printing Pod to Node allocation data")
|
||||
podList, err := dtc.ClientSet.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
|
||||
podList, err := dtc.ClientSet.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
pausePodAllocation := make(map[string]int)
|
||||
systemPodAllocation := make(map[string][]string)
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Namespace == api.NamespaceSystem {
|
||||
if pod.Namespace == metav1.NamespaceSystem {
|
||||
systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
|
||||
} else {
|
||||
pausePodAllocation[pod.Spec.NodeName]++
|
||||
|
@ -565,12 +565,12 @@ var _ = framework.KubeDescribe("Density", func() {
|
|||
nsName := namespaces[i].Name
|
||||
latencyPodsStore, controller := cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
|
||||
obj, err := c.Core().Pods(nsName).List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
|
||||
return c.Core().Pods(nsName).Watch(options)
|
||||
},
|
||||
|
@ -655,7 +655,7 @@ var _ = framework.KubeDescribe("Density", func() {
|
|||
"involvedObject.namespace": nsName,
|
||||
"source": v1.DefaultSchedulerName,
|
||||
}.AsSelector().String()
|
||||
options := v1.ListOptions{FieldSelector: selector}
|
||||
options := metav1.ListOptions{FieldSelector: selector}
|
||||
schedEvents, err := c.Core().Events(nsName).List(options)
|
||||
framework.ExpectNoError(err)
|
||||
for k := range createTimes {
|
||||
|
@ -785,7 +785,7 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns,
|
|||
"name": name,
|
||||
}
|
||||
rc := &v1.ReplicationController{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: labels,
|
||||
},
|
||||
|
@ -793,7 +793,7 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns,
|
|||
Replicas: func(i int) *int32 { x := int32(i); return &x }(1),
|
||||
Selector: labels,
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
|
21
vendor/k8s.io/kubernetes/test/e2e/deployment.go
generated
vendored
|
@ -29,7 +29,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/annotations"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
|
@ -109,7 +108,7 @@ var _ = framework.KubeDescribe("Deployment", func() {
|
|||
func newDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName string, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int32) *extensions.Deployment {
|
||||
zero := int64(0)
|
||||
return &extensions.Deployment{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
|
@ -120,7 +119,7 @@ func newDeployment(deploymentName string, replicas int32, podLabels map[string]s
|
|||
},
|
||||
RevisionHistoryLimit: revisionHistoryLimit,
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -185,7 +184,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, internalClient internalcl
|
|||
Expect(err).NotTo(HaveOccurred())
|
||||
timeout := 1 * time.Minute
|
||||
|
||||
err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
|
||||
err = reaper.Stop(ns, deployment.Name, timeout, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
|
||||
|
@ -195,7 +194,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, internalClient internalcl
|
|||
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rss, err := c.Extensions().ReplicaSets(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// RSes may be created by overlapping deployments right after this deployment is deleted, ignore them
|
||||
|
@ -412,11 +411,11 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
|||
deploymentName := "test-cleanup-deployment"
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
|
||||
pods, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
pods, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
|
||||
}
|
||||
options := v1.ListOptions{
|
||||
options := metav1.ListOptions{
|
||||
ResourceVersion: pods.ListMeta.ResourceVersion,
|
||||
}
|
||||
stopCh := make(chan struct{})
|
||||
|
@ -572,7 +571,7 @@ func testPausedDeployment(f *framework.Framework) {
|
|||
if err != nil {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
opts := v1.ListOptions{LabelSelector: selector.String()}
|
||||
opts := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
w, err := c.Extensions().ReplicaSets(ns).Watch(opts)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
|
@ -916,7 +915,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
|
|||
// All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err := c.Core().Pods(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.CheckPodHashLabel(pods)
|
||||
|
@ -1163,7 +1162,7 @@ func testOverlappingDeployment(f *framework.Framework) {
|
|||
|
||||
// Only the first deployment is synced
|
||||
By("Checking only the first overlapping deployment is synced")
|
||||
options := v1.ListOptions{}
|
||||
options := metav1.ListOptions{}
|
||||
rsList, err := c.Extensions().ReplicaSets(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed listing all replica sets in namespace %s", ns)
|
||||
Expect(rsList.Items).To(HaveLen(int(replicas)))
|
||||
|
@ -1365,7 +1364,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
opts := v1.ListOptions{LabelSelector: selector.String()}
|
||||
opts := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.Core().Pods(ns).List(opts)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if len(podList.Items) == 0 {
|
||||
|
|
14
vendor/k8s.io/kubernetes/test/e2e/disruption.go
generated
vendored
|
@ -136,7 +136,7 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
|
|||
// Locate a running pod.
|
||||
var pod v1.Pod
|
||||
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
|
||||
podList, err := cs.Pods(ns).List(v1.ListOptions{})
|
||||
podList, err := cs.Pods(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -153,7 +153,7 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
|
|||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
e := &policy.Eviction{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pod.Name,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -191,7 +191,7 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
|
|||
|
||||
func createPodDisruptionBudgetOrDie(cs *kubernetes.Clientset, ns string, minAvailable intstr.IntOrString) {
|
||||
pdb := policy.PodDisruptionBudget{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -207,7 +207,7 @@ func createPodDisruptionBudgetOrDie(cs *kubernetes.Clientset, ns string, minAvai
|
|||
func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
|
||||
for i := 0; i < n; i++ {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("pod-%d", i),
|
||||
Namespace: ns,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
|
@ -231,7 +231,7 @@ func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
|
|||
func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
|
||||
By("Waiting for all pods to be running")
|
||||
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
|
||||
pods, err := cs.Core().Pods(ns).List(v1.ListOptions{LabelSelector: "foo=bar"})
|
||||
pods, err := cs.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -269,7 +269,7 @@ func createReplicaSetOrDie(cs *kubernetes.Clientset, ns string, size int32, excl
|
|||
}
|
||||
|
||||
rs := &extensions.ReplicaSet{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "rs",
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -279,7 +279,7 @@ func createReplicaSetOrDie(cs *kubernetes.Clientset, ns string, size int32, excl
|
|||
MatchLabels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
|
12
vendor/k8s.io/kubernetes/test/e2e/dns.go
generated
vendored
|
@ -49,7 +49,7 @@ func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotatio
|
|||
Kind: "Pod",
|
||||
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "dns-test-" + string(uuid.NewUUID()),
|
||||
Namespace: namespace,
|
||||
},
|
||||
|
@ -238,7 +238,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
|
|||
defer func() {
|
||||
By("deleting the pod")
|
||||
defer GinkgoRecover()
|
||||
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
}()
|
||||
if _, err := podClient.Create(pod); err != nil {
|
||||
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
|
||||
|
@ -267,7 +267,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
|
|||
defer func() {
|
||||
By("deleting the pod")
|
||||
defer GinkgoRecover()
|
||||
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
}()
|
||||
if _, err := podClient.Create(pod); err != nil {
|
||||
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
|
||||
|
@ -288,9 +288,9 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
|
|||
}
|
||||
|
||||
func verifyDNSPodIsRunning(f *framework.Framework) {
|
||||
systemClient := f.ClientSet.Core().Pods(api.NamespaceSystem)
|
||||
systemClient := f.ClientSet.Core().Pods(metav1.NamespaceSystem)
|
||||
By("Waiting for DNS Service to be Running")
|
||||
options := v1.ListOptions{LabelSelector: dnsServiceLabelSelector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: dnsServiceLabelSelector.String()}
|
||||
dnsPods, err := systemClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to list all dns service pods")
|
||||
|
@ -304,7 +304,7 @@ func verifyDNSPodIsRunning(f *framework.Framework) {
|
|||
|
||||
func createServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service {
|
||||
headlessService := &v1.Service{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceName,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
|
|
21
vendor/k8s.io/kubernetes/test/e2e/dns_autoscaling.go
generated
vendored
|
@ -26,7 +26,6 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
|
@ -89,7 +88,7 @@ var _ = framework.KubeDescribe("DNS horizontal autoscaling", func() {
|
|||
|
||||
By("Wait for number of running and ready kube-dns pods recover")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
|
||||
_, err := framework.WaitForPodsWithLabelRunningReady(c, api.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
|
||||
_, err := framework.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}()
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
|
@ -231,7 +230,7 @@ func getScheduableCores(nodes []v1.Node) int64 {
|
|||
}
|
||||
|
||||
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
|
||||
cm, err := c.Core().ConfigMaps(api.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
|
||||
cm, err := c.Core().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -239,7 +238,7 @@ func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
|
|||
}
|
||||
|
||||
func deleteDNSScalingConfigMap(c clientset.Interface) error {
|
||||
if err := c.Core().ConfigMaps(api.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
|
||||
if err := c.Core().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
framework.Logf("DNS autoscaling ConfigMap deleted.")
|
||||
|
@ -259,13 +258,13 @@ func packLinearParams(params *DNSParamsLinear) map[string]string {
|
|||
func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
|
||||
configMap := v1.ConfigMap{}
|
||||
configMap.ObjectMeta.Name = DNSAutoscalerLabelName
|
||||
configMap.ObjectMeta.Namespace = api.NamespaceSystem
|
||||
configMap.ObjectMeta.Namespace = metav1.NamespaceSystem
|
||||
configMap.Data = params
|
||||
return &configMap
|
||||
}
|
||||
|
||||
func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
|
||||
_, err := c.Core().ConfigMaps(api.NamespaceSystem).Update(configMap)
|
||||
_, err := c.Core().ConfigMaps(metav1.NamespaceSystem).Update(configMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -275,8 +274,8 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e
|
|||
|
||||
func getDNSReplicas(c clientset.Interface) (int, error) {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
|
||||
listOpts := v1.ListOptions{LabelSelector: label.String()}
|
||||
deployments, err := c.Extensions().Deployments(api.NamespaceSystem).List(listOpts)
|
||||
listOpts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
deployments, err := c.Extensions().Deployments(metav1.NamespaceSystem).List(listOpts)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -290,8 +289,8 @@ func getDNSReplicas(c clientset.Interface) (int, error) {
|
|||
|
||||
func deleteDNSAutoscalerPod(c clientset.Interface) error {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
|
||||
listOpts := v1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.Core().Pods(api.NamespaceSystem).List(listOpts)
|
||||
listOpts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.Core().Pods(metav1.NamespaceSystem).List(listOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -300,7 +299,7 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
|
|||
}
|
||||
|
||||
podName := pods.Items[0].Name
|
||||
if err := c.Core().Pods(api.NamespaceSystem).Delete(podName, nil); err != nil {
|
||||
if err := c.Core().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
framework.Logf("DNS autoscaling pod %v deleted.", podName)
|
||||
|
|
12
vendor/k8s.io/kubernetes/test/e2e/dns_configmap.go
generated
vendored
|
@ -22,11 +22,11 @@ import (
|
|||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
|
@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("DNS config map", func() {
|
|||
func (t *dnsConfigMapTest) init() {
|
||||
By("Finding a DNS pod")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
|
||||
options := v1.ListOptions{LabelSelector: label.String()}
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
|
||||
pods, err := t.f.ClientSet.Core().Pods("kube-system").List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
@ -218,7 +218,7 @@ func (t *dnsConfigMapTest) setConfigMap(cm *v1.ConfigMap, fedMap map[string]stri
|
|||
cm.ObjectMeta.Namespace = t.ns
|
||||
cm.ObjectMeta.Name = t.name
|
||||
|
||||
options := v1.ListOptions{
|
||||
options := metav1.ListOptions{
|
||||
FieldSelector: fields.Set{
|
||||
"metadata.namespace": t.ns,
|
||||
"metadata.name": t.name,
|
||||
|
@ -256,7 +256,7 @@ func (t *dnsConfigMapTest) createUtilPod() {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: t.f.Namespace.Name,
|
||||
Labels: map[string]string{"app": "e2e-dns-configmap"},
|
||||
GenerateName: "e2e-dns-configmap-",
|
||||
|
@ -285,7 +285,7 @@ func (t *dnsConfigMapTest) createUtilPod() {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Service",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: t.f.Namespace.Name,
|
||||
Name: "e2e-dns-configmap",
|
||||
},
|
||||
|
@ -308,7 +308,7 @@ func (t *dnsConfigMapTest) createUtilPod() {
|
|||
|
||||
func (t *dnsConfigMapTest) deleteUtilPod() {
|
||||
podClient := t.c.Core().Pods(t.f.Namespace.Name)
|
||||
if err := podClient.Delete(t.utilPod.Name, v1.NewDeleteOptions(0)); err != nil {
|
||||
if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil {
|
||||
framework.Logf("Delete of pod %v:%v failed: %v",
|
||||
t.utilPod.Namespace, t.utilPod.Name, err)
|
||||
}
|
||||
|
|
19
vendor/k8s.io/kubernetes/test/e2e/e2e.go
generated
vendored
|
@ -30,6 +30,7 @@ import (
|
|||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
|
||||
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
|
||||
|
@ -42,6 +43,7 @@ import (
|
|||
commontest "k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/generated"
|
||||
federationtest "k8s.io/kubernetes/test/e2e_federation"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
|
@ -109,7 +111,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
|
|||
// Delete any namespaces except default and kube-system. This ensures no
|
||||
// lingering resources are left over from a previous test run.
|
||||
if framework.TestContext.CleanStart {
|
||||
deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, v1.NamespaceDefault, federationapi.FederationNamespaceSystem})
|
||||
deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{metav1.NamespaceSystem, metav1.NamespaceDefault, federationapi.FederationNamespaceSystem})
|
||||
if err != nil {
|
||||
framework.Failf("Error deleting orphaned namespaces: %v", err)
|
||||
}
|
||||
|
@ -129,14 +131,14 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
|
|||
// test pods from running, and tests that ensure all pods are running and
|
||||
// ready will fail).
|
||||
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
|
||||
if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels, true); err != nil {
|
||||
framework.DumpAllNamespaceInfo(c, api.NamespaceSystem)
|
||||
framework.LogFailedContainers(c, api.NamespaceSystem, framework.Logf)
|
||||
runKubernetesServiceTestContainer(c, v1.NamespaceDefault)
|
||||
if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels, true); err != nil {
|
||||
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
|
||||
framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
|
||||
runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
|
||||
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
|
||||
}
|
||||
|
||||
if err := framework.WaitForPodsSuccess(c, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout); err != nil {
|
||||
if err := framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout); err != nil {
|
||||
// There is no guarantee that the image pulling will succeed in 3 minutes
|
||||
// and we don't even run the image puller on all platforms (including GKE).
|
||||
// We wait for it so we get an indication of failures in the logs, and to
|
||||
|
@ -147,12 +149,15 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
|
|||
// Dump the output of the nethealth containers only once per run
|
||||
if framework.TestContext.DumpLogsOnFailure {
|
||||
framework.Logf("Dumping network health container logs from all nodes")
|
||||
framework.LogContainersInPodsWithLabels(c, api.NamespaceSystem, framework.ImagePullerLabels, "nethealth", framework.Logf)
|
||||
framework.LogContainersInPodsWithLabels(c, metav1.NamespaceSystem, framework.ImagePullerLabels, "nethealth", framework.Logf)
|
||||
}
|
||||
|
||||
// Reference common test to make the import valid.
|
||||
commontest.CurrentSuite = commontest.E2E
|
||||
|
||||
// Reference federation test to make the import valid.
|
||||
federationtest.FederationSuite = commontest.FederationE2E
|
||||
|
||||
return nil
|
||||
|
||||
}, func(data []byte) {
|
||||
|
|
19
vendor/k8s.io/kubernetes/test/e2e/empty_dir_wrapper.go
generated
vendored
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package e2e
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
|
@ -58,7 +59,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
|
|||
volumeMountPath := "/etc/secret-volume"
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
|
@ -78,7 +79,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
|
|||
defer gitCleanup()
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -128,7 +129,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
|
|||
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
|
||||
}
|
||||
By("Cleaning up the git vol pod")
|
||||
if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0)); err != nil {
|
||||
if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
|
||||
framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
|
||||
}
|
||||
}()
|
||||
|
@ -178,7 +179,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
|
|||
labels := map[string]string{"name": gitServerPodName}
|
||||
|
||||
gitServerPod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: gitServerPodName,
|
||||
Labels: labels,
|
||||
},
|
||||
|
@ -201,7 +202,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
|
|||
httpPort := 2345
|
||||
|
||||
gitServerSvc := &v1.Service{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "git-server-svc",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
|
@ -222,7 +223,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
|
|||
|
||||
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
|
||||
By("Cleaning up the git server pod")
|
||||
if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, v1.NewDeleteOptions(0)); err != nil {
|
||||
if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
|
||||
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
|
||||
}
|
||||
By("Cleaning up the git server svc")
|
||||
|
@ -258,7 +259,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
|
|||
configMapName := fmt.Sprintf("racey-configmap-%d", i)
|
||||
configMapNames = append(configMapNames, configMapName)
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: configMapName,
|
||||
},
|
||||
|
@ -333,7 +334,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
|
|||
}
|
||||
|
||||
rc := &v1.ReplicationController{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: rcName,
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
|
@ -342,7 +343,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
|
|||
"name": rcName,
|
||||
},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"name": rcName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
|
7
vendor/k8s.io/kubernetes/test/e2e/etcd_failure.go
generated
vendored
|
@ -19,6 +19,7 @@ package e2e
|
|||
import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
|
@ -106,7 +107,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
|
|||
|
||||
By("deleting pods from existing replication controller")
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
|
||||
options := v1.ListOptions{LabelSelector: rcSelector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
|
||||
pods, err := podClient.List(options)
|
||||
if err != nil {
|
||||
framework.Logf("apiserver returned error, as expected before recovery: %v", err)
|
||||
|
@ -116,7 +117,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
|
|||
return false, nil
|
||||
}
|
||||
for _, pod := range pods.Items {
|
||||
err = podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
|
||||
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
framework.Logf("apiserver has recovered")
|
||||
|
@ -125,7 +126,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
|
|||
|
||||
By("waiting for replication controller to recover")
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
|
||||
options := v1.ListOptions{LabelSelector: rcSelector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
|
||||
pods, err := podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods.Items {
|
||||
|
|
10
vendor/k8s.io/kubernetes/test/e2e/events.go
generated
vendored
|
@ -22,10 +22,10 @@ import (
|
|||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
|
@ -44,7 +44,7 @@ var _ = framework.KubeDescribe("Events", func() {
|
|||
name := "send-events-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
|
@ -75,7 +75,7 @@ var _ = framework.KubeDescribe("Events", func() {
|
|||
|
||||
By("verifying the pod is in kubernetes")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err := podClient.List(options)
|
||||
Expect(len(pods.Items)).To(Equal(1))
|
||||
|
||||
|
@ -95,7 +95,7 @@ var _ = framework.KubeDescribe("Events", func() {
|
|||
"involvedObject.namespace": f.Namespace.Name,
|
||||
"source": v1.DefaultSchedulerName,
|
||||
}.AsSelector().String()
|
||||
options := v1.ListOptions{FieldSelector: selector}
|
||||
options := metav1.ListOptions{FieldSelector: selector}
|
||||
events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Events", func() {
|
|||
"involvedObject.namespace": f.Namespace.Name,
|
||||
"source": "kubelet",
|
||||
}.AsSelector().String()
|
||||
options := v1.ListOptions{FieldSelector: selector}
|
||||
options := metav1.ListOptions{FieldSelector: selector}
|
||||
events, err = f.ClientSet.Core().Events(f.Namespace.Name).List(options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
|
5
vendor/k8s.io/kubernetes/test/e2e/example_cluster_dns.go
generated
vendored
|
@ -21,6 +21,7 @@ import (
|
|||
"path/filepath"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
|
@ -98,7 +99,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
|
|||
// the application itself may have not been initialized. Just query the application.
|
||||
for _, ns := range namespaces {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
|
||||
options := v1.ListOptions{LabelSelector: label.String()}
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.Core().Pods(ns.Name).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
|
||||
|
@ -118,7 +119,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
|
|||
// dns error or timeout.
|
||||
// This code is probably unnecessary, but let's stay on the safe side.
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName}))
|
||||
options := v1.ListOptions{LabelSelector: label.String()}
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.Core().Pods(namespaces[0].Name).List(options)
|
||||
|
||||
if err != nil || pods == nil || len(pods.Items) == 0 {
|
||||
|
|
8
vendor/k8s.io/kubernetes/test/e2e/examples.go
generated
vendored

@ -31,10 +31,10 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/pkg/api/v1"
rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
testutils "k8s.io/kubernetes/test/utils"

@ -72,7 +72,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
// this test wants powerful permissions. Since the namespace names are unique, we can leave this
// lying around so we don't have to race any caches
framework.BindClusterRoleInNamespace(c.Rbac(), "edit", f.Namespace.Name,
rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})

err := framework.WaitForAuthorizationUpdate(c.Authorization(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),

@ -298,7 +298,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
func() (bool, error) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: label.String()})
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: label.String()})
if err != nil {
return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label)
}
14
vendor/k8s.io/kubernetes/test/e2e/framework/BUILD
generated
vendored

@ -32,8 +32,6 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_clientset:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/service:go_default_library",

@ -45,24 +43,20 @@ go_library(
"//pkg/apis/componentconfig:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/rbac/v1alpha1:go_default_library",
"//pkg/apis/rbac/v1beta1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/authorization/v1beta1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/rbac/v1beta1:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/typed/discovery:go_default_library",
"//pkg/client/typed/dynamic:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/client/unversioned/clientcmd/api:go_default_library",
"//pkg/client/unversioned/remotecommand:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubelet/api/v1alpha1/stats:go_default_library",
"//pkg/kubelet/metrics:go_default_library",

@ -95,10 +89,10 @@ go_library(
"//vendor:golang.org/x/net/websocket",
"//vendor:google.golang.org/api/compute/v1",
"//vendor:google.golang.org/api/googleapi",
"//vendor:gopkg.in/yaml.v2",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
"//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",

@ -113,6 +107,8 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/client-go/kubernetes",
"//vendor:k8s.io/client-go/rest",
"//vendor:k8s.io/client-go/tools/clientcmd",
"//vendor:k8s.io/client-go/tools/clientcmd/api",
],
)
22
vendor/k8s.io/kubernetes/test/e2e/framework/authorizer_util.go
generated
vendored

@ -21,13 +21,13 @@ import (
"time"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
legacyv1 "k8s.io/kubernetes/pkg/api/v1"
authorizationv1beta1 "k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
v1beta1authorization "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1"
v1alpha1rbac "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1"
v1beta1rbac "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1"
)

const (

@ -70,13 +70,13 @@ func WaitForAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGette
}

// BindClusterRole binds the cluster role at the cluster scope
func BindClusterRole(c v1alpha1rbac.ClusterRoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1alpha1.Subject) {
func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) {
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
_, err := c.ClusterRoleBindings().Create(&rbacv1alpha1.ClusterRoleBinding{
ObjectMeta: legacyv1.ObjectMeta{
_, err := c.ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: ns + "--" + clusterRole,
},
RoleRef: rbacv1alpha1.RoleRef{
RoleRef: rbacv1beta1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: clusterRole,

@ -91,13 +91,13 @@ func BindClusterRole(c v1alpha1rbac.ClusterRoleBindingsGetter, clusterRole, ns s
}

// BindClusterRoleInNamespace binds the cluster role at the namespace scope
func BindClusterRoleInNamespace(c v1alpha1rbac.RoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1alpha1.Subject) {
func BindClusterRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) {
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
_, err := c.RoleBindings(ns).Create(&rbacv1alpha1.RoleBinding{
ObjectMeta: legacyv1.ObjectMeta{
_, err := c.RoleBindings(ns).Create(&rbacv1beta1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: ns + "--" + clusterRole,
},
RoleRef: rbacv1alpha1.RoleRef{
RoleRef: rbacv1beta1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: clusterRole,
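
These hunks move the framework's RBAC helpers from the rbac v1alpha1 group to v1beta1 and from the legacy ObjectMeta to metav1.ObjectMeta. A hedged sketch of the resulting binding call, assuming a typed rbac/v1beta1 client; the package name, function name and role name below are illustrative, not part of this diff:

    package example

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
    	v1beta1rbac "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1"
    )

    // bindEditRole grants the "edit" ClusterRole to a namespace's default
    // service account using the v1beta1 RBAC types shown in the diff above.
    func bindEditRole(c v1beta1rbac.RoleBindingsGetter, ns string) error {
    	_, err := c.RoleBindings(ns).Create(&rbacv1beta1.RoleBinding{
    		ObjectMeta: metav1.ObjectMeta{Name: ns + "--edit"},
    		RoleRef: rbacv1beta1.RoleRef{
    			APIGroup: "rbac.authorization.k8s.io",
    			Kind:     "ClusterRole",
    			Name:     "edit",
    		},
    		Subjects: []rbacv1beta1.Subject{{
    			Kind:      rbacv1beta1.ServiceAccountKind,
    			Namespace: ns,
    			Name:      "default",
    		}},
    	})
    	return err
    }
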
2
vendor/k8s.io/kubernetes/test/e2e/framework/exec_util.go
generated
vendored

@ -23,9 +23,9 @@ import (
"strings"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
193
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go
generated
vendored

@ -20,7 +20,6 @@ import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"reflect"
"strings"
"sync"

@ -28,27 +27,25 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
staging "k8s.io/client-go/kubernetes"
clientreporestclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/metrics"
"k8s.io/kubernetes/pkg/util/intstr"
testutils "k8s.io/kubernetes/test/utils"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
yaml "gopkg.in/yaml.v2"
)

const (

@ -88,13 +85,6 @@ type Framework struct {
// configuration for framework's client
options FrameworkOptions

// will this framework exercise a federated cluster as well
federated bool

// Federation specific params. These are set only if federated = true.
FederationClientset_1_5 *federation_clientset.Clientset
FederationNamespace *v1.Namespace
}

type TestDataSummary interface {

@ -118,12 +108,6 @@ func NewDefaultFramework(baseName string) *Framework {
return NewFramework(baseName, options, nil)
}

func NewDefaultFederatedFramework(baseName string) *Framework {
f := NewDefaultFramework(baseName)
f.federated = true
return f
}

func NewDefaultGroupVersionFramework(baseName string, groupVersion schema.GroupVersion) *Framework {
f := NewDefaultFramework(baseName)
f.options.GroupVersion = &groupVersion

@ -203,25 +187,6 @@ func (f *Framework) BeforeEach() {
f.ClientPool = dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
}

if f.federated {
if f.FederationClientset_1_5 == nil {
By("Creating a release 1.4 federation Clientset")
var err error
f.FederationClientset_1_5, err = LoadFederationClientset_1_5()
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for federation-apiserver to be ready")
err := WaitForFederationApiserverReady(f.FederationClientset_1_5)
Expect(err).NotTo(HaveOccurred())
By("federation-apiserver is ready")

By("Creating a federation namespace")
ns, err := f.createFederationNamespace(f.BaseName)
Expect(err).NotTo(HaveOccurred())
f.FederationNamespace = ns
By(fmt.Sprintf("Created federation namespace %s", ns.Name))
}

By("Building a namespace api object")
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
"e2e-framework": f.BaseName,

@ -262,45 +227,6 @@ func (f *Framework) BeforeEach() {
}
}

func (f *Framework) deleteFederationNs() {
if !f.federated {
// Nothing to do if this is not a federation setup.
return
}
ns := f.FederationNamespace
By(fmt.Sprintf("Destroying federation namespace %q for this suite.", ns.Name))
timeout := 5 * time.Minute
if f.NamespaceDeletionTimeout != 0 {
timeout = f.NamespaceDeletionTimeout
}

clientset := f.FederationClientset_1_5
// First delete the namespace from federation apiserver.
// Also delete the corresponding namespaces from underlying clusters.
orphanDependents := false
if err := clientset.Core().Namespaces().Delete(ns.Name, &v1.DeleteOptions{OrphanDependents: &orphanDependents}); err != nil {
Failf("Error while deleting federation namespace %s: %s", ns.Name, err)
}
// Verify that it got deleted.
err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
if _, err := clientset.Core().Namespaces().Get(ns.Name, metav1.GetOptions{}); err != nil {
if apierrors.IsNotFound(err) {
return true, nil
}
Logf("Error while waiting for namespace to be terminated: %v", err)
return false, nil
}
return false, nil
})
if err != nil {
if !apierrors.IsNotFound(err) {
Failf("Couldn't delete ns %q: %s", ns.Name, err)
} else {
Logf("Namespace %v was already deleted", ns.Name)
}
}
}

// AfterEach deletes the namespace, after reading its events.
func (f *Framework) AfterEach() {
RemoveCleanupAction(f.cleanupHandle)
@ -327,8 +253,6 @@ func (f *Framework) AfterEach() {
}
}
}
// Delete the federation namespace.
f.deleteFederationNs()
} else {
if TestContext.DeleteNamespace {
Logf("Found DeleteNamespace=false, skipping namespace deletion!")

@ -340,7 +264,6 @@ func (f *Framework) AfterEach() {
// Paranoia-- prevent reuse!
f.Namespace = nil
f.FederationNamespace = nil
f.ClientSet = nil
f.namespacesToDelete = nil

@ -354,34 +277,12 @@ func (f *Framework) AfterEach() {
}
}()

if f.federated {
defer func() {
if f.FederationClientset_1_5 == nil {
Logf("Warning: framework is marked federated, but has no federation 1.4 clientset")
return
}
if err := f.FederationClientset_1_5.Federation().Clusters().DeleteCollection(nil, v1.ListOptions{}); err != nil {
Logf("Error: failed to delete Clusters: %+v", err)
}
}()
}

// Print events if the test failed.
if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
// Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client.
DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf)
if f.federated {
// Dump federation events in federation namespace.
DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) {
return f.FederationClientset_1_5.Core().Events(ns).List(opts)
}, f.FederationNamespace.Name)
// Print logs of federation control plane pods (federation-apiserver and federation-controller-manager)
LogPodsWithLabels(f.ClientSet, "federation", map[string]string{"app": "federated-cluster"}, Logf)
// Print logs of kube-dns pod
LogPodsWithLabels(f.ClientSet, "kube-system", map[string]string{"k8s-app": "kube-dns"}, Logf)
}
LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", Logf)
}

summaries := make([]TestDataSummary, 0)

@ -456,29 +357,6 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
return ns, err
}

func (f *Framework) createFederationNamespace(baseName string) (*v1.Namespace, error) {
clientset := f.FederationClientset_1_5
namespaceObj := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
},
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
if err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
var err error
got, err = clientset.Core().Namespaces().Create(namespaceObj)
if err != nil {
Logf("Unexpected error while creating namespace: %v", err)
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
return got, nil
}

// WaitForPodTerminated waits for the pod to be terminated with the given reason.
func (f *Framework) WaitForPodTerminated(podName, reason string) error {
return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)

@ -526,7 +404,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
for {
// TODO: Endpoints client should take a field selector so we
// don't have to list everything.
list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(v1.ListOptions{})
list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return err
}

@ -541,7 +419,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
}
}

options := v1.ListOptions{
options := metav1.ListOptions{
FieldSelector: fields.Set{"metadata.name": serviceName}.AsSelector().String(),
ResourceVersion: rv,
}
@ -640,7 +518,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
}
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "service-for-" + appName,
Labels: map[string]string{
"app": appName + "-service",

@ -666,7 +544,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
if i <= maxCount {
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: labels,
},

@ -709,7 +587,7 @@ type KubeConfig struct {
Users []KubeUser `yaml:"users"`
}

func (kc *KubeConfig) findUser(name string) *KubeUser {
func (kc *KubeConfig) FindUser(name string) *KubeUser {
for _, user := range kc.Users {
if user.Name == name {
return &user

@ -718,7 +596,7 @@ func (kc *KubeConfig) findUser(name string) *KubeUser {
return nil
}

func (kc *KubeConfig) findCluster(name string) *KubeCluster {
func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
for _, cluster := range kc.Clusters {
if cluster.Name == name {
return &cluster

@ -727,55 +605,6 @@ func (kc *KubeConfig) findCluster(name string) *KubeCluster {
return nil
}

type E2EContext struct {
// Raw context name,
RawName string `yaml:"rawName"`
// A valid dns subdomain which can be used as the name of kubernetes resources.
Name string `yaml:"name"`
Cluster *KubeCluster `yaml:"cluster"`
User *KubeUser `yaml:"user"`
}

func (f *Framework) GetUnderlyingFederatedContexts() []E2EContext {
if !f.federated {
Failf("getUnderlyingFederatedContexts called on non-federated framework")
}

kubeconfig := KubeConfig{}
configBytes, err := ioutil.ReadFile(TestContext.KubeConfig)
ExpectNoError(err)
err = yaml.Unmarshal(configBytes, &kubeconfig)
ExpectNoError(err)

e2eContexts := []E2EContext{}
for _, context := range kubeconfig.Contexts {
if strings.HasPrefix(context.Name, "federation") && context.Name != federatedKubeContext {
user := kubeconfig.findUser(context.Context.User)
if user == nil {
Failf("Could not find user for context %+v", context)
}

cluster := kubeconfig.findCluster(context.Context.Cluster)
if cluster == nil {
Failf("Could not find cluster for context %+v", context)
}

dnsSubdomainName, err := GetValidDNSSubdomainName(context.Name)
if err != nil {
Failf("Could not convert context name %s to a valid dns subdomain name, error: %s", context.Name, err)
}
e2eContexts = append(e2eContexts, E2EContext{
RawName: context.Name,
Name: dnsSubdomainName,
Cluster: cluster,
User: user,
})
}
}

return e2eContexts
}

func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
if numRetries > 0 {

@ -899,10 +728,10 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin
// everything manually.
if len(selectors) > 0 {
selector = labels.SelectorFromSet(labels.Set(selectors))
options := v1.ListOptions{LabelSelector: selector.String()}
options := metav1.ListOptions{LabelSelector: selector.String()}
pl, err = cli.Core().Pods(ns).List(options)
} else {
pl, err = cli.Core().Pods(ns).List(v1.ListOptions{})
pl, err = cli.Core().Pods(ns).List(metav1.ListOptions{})
}
return pl, err
}
6
vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go
generated
vendored

@ -30,10 +30,10 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/prometheus/common/model"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"

@ -158,7 +158,7 @@ func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor
client: c,
nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate),
}
nodes, err := m.client.Core().Nodes().List(v1.ListOptions{})
nodes, err := m.client.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
}

@ -701,7 +701,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI
func (r *ResourceMonitor) Start() {
// It should be OK to monitor unschedulable Nodes
nodes, err := r.client.Core().Nodes().List(v1.ListOptions{})
nodes, err := r.client.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
Failf("ResourceMonitor: unable to get list of nodes: %v", err)
}
7
vendor/k8s.io/kubernetes/test/e2e/framework/metrics_util.go
generated
vendored

@ -28,9 +28,8 @@ import (
"strings"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/metrics"

@ -325,7 +324,7 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
result := SchedulingLatency{}

// Check if master Node is registered
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
ExpectNoError(err)

var data string

@ -342,7 +341,7 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
rawData, err := c.Core().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Namespace(api.NamespaceSystem).
Namespace(metav1.NamespaceSystem).
Resource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
Suffix("metrics").
10
vendor/k8s.io/kubernetes/test/e2e/framework/networking_utils.go
generated
vendored

@ -323,7 +323,7 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node s
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: config.Namespace,
},

@ -367,7 +367,7 @@ func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: testPodName,
Namespace: config.Namespace,
},

@ -397,7 +397,7 @@ func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
func (config *NetworkingTestConfig) createNodePortService(selector map[string]string) {
serviceSpec := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: nodePortServiceName,
},
Spec: v1.ServiceSpec{

@ -501,7 +501,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
func (config *NetworkingTestConfig) cleanup() {
nsClient := config.getNamespacesClient()
nsList, err := nsClient.List(v1.ListOptions{})
nsList, err := nsClient.List(metav1.ListOptions{})
if err == nil {
for _, ns := range nsList.Items {
if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {

@ -558,7 +558,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
func (config *NetworkingTestConfig) DeleteNetProxyPod() {
pod := config.EndpointPods[0]
config.getPodClient().Delete(pod.Name, v1.NewDeleteOptions(0))
config.getPodClient().Delete(pod.Name, metav1.NewDeleteOptions(0))
config.EndpointPods = config.EndpointPods[1:]
// wait for pod being deleted.
err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
5
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored

@ -23,10 +23,11 @@ import (
"strings"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/fields"
)

// The following upgrade functions are passed into the framework below and used

@ -153,7 +154,7 @@ func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]str
// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
// knows about all of the nodes. Thus, we retry the list nodes call
// until we get the expected number of nodes.
nodeList, errLast = c.Core().Nodes().List(v1.ListOptions{
nodeList, errLast = c.Core().Nodes().List(metav1.ListOptions{
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String()})
if errLast != nil {
return false, nil
2
vendor/k8s.io/kubernetes/test/e2e/framework/pods.go
generated
vendored

@ -113,7 +113,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
// DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
// disappear before the timeout, it will fail the test.
func (c *PodClient) DeleteSync(name string, options *v1.DeleteOptions, timeout time.Duration) {
func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeout time.Duration) {
err := c.Delete(name, options)
if err != nil && !errors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)
6
vendor/k8s.io/kubernetes/test/e2e/framework/resource_usage_gatherer.go
generated
vendored

@ -27,8 +27,8 @@ import (
"text/tabwriter"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/util/system"
)

@ -232,7 +232,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
finished: false,
})
} else {
pods, err := c.Core().Pods("kube-system").List(v1.ListOptions{})
pods, err := c.Core().Pods("kube-system").List(metav1.ListOptions{})
if err != nil {
Logf("Error while listing Pods: %v", err)
return nil, err

@ -244,7 +244,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
g.containerIDs = append(g.containerIDs, containerID)
}
}
nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
nodeList, err := c.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("Error while listing Nodes: %v", err)
return nil, err
18
vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go
generated
vendored

@ -109,7 +109,7 @@ func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig
// as the jig and exposes the given port.
func (j *ServiceTestJig) newServiceTemplate(namespace string, proto v1.Protocol, port int32) *v1.Service {
service := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: j.Name,
Labels: j.Labels,

@ -487,7 +487,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string
// name as the jig and runs the "netexec" container.
func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationController {
rc := &v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: j.Name,
Labels: j.Labels,

@ -496,7 +496,7 @@ func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationControll
Replicas: func(i int) *int32 { x := int32(i); return &x }(1),
Selector: j.Labels,
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Labels: j.Labels,
},
Spec: v1.PodSpec{

@ -552,7 +552,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s
label := labels.SelectorFromSet(labels.Set(j.Labels))
Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
options := v1.ListOptions{LabelSelector: label.String()}
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := j.Client.Core().Pods(namespace).List(options)
if err != nil {
return nil, err

@ -585,7 +585,7 @@ func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error
// newNetexecPodSpec returns the pod spec of netexec pod
func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{

@ -632,7 +632,7 @@ func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName
func newEchoServerPodSpec(podName string) *v1.Pod {
port := 8080
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{

@ -783,7 +783,7 @@ func NewServerTest(client clientset.Interface, namespace string, serviceName str
// Build default config for a service (which can then be changed)
func (t *ServiceTestFixture) BuildServiceSpec() *v1.Service {
service := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: t.ServiceName,
Namespace: t.Namespace,
},

@ -1001,7 +1001,7 @@ func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin
i++
}

if pods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{}); err == nil {
if pods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
for _, pod := range pods.Items {
Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
}

@ -1017,7 +1017,7 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli
By("creating service " + name + " in namespace " + ns)
_, err := c.Core().Services(ns).Create(&v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{
7
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored

@ -23,8 +23,8 @@ import (
"github.com/onsi/ginkgo/config"
"github.com/spf13/viper"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/cloudprovider"
)

@ -81,6 +81,8 @@ type TestContextType struct {
FeatureGates string
// Node e2e specific test context
NodeTestContextType
// Federation e2e context
FederatedKubeContext string

// Viper-only parameters. These will in time replace all flags.

@ -129,7 +131,6 @@ type CloudConfig struct {
}

var TestContext TestContextType
var federatedKubeContext string

// Register flags common to all e2e test suites.
func RegisterCommonFlags() {

@ -163,7 +164,7 @@ func RegisterClusterFlags() {
flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
flag.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
flag.StringVar(&TestContext.KubeAPIContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType used to communicate with apiserver")
flag.StringVar(&federatedKubeContext, "federated-kube-context", "e2e-federation", "kubeconfig context for federation.")
flag.StringVar(&TestContext.FederatedKubeContext, "federated-kube-context", "e2e-federation", "kubeconfig context for federation.")

flag.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
265
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored

@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"

@ -53,6 +54,7 @@ import (
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"

@ -60,8 +62,9 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"

@ -72,15 +75,11 @@ import (
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/discovery"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/master/ports"
@ -360,30 +359,13 @@ func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersio
}
}

// Detects whether the federation namespace exists in the underlying cluster
func SkipUnlessFederated(c clientset.Interface) {
federationNS := os.Getenv("FEDERATION_NAMESPACE")
if federationNS == "" {
federationNS = federationapi.FederationNamespaceSystem
}

_, err := c.Core().Namespaces().Get(federationNS, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Skipf("Could not find federation namespace %s: skipping federated test", federationNS)
} else {
Failf("Unexpected error getting namespace: %v", err)
}
}
}

func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr schema.GroupVersionResource, namespace string) {
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
Failf("Unexpected error getting dynamic client for %v: %v", gvr.GroupVersion(), err)
}
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
_, err = dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
_, err = dynamicClient.Resource(&apiResource, namespace).List(&metav1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {

@ -469,7 +451,7 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0

if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: successPodSelector.String()})
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return false, nil

@ -538,7 +520,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
// checked.
replicas, replicaOk := int32(0), int32(0)

rcList, err := c.Core().ReplicationControllers(ns).List(v1.ListOptions{})
rcList, err := c.Core().ReplicationControllers(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
return false, nil

@ -548,7 +530,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
replicaOk += rc.Status.ReadyReplicas
}

rsList, err := c.Extensions().ReplicaSets(ns).List(v1.ListOptions{})
rsList, err := c.Extensions().ReplicaSets(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting replication sets in namespace %q: %v", ns, err)
return false, nil

@ -558,7 +540,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
replicaOk += rs.Status.ReadyReplicas
}

podList, err := c.Core().Pods(ns).List(v1.ListOptions{})
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
return false, nil

@ -631,7 +613,7 @@ func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string
}

func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{})
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{})
if err != nil {
logFunc("Error getting pods in namespace '%s': %v", ns, err)
return

@ -645,7 +627,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
}

func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
logFunc("Error getting pods in namespace %q: %v", ns, err)
return

@ -657,7 +639,7 @@ func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string
}

func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return

@ -672,7 +654,7 @@ func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[s
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
By("Deleting namespaces")
nsList, err := c.Core().Namespaces().List(v1.ListOptions{})
nsList, err := c.Core().Namespaces().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var deleted []string
var wg sync.WaitGroup

@ -719,7 +701,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou
//Now POLL until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
nsList, err := c.Core().Namespaces().List(v1.ListOptions{})
nsList, err := c.Core().Namespaces().List(metav1.ListOptions{})
if err != nil {
return false, err
}

@ -733,7 +715,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou
}

func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
w, err := c.Core().ServiceAccounts(ns).Watch(v1.SingleObject(v1.ObjectMeta{Name: serviceAccountName}))
w, err := c.Core().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
if err != nil {
return err
}

@ -768,10 +750,10 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
// WaitForMatchPodsCondition finds match pods based on the input ListOptions.
// waits and checks if all match pods are in the given podCondition
func WaitForMatchPodsCondition(c clientset.Interface, opts v1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pods, err := c.Core().Pods(v1.NamespaceAll).List(opts)
pods, err := c.Core().Pods(metav1.NamespaceAll).List(opts)
if err != nil {
return err
}
@ -800,18 +782,6 @@ func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace st
return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}

// WaitForFederationApiserverReady waits for the federation apiserver to be ready.
// It tests the readiness by sending a GET request and expecting a non error response.
func WaitForFederationApiserverReady(c *federation_clientset.Clientset) error {
return wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) {
_, err := c.Federation().Clusters().List(v1.ListOptions{})
if err != nil {
return false, nil
}
return true, nil
})
}

// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)

@ -881,7 +851,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s
labels["e2e-run"] = string(RunId)

namespaceObj := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
Namespace: "",
Labels: labels,

@ -931,7 +901,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
namespaces, err := c.Core().Namespaces().List(v1.ListOptions{})
namespaces, err := c.Core().Namespaces().List(metav1.ListOptions{})
if err != nil {
Logf("Listing namespaces failed: %v", err)
continue

@ -1014,7 +984,7 @@ func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace st
// logNamespaces logs the number of namespaces by phase
// namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs
func logNamespaces(c clientset.Interface, namespace string) {
namespaceList, err := c.Core().Namespaces().List(v1.ListOptions{})
namespaceList, err := c.Core().Namespaces().List(metav1.ListOptions{})
if err != nil {
Logf("namespace: %v, unable to list namespaces: %v", namespace, err)
return

@ -1049,7 +1019,7 @@ func logNamespace(c clientset.Interface, namespace string) {
// countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
// check for remaining pods
pods, err := c.Core().Pods(namespace).List(v1.ListOptions{})
pods, err := c.Core().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return 0, 0, err
}

@ -1112,7 +1082,7 @@ func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, n
Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name)
continue
}
obj, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
obj, err := dynamicClient.Resource(&apiResource, namespace).List(&metav1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {

@ -1261,7 +1231,7 @@ func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace,
}

func waitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error {
w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
w, err := c.Core().Pods(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}

@ -1276,7 +1246,7 @@ func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namesp
}

func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error {
w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
w, err := c.Core().Pods(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}

@ -1285,7 +1255,7 @@ func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName,
}

func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error {
w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
w, err := c.Core().Pods(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}

@ -1297,7 +1267,7 @@ func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod.
func WaitForPodNotPending(c clientset.Interface, ns, podName, resourceVersion string) error {
w, err := c.Core().Pods(ns).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
w, err := c.Core().Pods(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}

@ -1356,7 +1326,7 @@ func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*v1.Pod
var p *v1.Pod = nil
err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
Logf("Waiting for pod %s to appear on node %s", rcName, node)
options := v1.ListOptions{LabelSelector: label.String()}
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
@ -1375,7 +1345,7 @@ func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*v1.Pod
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error {
options := v1.ListOptions{FieldSelector: fields.Set{
options := metav1.ListOptions{FieldSelector: fields.Set{
"metadata.name": name,
"metadata.namespace": ns,
}.AsSelector().String()}

@ -1406,7 +1376,7 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)
options := v1.ListOptions{LabelSelector: label.String()}
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err

@ -1470,7 +1440,7 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
list, err := c.Core().Endpoints(namespace).List(v1.ListOptions{})
list, err := c.Core().Endpoints(namespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}

@ -1544,7 +1514,7 @@ func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Sele
// reply with their own pod name.
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := v1.ListOptions{LabelSelector: r.label.String()}
options := metav1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.Core().Pods(r.ns).List(options)
Expect(err).NotTo(HaveOccurred())
for i, pod := range r.pods.Items {

@ -1681,7 +1651,7 @@ func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.Po
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := v1.ListOptions{LabelSelector: label.String()}
options := metav1.ListOptions{LabelSelector: label.String()}

// List the pods, making sure we observe all the replicas.
pods, err := c.Core().Pods(ns).List(options)

@ -1781,7 +1751,7 @@ func ServiceResponding(c clientset.Interface, ns, name string) error {
})
}

func restclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
Logf(">>> kubeConfig: %s\n", TestContext.KubeConfig)
if TestContext.KubeConfig == "" {
return nil, fmt.Errorf("KubeConfig must be specified to load client config")

@ -1804,43 +1774,13 @@ func LoadConfig() (*restclient.Config, error) {
// This is a node e2e test, apply the node e2e configuration
return &restclient.Config{Host: TestContext.Host}, nil
}
c, err := restclientConfig(TestContext.KubeContext)
c, err := RestclientConfig(TestContext.KubeContext)
if err != nil {
return nil, err
}

return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig()
}

func LoadFederatedConfig(overrides *clientcmd.ConfigOverrides) (*restclient.Config, error) {
c, err := restclientConfig(federatedKubeContext)
if err != nil {
return nil, fmt.Errorf("error creating federation client config: %v", err.Error())
}
cfg, err := clientcmd.NewDefaultClientConfig(*c, overrides).ClientConfig()
if cfg != nil {
//TODO(colhom): this is only here because https://github.com/kubernetes/kubernetes/issues/25422
cfg.NegotiatedSerializer = api.Codecs
}
if err != nil {
return cfg, fmt.Errorf("error creating federation client config: %v", err.Error())
}
return cfg, nil
}

func LoadFederationClientset_1_5() (*federation_clientset.Clientset, error) {
config, err := LoadFederatedConfig(&clientcmd.ConfigOverrides{})
if err != nil {
return nil, err
}

c, err := federation_clientset.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error creating federation clientset: %v", err.Error())
}
return c, nil
}
func LoadInternalClientset() (*internalclientset.Clientset, error) {
|
||||
config, err := LoadConfig()
|
||||
if err != nil {
|
||||
|
@ -2176,7 +2116,7 @@ func (f *Framework) MatchContainerOutput(
|
|||
createdPod := podClient.Create(pod)
|
||||
defer func() {
|
||||
By("delete the pod")
|
||||
podClient.DeleteSync(createdPod.Name, &v1.DeleteOptions{}, podNoLongerRunningTimeout)
|
||||
podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, podNoLongerRunningTimeout)
|
||||
}()
|
||||
|
||||
// Wait for client pod to complete.
|
||||
|
@ -2235,11 +2175,11 @@ func RunRC(config testutils.RCConfig) error {
|
|||
return testutils.RunRC(config)
|
||||
}
|
||||
|
||||
type EventsLister func(opts v1.ListOptions, ns string) (*v1.EventList, error)
|
||||
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
|
||||
|
||||
func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
|
||||
By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
|
||||
events, err := eventsLister(v1.ListOptions{}, namespace)
|
||||
events, err := eventsLister(metav1.ListOptions{}, namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Found %d events.", len(events.Items)))
|
||||
|
@ -2257,7 +2197,7 @@ func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
|
|||
}
|
||||
|
||||
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
|
||||
DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) {
|
||||
DumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
|
||||
return c.Core().Events(ns).List(opts)
|
||||
}, namespace)
|
||||
|
||||
|
@ -2266,7 +2206,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
|
|||
// 2. there are so many of them that working with them are mostly impossible
|
||||
// So we dump them only if the cluster is relatively small.
|
||||
maxNodesForDump := 20
|
||||
if nodes, err := c.Core().Nodes().List(v1.ListOptions{}); err == nil {
|
||||
if nodes, err := c.Core().Nodes().List(metav1.ListOptions{}); err == nil {
|
||||
if len(nodes.Items) <= maxNodesForDump {
|
||||
dumpAllPodInfo(c)
|
||||
dumpAllNodeInfo(c)
|
||||
|
@ -2292,7 +2232,7 @@ func (o byFirstTimestamp) Less(i, j int) bool {
|
|||
}
|
||||
|
||||
func dumpAllPodInfo(c clientset.Interface) {
|
||||
pods, err := c.Core().Pods("").List(v1.ListOptions{})
|
||||
pods, err := c.Core().Pods("").List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("unable to fetch pod debug info: %v", err)
|
||||
}
|
||||
|
@ -2301,7 +2241,7 @@ func dumpAllPodInfo(c clientset.Interface) {
|
|||
|
||||
func dumpAllNodeInfo(c clientset.Interface) {
|
||||
// It should be OK to list unschedulable Nodes here.
|
||||
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
|
||||
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("unable to fetch node list: %v", err)
|
||||
return
|
||||
|
@ -2356,11 +2296,11 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
|
|||
selector := fields.Set{
|
||||
"involvedObject.kind": "Node",
|
||||
"involvedObject.name": nodeName,
|
||||
"involvedObject.namespace": v1.NamespaceAll,
|
||||
"involvedObject.namespace": metav1.NamespaceAll,
|
||||
"source": "kubelet",
|
||||
}.AsSelector().String()
|
||||
options := v1.ListOptions{FieldSelector: selector}
|
||||
events, err := c.Core().Events(api.NamespaceSystem).List(options)
|
||||
options := metav1.ListOptions{FieldSelector: selector}
|
||||
events, err := c.Core().Events(metav1.NamespaceSystem).List(options)
|
||||
if err != nil {
|
||||
Logf("Unexpected error retrieving node events %v", err)
|
||||
return []v1.Event{}
|
||||
|
@ -2373,7 +2313,7 @@ func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
|
|||
var nodes *v1.NodeList
|
||||
var err error
|
||||
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
|
||||
nodes, err = c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
|
||||
nodes, err = c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String()})
|
||||
return err == nil, nil
|
||||
|
@ -2401,7 +2341,7 @@ func isNodeUntainted(node *v1.Node) bool {
|
|||
Kind: "Pod",
|
||||
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fake-not-scheduled",
|
||||
Namespace: "fake-not-scheduled",
|
||||
},
|
||||
|
@ -2446,7 +2386,7 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
|
|||
return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
|
||||
attempt++
|
||||
notSchedulable = nil
|
||||
opts := v1.ListOptions{
|
||||
opts := metav1.ListOptions{
|
||||
ResourceVersion: "0",
|
||||
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
|
||||
}
|
||||
|
@ -2736,7 +2676,7 @@ func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label label
|
|||
// Wait up to PodListTimeout for getting pods with certain label
|
||||
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
|
||||
for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) {
|
||||
options := v1.ListOptions{LabelSelector: label.String()}
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err = c.Core().Pods(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if len(pods.Items) > 0 {
|
||||
|
@ -2792,7 +2732,7 @@ func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, n
|
|||
}
|
||||
}
|
||||
|
||||
func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *v1.DeleteOptions) error {
|
||||
func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *metav1.DeleteOptions) error {
|
||||
switch kind {
|
||||
case api.Kind("ReplicationController"):
|
||||
return c.Core().ReplicationControllers(ns).Delete(name, deleteOption)
|
||||
|
@ -2944,7 +2884,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
|
|||
defer ps.Stop()
|
||||
startTime := time.Now()
|
||||
falseVar := false
|
||||
deleteOption := &v1.DeleteOptions{OrphanDependents: &falseVar}
|
||||
deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar}
|
||||
err = deleteResource(c, kind, ns, name, deleteOption)
|
||||
if err != nil && apierrs.IsNotFound(err) {
|
||||
Logf("%v %s was already deleted: %v", kind, name, err)
|
||||
|
@ -3072,7 +3012,7 @@ func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet)
|
|||
return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
ExpectNoError(err)
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
@ -3090,7 +3030,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.Core().Pods(rs.Namespace).List(options)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -3316,7 +3256,7 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
|
|||
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
|
||||
}
|
||||
|
||||
w, err := c.Extensions().Deployments(d.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
|
||||
w, err := c.Extensions().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -3440,7 +3380,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
|
|||
|
||||
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := v1.ListOptions{LabelSelector: label.String()}
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
|
||||
pods, err := c.Core().Pods(ns).List(options)
|
||||
if err != nil {
|
||||
|
@ -3510,7 +3450,7 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r
|
|||
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) {
|
||||
minReadySeconds := deployment.Spec.MinReadySeconds
|
||||
podList, err := deploymentutil.ListPods(deployment,
|
||||
func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
|
||||
func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
|
||||
return c.Core().Pods(namespace).List(options)
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -3804,7 +3744,7 @@ func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
|
|||
// NewHostExecPodSpec returns the pod spec of hostexec pod
|
||||
func NewHostExecPodSpec(ns, name string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -3852,7 +3792,7 @@ func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
|
|||
func newExecPodSpec(ns, generateName string) *v1.Pod {
|
||||
immediate := int64(0)
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: generateName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -3895,7 +3835,7 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
|
|||
func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) {
|
||||
By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns))
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: labels,
|
||||
},
|
||||
|
@ -4098,7 +4038,7 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
|
|||
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
|
||||
notReady = nil
|
||||
// It should be OK to list unschedulable Nodes here.
|
||||
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
|
||||
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -4144,7 +4084,7 @@ func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error
|
|||
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
|
||||
notReady = nil
|
||||
// It should be OK to list unschedulable Nodes here.
|
||||
nodes, err := c.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"})
|
||||
nodes, err := c.Core().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -4153,14 +4093,14 @@ func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error
|
|||
notReady = append(notReady, node)
|
||||
}
|
||||
}
|
||||
pods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{ResourceVersion: "0"})
|
||||
pods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
systemPodsPerNode := make(map[string][]string)
|
||||
for _, pod := range pods.Items {
|
||||
if pod.Namespace == api.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
|
||||
if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
|
||||
if pod.Spec.NodeName != "" {
|
||||
systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
|
||||
}
|
||||
|
@ -4318,7 +4258,7 @@ func WaitForApiserverUp(c clientset.Interface) error {
|
|||
// By cluster size we mean number of Nodes excluding Master Node.
|
||||
func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration) error {
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
|
||||
nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
|
||||
nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String()})
|
||||
if err != nil {
|
||||
|
@ -4349,7 +4289,7 @@ func GenerateMasterRegexp(prefix string) string {
|
|||
// waitForMasters waits until the cluster has the desired number of ready masters in it.
|
||||
func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
|
||||
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
|
||||
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Failed to list nodes: %v", err)
|
||||
continue
|
||||
|
@ -4560,7 +4500,7 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s
|
|||
// kube-proxy NodePorts won't work.
|
||||
var nodes *v1.NodeList
|
||||
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
|
||||
nodes, err = client.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
|
||||
nodes, err = client.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String()})
|
||||
return err == nil, nil
|
||||
|
@ -4585,7 +4525,7 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s
|
|||
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
|
||||
// none are running, otherwise it does what a synchronous scale operation would do.
|
||||
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error {
|
||||
listOpts := v1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
|
||||
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
|
||||
rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -4847,7 +4787,7 @@ func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
|
|||
containerName := fmt.Sprintf("%s-container", podName)
|
||||
port := 8080
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -4880,7 +4820,7 @@ func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
|
|||
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeout int) error {
|
||||
contName := fmt.Sprintf("%s-container", podName)
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -4944,7 +4884,7 @@ func UpdatePodWithRetries(client clientset.Interface, ns, name string, update fu
|
|||
}
|
||||
|
||||
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
|
||||
pods, err := c.Core().Pods(ns).List(v1.ListOptions{})
|
||||
pods, err := c.Core().Pods(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return []*v1.Pod{}, err
|
||||
}
|
||||
|
@ -5033,7 +4973,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
|
|||
timeout := 10 * time.Minute
|
||||
startTime := time.Now()
|
||||
|
||||
allPods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
|
||||
allPods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
|
||||
ExpectNoError(err)
|
||||
// API server returns also Pods that succeeded. We need to filter them out.
|
||||
currentPods := make([]v1.Pod, 0, len(allPods.Items))
|
||||
|
@ -5048,7 +4988,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
|
|||
for len(currentlyNotScheduledPods) != 0 {
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
allPods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
|
||||
allPods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
|
||||
ExpectNoError(err)
|
||||
scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods)
|
||||
|
||||
|
@ -5064,7 +5004,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
|
|||
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
|
||||
nodes := &v1.NodeList{}
|
||||
masters := sets.NewString()
|
||||
all, _ := c.Core().Nodes().List(v1.ListOptions{})
|
||||
all, _ := c.Core().Nodes().List(metav1.ListOptions{})
|
||||
for _, n := range all.Items {
|
||||
if system.IsMasterNode(n.Name) {
|
||||
masters.Insert(n.Name)
|
||||
|
@ -5076,7 +5016,7 @@ func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeL
|
|||
}
|
||||
|
||||
func ListNamespaceEvents(c clientset.Interface, ns string) error {
|
||||
ls, err := c.Core().Events(ns).List(v1.ListOptions{})
|
||||
ls, err := c.Core().Events(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -5197,7 +5137,7 @@ func getMaster(c clientset.Interface) Address {
|
|||
master := Address{}
|
||||
|
||||
// Populate the internal IP.
|
||||
eps, err := c.Core().Endpoints(v1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
|
||||
eps, err := c.Core().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Failf("Failed to get kubernetes endpoints: %v", err)
|
||||
}
|
||||
|
@ -5280,7 +5220,7 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
|
|||
Kind: "ReplicationController",
|
||||
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
|
@ -5289,7 +5229,7 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
|
|||
"name": name,
|
||||
},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@@ -5300,3 +5240,54 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
 		},
 	}
 }
+
+// SimpleGET executes a get on the given url, returns error if non-200 returned.
+func SimpleGET(c *http.Client, url, host string) (string, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return "", err
+	}
+	req.Host = host
+	res, err := c.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer res.Body.Close()
+	rawBody, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return "", err
+	}
+	body := string(rawBody)
+	if res.StatusCode != http.StatusOK {
+		err = fmt.Errorf(
+			"GET returned http error %v", res.StatusCode)
+	}
+	return body, err
+}
+
+// PollURL polls till the url responds with a healthy http code. If
+// expectUnreachable is true, it breaks on first non-healthy http code instead.
+func PollURL(route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error {
+	var lastBody string
+	pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		var err error
+		lastBody, err = SimpleGET(httpClient, route, host)
+		if err != nil {
+			Logf("host %v path %v: %v unreachable", host, route, err)
+			return expectUnreachable, nil
+		}
+		return !expectUnreachable, nil
+	})
+	if pollErr != nil {
+		return fmt.Errorf("Failed to execute a successful GET within %v, Last response body for %v, host %v:\n%v\n\n%v\n",
+			timeout, route, host, lastBody, pollErr)
+	}
+	return nil
+}
+
+func DescribeIng(ns string) {
+	Logf("\nOutput of kubectl describe ing:\n")
+	desc, _ := RunKubectl(
+		"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
+	Logf(desc)
+}
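For context, a minimal sketch of how a test could drive the SimpleGET and PollURL helpers promoted into the framework package above; the route, host header and timeout values are illustrative assumptions, not taken from this commit.

package e2e

import (
	"net/http"
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// checkEndpointReachable is a sketch only: it waits until the given IP serves a
// healthy HTTP code for the host header via framework.PollURL, then captures the
// response body once with framework.SimpleGET for logging.
func checkEndpointReachable(ip, host string) (string, error) {
	httpClient := &http.Client{Timeout: 30 * time.Second}
	route := "http://" + ip + "/"
	// expectUnreachable=false: keep polling until a 2xx is returned or the timeout hits.
	if err := framework.PollURL(route, host, 5*time.Minute, 10*time.Second, httpClient, false); err != nil {
		return "", err
	}
	return framework.SimpleGET(httpClient, route, host)
}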
56 vendor/k8s.io/kubernetes/test/e2e/garbage_collector.go generated vendored
@ -32,20 +32,20 @@ import (
|
|||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
func getOrphanOptions() *v1.DeleteOptions {
|
||||
func getOrphanOptions() *metav1.DeleteOptions {
|
||||
var trueVar = true
|
||||
return &v1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
return &metav1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
}
|
||||
|
||||
func getNonOrphanOptions() *v1.DeleteOptions {
|
||||
func getNonOrphanOptions() *metav1.DeleteOptions {
|
||||
var falseVar = false
|
||||
return &v1.DeleteOptions{OrphanDependents: &falseVar}
|
||||
return &metav1.DeleteOptions{OrphanDependents: &falseVar}
|
||||
}
|
||||
|
||||
var zero = int64(0)
|
||||
var deploymentLabels = map[string]string{"app": "gc-test"}
|
||||
var podTemplateSpec = v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: deploymentLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -62,7 +62,7 @@ var podTemplateSpec = v1.PodTemplateSpec{
|
|||
func newOwnerDeployment(f *framework.Framework, deploymentName string) *v1beta1.Deployment {
|
||||
replicas := int32(2)
|
||||
return &v1beta1.Deployment{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
},
|
||||
Spec: v1beta1.DeploymentSpec{
|
||||
|
@ -84,7 +84,7 @@ func newOwnerRC(f *framework.Framework, name string) *v1.ReplicationController {
|
|||
Kind: "ReplicationController",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
|
@ -106,7 +106,7 @@ func verifyRemainingDeploymentsAndReplicaSets(
|
|||
deploymentNum, rsNum int,
|
||||
) (bool, error) {
|
||||
var ret = true
|
||||
rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(v1.ListOptions{})
|
||||
rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list rs: %v", err)
|
||||
}
|
||||
|
@ -114,7 +114,7 @@ func verifyRemainingDeploymentsAndReplicaSets(
|
|||
ret = false
|
||||
By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
|
||||
}
|
||||
deployments, err := clientSet.Extensions().Deployments(f.Namespace.Name).List(v1.ListOptions{})
|
||||
deployments, err := clientSet.Extensions().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list deployments: %v", err)
|
||||
}
|
||||
|
@ -130,7 +130,7 @@ func verifyRemainingDeploymentsAndReplicaSets(
|
|||
// communication with the API server fails.
|
||||
func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
|
||||
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
|
||||
pods, err := clientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
|
||||
pods, err := clientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list pods: %v", err)
|
||||
}
|
||||
|
@ -139,7 +139,7 @@ func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interfac
|
|||
ret = false
|
||||
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
|
||||
}
|
||||
rcs, err := rcClient.List(v1.ListOptions{})
|
||||
rcs, err := rcClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
|
||||
}
|
||||
|
@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
}
|
||||
// wait for rc to create some pods
|
||||
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
pods, err := podClient.List(v1.ListOptions{})
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list pods: %v", err)
|
||||
}
|
||||
|
@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
}
|
||||
By("delete the rc")
|
||||
deleteOptions := getNonOrphanOptions()
|
||||
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(rc.UID))
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
|
||||
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the rc: %v", err)
|
||||
}
|
||||
|
@ -210,7 +210,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
return verifyRemainingObjects(f, clientSet, 0, 0)
|
||||
}); err != nil {
|
||||
framework.Failf("failed to wait for all pods to be deleted: %v", err)
|
||||
remainingPods, err := podClient.List(v1.ListOptions{})
|
||||
remainingPods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list pods post mortem: %v", err)
|
||||
} else {
|
||||
|
@ -249,13 +249,13 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
}
|
||||
By("delete the rc")
|
||||
deleteOptions := getOrphanOptions()
|
||||
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(rc.UID))
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
|
||||
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the rc: %v", err)
|
||||
}
|
||||
By("wait for the rc to be deleted")
|
||||
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
rcs, err := rcClient.List(v1.ListOptions{})
|
||||
rcs, err := rcClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list rcs: %v", err)
|
||||
}
|
||||
|
@ -268,7 +268,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
}
|
||||
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
|
||||
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
pods, err := podClient.List(v1.ListOptions{})
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list pods: %v", err)
|
||||
}
|
||||
|
@ -308,14 +308,14 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
|
||||
}
|
||||
By("delete the rc")
|
||||
deleteOptions := &v1.DeleteOptions{}
|
||||
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(rc.UID))
|
||||
deleteOptions := &metav1.DeleteOptions{}
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
|
||||
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the rc: %v", err)
|
||||
}
|
||||
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
|
||||
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
pods, err := podClient.List(v1.ListOptions{})
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list pods: %v", err)
|
||||
}
|
||||
|
@ -343,7 +343,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
// wait for deployment to create some rs
|
||||
By("Wait for the Deployment to create new ReplicaSet")
|
||||
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
rsList, err := rsClient.List(v1.ListOptions{})
|
||||
rsList, err := rsClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list rs: %v", err)
|
||||
}
|
||||
|
@ -356,7 +356,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
|
||||
By("delete the deployment")
|
||||
deleteOptions := getNonOrphanOptions()
|
||||
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(createdDeployment.UID))
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
|
||||
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the deployment: %v", err)
|
||||
}
|
||||
|
@ -366,7 +366,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("Failed to wait for all rs to be garbage collected: %v", err)
|
||||
remainingRSs, err := rsClient.List(v1.ListOptions{})
|
||||
remainingRSs, err := rsClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list RSs post mortem: %v", err)
|
||||
} else {
|
||||
|
@ -392,7 +392,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
// wait for deployment to create some rs
|
||||
By("Wait for the Deployment to create new ReplicaSet")
|
||||
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
rsList, err := rsClient.List(v1.ListOptions{})
|
||||
rsList, err := rsClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list rs: %v", err)
|
||||
}
|
||||
|
@ -405,7 +405,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
|
||||
By("delete the deployment")
|
||||
deleteOptions := getOrphanOptions()
|
||||
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(createdDeployment.UID))
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
|
||||
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the deployment: %v", err)
|
||||
}
|
||||
|
@ -415,20 +415,20 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
|
|||
})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Failed to wait to see if the garbage collecter mistakenly deletes the rs: %v", err)
|
||||
remainingRSs, err := rsClient.List(v1.ListOptions{})
|
||||
remainingRSs, err := rsClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list RSs post mortem: %v", err)
|
||||
} else {
|
||||
framework.Failf("remaining rs post mortem: %#v", remainingRSs)
|
||||
}
|
||||
remainingDSs, err := deployClient.List(v1.ListOptions{})
|
||||
remainingDSs, err := deployClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list Deployments post mortem: %v", err)
|
||||
} else {
|
||||
framework.Failf("remaining deployment's post mortem: %#v", remainingDSs)
|
||||
}
|
||||
}
|
||||
rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(v1.ListOptions{})
|
||||
rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to list ReplicaSet %v", err)
|
||||
}
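As a quick illustration of the metav1.DeleteOptions pattern the garbage-collector test exercises above; the client, namespace and RC variables in this sketch are assumptions, and the call mirrors the delete path shown in the hunks.

package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// deleteRCOrphaningPods is a sketch of the orphaning delete used above: the RC is
// removed with OrphanDependents=true and a UID precondition, so its pods survive
// and a same-named replacement object cannot be deleted by mistake.
func deleteRCOrphaningPods(c clientset.Interface, ns string, rc *v1.ReplicationController) {
	orphan := true
	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &orphan}
	deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
	if err := c.Core().ReplicationControllers(ns).Delete(rc.Name, deleteOptions); err != nil {
		framework.Failf("failed to delete the rc: %v", err)
	}
}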
29 vendor/k8s.io/kubernetes/test/e2e/generated_clientset.go generated vendored
@ -20,6 +20,7 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
@ -37,7 +38,7 @@ import (
|
|||
|
||||
func stagingClientPod(name, value string) clientv1.Pod {
|
||||
return clientv1.Pod{
|
||||
ObjectMeta: clientv1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
|
@ -58,7 +59,7 @@ func stagingClientPod(name, value string) clientv1.Pod {
|
|||
|
||||
func testingPod(name, value string) v1.Pod {
|
||||
return v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
|
@ -129,13 +130,13 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
|
|||
pod := &podCopy
|
||||
By("setting up watch")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
|
||||
options := v1.ListOptions{LabelSelector: selector}
|
||||
options := metav1.ListOptions{LabelSelector: selector}
|
||||
pods, err := podClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to query for pods: %v", err)
|
||||
}
|
||||
Expect(len(pods.Items)).To(Equal(0))
|
||||
options = v1.ListOptions{
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
ResourceVersion: pods.ListMeta.ResourceVersion,
|
||||
}
|
||||
|
@ -151,7 +152,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
|
|||
}
|
||||
|
||||
By("verifying the pod is in kubernetes")
|
||||
options = v1.ListOptions{
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
ResourceVersion: pod.ResourceVersion,
|
||||
}
|
||||
|
@ -169,7 +170,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
|
|||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("deleting the pod gracefully")
|
||||
if err := podClient.Delete(pod.Name, v1.NewDeleteOptions(30)); err != nil {
|
||||
if err := podClient.Delete(pod.Name, metav1.NewDeleteOptions(30)); err != nil {
|
||||
framework.Failf("Failed to delete pod: %v", err)
|
||||
}
|
||||
|
||||
|
@ -179,7 +180,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
|
|||
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
|
||||
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
|
||||
|
||||
options = v1.ListOptions{LabelSelector: selector}
|
||||
options = metav1.ListOptions{LabelSelector: selector}
|
||||
pods, err = podClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to list pods to verify deletion: %v", err)
|
||||
|
@ -192,7 +193,7 @@ func newTestingCronJob(name string, value string) *v2alpha1.CronJob {
|
|||
parallelism := int32(1)
|
||||
completions := int32(1)
|
||||
return &v2alpha1.CronJob{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"time": value,
|
||||
|
@ -263,13 +264,13 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
|
|||
cronJob := newTestingCronJob(name, value)
|
||||
By("setting up watch")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
|
||||
options := v1.ListOptions{LabelSelector: selector}
|
||||
options := metav1.ListOptions{LabelSelector: selector}
|
||||
cronJobs, err := cronJobClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to query for cronJobs: %v", err)
|
||||
}
|
||||
Expect(len(cronJobs.Items)).To(Equal(0))
|
||||
options = v1.ListOptions{
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
ResourceVersion: cronJobs.ListMeta.ResourceVersion,
|
||||
}
|
||||
|
@ -285,7 +286,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
|
|||
}
|
||||
|
||||
By("verifying the cronJob is in kubernetes")
|
||||
options = v1.ListOptions{
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
ResourceVersion: cronJob.ResourceVersion,
|
||||
}
|
||||
|
@ -303,7 +304,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
|
|||
framework.Failf("Failed to delete cronJob: %v", err)
|
||||
}
|
||||
|
||||
options = v1.ListOptions{LabelSelector: selector}
|
||||
options = metav1.ListOptions{LabelSelector: selector}
|
||||
cronJobs, err = cronJobClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to list cronJobs to verify deletion: %v", err)
|
||||
|
@ -322,7 +323,7 @@ var _ = framework.KubeDescribe("Staging client repo client", func() {
|
|||
podCopy := stagingClientPod(name, value)
|
||||
pod := &podCopy
|
||||
By("verifying no pod exists before the test")
|
||||
pods, err := podClient.List(clientv1.ListOptions{})
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to query for pods: %v", err)
|
||||
}
|
||||
|
@ -336,7 +337,7 @@ var _ = framework.KubeDescribe("Staging client repo client", func() {
|
|||
By("verifying the pod is in kubernetes")
|
||||
timeout := 1 * time.Minute
|
||||
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
|
||||
pods, err = podClient.List(clientv1.ListOptions{})
|
||||
pods, err = podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
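The list-then-watch pattern used by the clientset test above can be summarized as a small sketch; the function name and return shape are assumptions, but the List and Watch calls mirror the ones in the hunks.

package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// listThenWatchPods is a sketch only: list pods matching the selector, then start
// a watch from the returned resource version so no events between the two calls
// are missed.
func listThenWatchPods(c clientset.Interface, ns string, selector labels.Selector) (*v1.PodList, watch.Interface, error) {
	options := metav1.ListOptions{LabelSelector: selector.String()}
	pods, err := c.Core().Pods(ns).List(options)
	if err != nil {
		return nil, nil, err
	}
	options.ResourceVersion = pods.ListMeta.ResourceVersion
	w, err := c.Core().Pods(ns).Watch(options)
	return pods, w, err
}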
2 vendor/k8s.io/kubernetes/test/e2e/gke_local_ssd.go generated vendored
@@ -76,7 +76,7 @@ func testPodWithSsd(command string) *v1.Pod {
 			Kind:       "Pod",
 			APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: podName,
 		},
 		Spec: v1.PodSpec{
4 vendor/k8s.io/kubernetes/test/e2e/horizontal_pod_autoscaling.go generated vendored
@@ -19,7 +19,7 @@ package e2e
 import (
 	"time"
 
-	"k8s.io/kubernetes/pkg/api/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 
@@ -177,7 +177,7 @@ func scaleDown(name, kind string, checkStability bool, rc *ResourceConsumer, f *
 
 func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) {
 	hpa := &autoscaling.HorizontalPodAutoscaler{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      rc.name,
 			Namespace: rc.framework.Namespace.Name,
 		},
14 vendor/k8s.io/kubernetes/test/e2e/ingress.go generated vendored
@@ -22,8 +22,8 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
-	"k8s.io/kubernetes/pkg/serviceaccount"
+	"k8s.io/apiserver/pkg/authentication/serviceaccount"
+	rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 		// this test wants powerful permissions. Since the namespace names are unique, we can leave this
 		// lying around so we don't have to race any caches
 		framework.BindClusterRole(jig.client.Rbac(), "cluster-admin", f.Namespace.Name,
-			rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
+			rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
 
 		err := framework.WaitForAuthorizationUpdate(jig.client.Authorization(),
 			serviceaccount.MakeUsername(f.Namespace.Name, "default"),
@@ -102,7 +102,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 	// Platform specific cleanup
 	AfterEach(func() {
 		if CurrentGinkgoTestDescription().Failed {
-			describeIng(ns)
+			framework.DescribeIng(ns)
 		}
 		if jig.ing == nil {
 			By("No ingress created, no cleanup necessary")
@@ -137,10 +137,10 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 
 		By("waiting for Ingress to come up with ip: " + ip)
 		httpClient := buildInsecureClient(reqTimeout)
-		framework.ExpectNoError(pollURL(fmt.Sprintf("https://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, false))
+		framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, false))
 
 		By("should reject HTTP traffic")
-		framework.ExpectNoError(pollURL(fmt.Sprintf("http://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, true))
+		framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, true))
 
 		By("should have correct firewall rule for ingress")
 		fw := gceController.getFirewallRule()
@@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 			framework.ExpectNoError(gcloudDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
 		}
 		if CurrentGinkgoTestDescription().Failed {
-			describeIng(ns)
+			framework.DescribeIng(ns)
 		}
 		if jig.ing == nil {
 			By("No ingress created, no cleanup necessary")
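A minimal sketch of the rbac/v1beta1 binding shown in the hunk above; the helper name and the clientset parameter are assumptions, while the BindClusterRole call itself mirrors the one in the diff.

package e2e

import (
	rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// grantClusterAdmin is a sketch only: it binds the namespace's default service
// account to cluster-admin so a test can manage cluster-scoped resources.
func grantClusterAdmin(c clientset.Interface, ns string) {
	framework.BindClusterRole(c.Rbac(), "cluster-admin", ns,
		rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default"})
}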
47 vendor/k8s.io/kubernetes/test/e2e/ingress_utils.go generated vendored
@ -181,33 +181,13 @@ func createComformanceTests(jig *testJig, ns string) []conformanceTests {
|
|||
})
|
||||
By("Checking that " + pathToFail + " is not exposed by polling for failure")
|
||||
route := fmt.Sprintf("http://%v%v", jig.address, pathToFail)
|
||||
framework.ExpectNoError(pollURL(route, updateURLMapHost, framework.LoadBalancerCleanupTimeout, jig.pollInterval, &http.Client{Timeout: reqTimeout}, true))
|
||||
framework.ExpectNoError(framework.PollURL(route, updateURLMapHost, framework.LoadBalancerCleanupTimeout, jig.pollInterval, &http.Client{Timeout: reqTimeout}, true))
|
||||
},
|
||||
fmt.Sprintf("Waiting for path updates to reflect in L7"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// pollURL polls till the url responds with a healthy http code. If
|
||||
// expectUnreachable is true, it breaks on first non-healthy http code instead.
|
||||
func pollURL(route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error {
|
||||
var lastBody string
|
||||
pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
var err error
|
||||
lastBody, err = simpleGET(httpClient, route, host)
|
||||
if err != nil {
|
||||
framework.Logf("host %v path %v: %v unreachable", host, route, err)
|
||||
return expectUnreachable, nil
|
||||
}
|
||||
return !expectUnreachable, nil
|
||||
})
|
||||
if pollErr != nil {
|
||||
return fmt.Errorf("Failed to execute a successful GET within %v, Last response body for %v, host %v:\n%v\n\n%v\n",
|
||||
timeout, route, host, lastBody, pollErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateRSACerts generates a basic self signed certificate using a key length
|
||||
// of rsaBits, valid for validFor time.
|
||||
func generateRSACerts(host string, isCA bool, keyOut, certOut io.Writer) error {
|
||||
|
@ -306,7 +286,7 @@ func createSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host
|
|||
cert := c.Bytes()
|
||||
key := k.Bytes()
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: tls.SecretName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
|
@ -327,13 +307,6 @@ func createSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host
|
|||
return host, cert, key, err
|
||||
}
|
||||
|
||||
func describeIng(ns string) {
|
||||
framework.Logf("\nOutput of kubectl describe ing:\n")
|
||||
desc, _ := framework.RunKubectl(
|
||||
"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
|
||||
framework.Logf(desc)
|
||||
}
|
||||
|
||||
func cleanupGCE(gceController *GCEIngressController) {
|
||||
pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
|
||||
if err := gceController.Cleanup(false); err != nil {
|
||||
|
@ -821,7 +794,7 @@ func (j *testJig) update(update func(ing *extensions.Ingress)) {
|
|||
update(j.ing)
|
||||
j.ing, err = j.client.Extensions().Ingresses(ns).Update(j.ing)
|
||||
if err == nil {
|
||||
describeIng(j.ing.Namespace)
|
||||
framework.DescribeIng(j.ing.Namespace)
|
||||
return
|
||||
}
|
||||
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
|
||||
|
@ -889,7 +862,7 @@ func (j *testJig) waitForIngress(waitForNodePort bool) {
|
|||
}
|
||||
route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
|
||||
framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
|
||||
framework.ExpectNoError(pollURL(route, rules.Host, framework.LoadBalancerPollTimeout, j.pollInterval, timeoutClient, false))
|
||||
framework.ExpectNoError(framework.PollURL(route, rules.Host, framework.LoadBalancerPollTimeout, j.pollInterval, timeoutClient, false))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -898,7 +871,7 @@ func (j *testJig) waitForIngress(waitForNodePort bool) {
|
|||
// given url returns a non-healthy http code even once.
|
||||
func (j *testJig) verifyURL(route, host string, iterations int, interval time.Duration, httpClient *http.Client) error {
|
||||
for i := 0; i < iterations; i++ {
|
||||
b, err := simpleGET(httpClient, route, host)
|
||||
b, err := framework.SimpleGET(httpClient, route, host)
|
||||
if err != nil {
|
||||
framework.Logf(b)
|
||||
return err
|
||||
|
@ -913,7 +886,7 @@ func (j *testJig) curlServiceNodePort(ns, name string, port int) {
|
|||
// TODO: Curl all nodes?
|
||||
u, err := framework.GetNodePortURL(j.client, ns, name, port)
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(pollURL(u, "", 30*time.Second, j.pollInterval, &http.Client{Timeout: reqTimeout}, false))
|
||||
framework.ExpectNoError(framework.PollURL(u, "", 30*time.Second, j.pollInterval, &http.Client{Timeout: reqTimeout}, false))
|
||||
}
|
||||
|
||||
// getIngressNodePorts returns all related backend services' nodePorts.
|
||||
|
@ -921,7 +894,7 @@ func (j *testJig) curlServiceNodePort(ns, name string, port int) {
|
|||
// by default, so retrieve its nodePort as well.
|
||||
func (j *testJig) getIngressNodePorts() []string {
|
||||
nodePorts := []string{}
|
||||
defaultSvc, err := j.client.Core().Services(api.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{})
|
||||
defaultSvc, err := j.client.Core().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodePorts = append(nodePorts, strconv.Itoa(int(defaultSvc.Spec.Ports[0].NodePort)))
|
||||
|
||||
|
@ -975,8 +948,8 @@ func ingFromManifest(fileName string) *extensions.Ingress {
|
|||
}
|
||||
|
||||
func (cont *GCEIngressController) getL7AddonUID() (string, error) {
|
||||
framework.Logf("Retrieving UID from config map: %v/%v", api.NamespaceSystem, uidConfigMap)
|
||||
cm, err := cont.c.Core().ConfigMaps(api.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
|
||||
framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
|
||||
cm, err := cont.c.Core().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -1037,7 +1010,7 @@ func (cont *NginxIngressController) init() {
|
|||
framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
|
||||
sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
|
||||
framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel))
|
||||
pods, err := cont.c.Core().Pods(cont.ns).List(v1.ListOptions{LabelSelector: sel.String()})
|
||||
pods, err := cont.c.Core().Pods(cont.ns).List(metav1.ListOptions{LabelSelector: sel.String()})
|
||||
framework.ExpectNoError(err)
|
||||
if len(pods.Items) == 0 {
|
||||
framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
|
||||
3 vendor/k8s.io/kubernetes/test/e2e/initial_resources.go generated vendored
@@ -22,6 +22,7 @@ import (
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 )
@@ -53,7 +54,7 @@ var _ = framework.KubeDescribe("Initial Resources [Feature:InitialResources] [Fl
 
 func runPod(f *framework.Framework, name, image string) *v1.Pod {
 	pod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
 		Spec: v1.PodSpec{
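The ObjectMeta change applied throughout these files follows one pattern: object metadata now comes from the metav1 package while the pod spec stays in pkg/api/v1. A sketch, with an illustrative image and command that are not taken from this commit:

package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// newBusyboxPod is a sketch only: it builds a pod object in the post-migration
// shape, metav1.ObjectMeta for metadata and v1.PodSpec for the spec.
func newBusyboxPod(name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    name,
					Image:   "gcr.io/google_containers/busybox:1.24", // illustrative image
					Command: []string{"sleep", "3600"},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
}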
9 vendor/k8s.io/kubernetes/test/e2e/job.go generated vendored
@@ -23,7 +23,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
 	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
@@ -178,7 +177,7 @@ var _ = framework.KubeDescribe("Job", func() {
 		reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
 		Expect(err).NotTo(HaveOccurred())
 		timeout := 1 * time.Minute
-		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
+		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Ensuring job was deleted")
@@ -216,7 +215,7 @@ var _ = framework.KubeDescribe("Job", func() {
 // newTestJob returns a job which does one of several testing behaviors.
 func newTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32) *batch.Job {
 	job := &batch.Job{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
 		Spec: batch.JobSpec{
@@ -224,7 +223,7 @@ func newTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, compl
 			Completions:    &completions,
 			ManualSelector: newBool(false),
 			Template: v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{jobSelectorKey: name},
 				},
 				Spec: v1.PodSpec{
@@ -296,7 +295,7 @@ func deleteJob(c clientset.Interface, ns, name string) error {
 func waitForAllPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
 	label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
 	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
-		options := v1.ListOptions{LabelSelector: label.String()}
+		options := metav1.ListOptions{LabelSelector: label.String()}
 		pods, err := c.Core().Pods(ns).List(options)
 		if err != nil {
 			return false, err
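The polling loop above generalizes to a small sketch; the helper name, interval and timeout below are assumptions, while the metav1.ListOptions label-selector listing mirrors the hunk.

package e2e

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// waitForRunningPodsWithLabel is a sketch only: it polls until at least want pods
// matching the selector report PodRunning.
func waitForRunningPodsWithLabel(c clientset.Interface, ns string, label labels.Selector, want int) error {
	return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		options := metav1.ListOptions{LabelSelector: label.String()}
		pods, err := c.Core().Pods(ns).List(options)
		if err != nil {
			return false, err
		}
		running := 0
		for _, p := range pods.Items {
			if p.Status.Phase == v1.PodRunning {
				running++
			}
		}
		return running >= want, nil
	})
}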
10 vendor/k8s.io/kubernetes/test/e2e/kibana_logging.go generated vendored
@@ -22,8 +22,6 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -57,7 +55,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 
 	// Check for the existence of the Kibana service.
 	By("Checking the Kibana service exists.")
-	s := f.ClientSet.Core().Services(api.NamespaceSystem)
+	s := f.ClientSet.Core().Services(metav1.NamespaceSystem)
 	// Make a few attempts to connect. This makes the test robust against
 	// being run as the first e2e test just after the e2e cluster has been created.
 	var err error
@@ -72,8 +70,8 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 	// Wait for the Kibana pod(s) to enter the running state.
 	By("Checking to make sure the Kibana pods are running")
 	label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
-	options := v1.ListOptions{LabelSelector: label.String()}
-	pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
+	options := metav1.ListOptions{LabelSelector: label.String()}
+	pods, err := f.ClientSet.Core().Pods(metav1.NamespaceSystem).List(options)
 	Expect(err).NotTo(HaveOccurred())
 	for _, pod := range pods.Items {
 		err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
@@ -94,7 +92,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 	defer cancel()
 
 	// Query against the root URL for Kibana.
-	_, err = proxyRequest.Namespace(api.NamespaceSystem).
+	_, err = proxyRequest.Namespace(metav1.NamespaceSystem).
 		Context(ctx).
 		Name("kibana-logging").
 		DoRaw()
|
5 vendor/k8s.io/kubernetes/test/e2e/kube_proxy.go generated vendored
@@ -24,6 +24,7 @@ import (
"strings"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"

"k8s.io/kubernetes/test/e2e/framework"
@@ -76,7 +77,7 @@ var _ = framework.KubeDescribe("Network", func() {
zero := int64(0)

clientPodSpec := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-net-client",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-client"},
@@ -98,7 +99,7 @@ var _ = framework.KubeDescribe("Network", func() {
}

serverPodSpec := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-net-server",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-server"},
12 vendor/k8s.io/kubernetes/test/e2e/kubectl.go generated vendored
@@ -48,15 +48,15 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/pkg/api/annotations"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/controller"
genericregistry "k8s.io/kubernetes/pkg/genericapiserver/registry/generic/registry"
"k8s.io/kubernetes/pkg/kubectl/cmd/util"
genericregistry "k8s.io/kubernetes/pkg/registry/generic/registry"
"k8s.io/kubernetes/pkg/serviceaccount"
uexec "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/uuid"
utilversion "k8s.io/kubernetes/pkg/util/version"
@@ -581,7 +581,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
By("adding rbac permissions")
// grant the view permission widely to allow inspection of the `invalid` namespace.
framework.BindClusterRole(f.ClientSet.Rbac(), "view", f.Namespace.Name,
rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})

err := framework.WaitForAuthorizationUpdate(f.ClientSet.Authorization(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
@@ -764,7 +764,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {

// Node
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
node := nodes.Items[0]
output = framework.RunKubectlOrDie("describe", "node", node.Name)
@@ -1736,7 +1736,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select
var err error
for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
options := v1.ListOptions{LabelSelector: label.String()}
options := metav1.ListOptions{LabelSelector: label.String()}
rcs, err = c.Core().ReplicationControllers(ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(rcs.Items) > 0 {
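The RBAC calls in this file move from the v1alpha1 group to v1beta1; only the package alias and group version change, the Subject shape stays the same. A hedged sketch of the new binding call, reusing exactly the identifiers from the hunk above (the helper name bindViewRole is illustrative):

package example

import (
	rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// bindViewRole grants the view ClusterRole to the namespace's default
// service account, as the test above does before exercising kubectl.
func bindViewRole(cs clientset.Interface, ns string) {
	framework.BindClusterRole(cs.Rbac(), "view", ns,
		rbacv1beta1.Subject{
			Kind:      rbacv1beta1.ServiceAccountKind, // constant now read from the v1beta1 package
			Namespace: ns,
			Name:      "default",
		})
}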
4 vendor/k8s.io/kubernetes/test/e2e/kubelet_perf.go generated vendored
@@ -21,8 +21,8 @@ import (
"strings"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -201,7 +201,7 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
// Wait until image prepull pod has completed so that they wouldn't
// affect the runtime cpu usage. Fail the test if prepulling cannot
// finish in time.
if err := framework.WaitForPodsSuccess(f.ClientSet, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
if err := framework.WaitForPodsSuccess(f.ClientSet, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adultrated", imagePrePullingLongTimeout)
}
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
4 vendor/k8s.io/kubernetes/test/e2e/limit_range.go generated vendored
@@ -149,7 +149,7 @@ func newLimitRange(name string, limitType v1.LimitType,
defaultLimit, defaultRequest,
maxLimitRequestRatio v1.ResourceList) *v1.LimitRange {
return &v1.LimitRange{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.LimitRangeSpec{
@@ -170,7 +170,7 @@ func newLimitRange(name string, limitType v1.LimitType,
// newTestPod returns a pod that has the specified requests and limits
func newTestPod(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
9 vendor/k8s.io/kubernetes/test/e2e/load.go generated vendored
@@ -27,17 +27,18 @@ import (
"sync"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/transport"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/transport"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
@@ -423,7 +424,7 @@ func generateServicesForConfigs(configs []testutils.RunObjectConfig) []*v1.Servi
serviceName := config.GetName() + "-svc"
labels := map[string]string{"name": config.GetName()}
service := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: config.GetNamespace(),
},
@@ -483,7 +484,7 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
fmt.Sprintf("scaling rc %s for the first time", config.GetName()))

selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()}))
options := v1.ListOptions{
options := metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: "0",
}
6 vendor/k8s.io/kubernetes/test/e2e/mesos.go generated vendored
@@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
nodeClient := f.ClientSet.Core().Nodes()

rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"})
options := v1.ListOptions{LabelSelector: rackA.String()}
options := metav1.ListOptions{LabelSelector: rackA.String()}
nodes, err := nodeClient.List(options)
if err != nil {
framework.Failf("Failed to query for node: %v", err)
@@ -83,7 +83,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
},
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Annotations: map[string]string{
"k8s.mesosphere.io/roles": "public",
@@ -110,7 +110,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
rack2 := labels.SelectorFromSet(map[string]string{
"k8s.mesosphere.io/attribute-rack": "2",
})
options := v1.ListOptions{LabelSelector: rack2.String()}
options := metav1.ListOptions{LabelSelector: rack2.String()}
nodes, err := nodeClient.List(options)
framework.ExpectNoError(err)
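Listing with a label selector keeps the labels helper but hands the result to metav1.ListOptions, as the Mesos hunks above do for nodes. A small sketch of that pattern, assuming a clientset like the tests' f.ClientSet; the function name is illustrative and the return type is inferred from the nodes.Items usage above:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// listNodesByRack lists nodes carrying the Mesos rack attribute label.
func listNodesByRack(c clientset.Interface, rack string) (*v1.NodeList, error) {
	sel := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": rack})
	options := metav1.ListOptions{LabelSelector: sel.String()} // was v1.ListOptions
	return c.Core().Nodes().List(options)
}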
6 vendor/k8s.io/kubernetes/test/e2e/metrics_grabber_test.go generated vendored
@@ -19,7 +19,7 @@ package e2e
import (
"strings"

"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/metrics"
"k8s.io/kubernetes/test/e2e/framework"
@@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
It("should grab all metrics from a Scheduler.", func() {
By("Proxying to Pod through the API server")
// Check if master Node is registered
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)

var masterRegistered = false
@@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
It("should grab all metrics from a ControllerManager.", func() {
By("Proxying to Pod through the API server")
// Check if master Node is registered
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)

var masterRegistered = false
31
vendor/k8s.io/kubernetes/test/e2e/monitoring.go
generated
vendored
31
vendor/k8s.io/kubernetes/test/e2e/monitoring.go
generated
vendored
|
@ -24,9 +24,8 @@ import (
|
|||
"time"
|
||||
|
||||
influxdb "github.com/influxdata/influxdb/client"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
|
@ -110,16 +109,16 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
|
|||
// is running (which would be an error except during a rolling update).
|
||||
for _, rcLabel := range rcLabels {
|
||||
selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options)
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
deploymentList, err := c.Extensions().Deployments(metav1.NamespaceSystem).List(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rcList, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options)
|
||||
rcList, err := c.Core().ReplicationControllers(metav1.NamespaceSystem).List(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
psList, err := c.Apps().StatefulSets(api.NamespaceSystem).List(options)
|
||||
psList, err := c.Apps().StatefulSets(metav1.NamespaceSystem).List(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -130,8 +129,8 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
|
|||
// Check all the replication controllers.
|
||||
for _, rc := range rcList.Items {
|
||||
selector := labels.Set(rc.Spec.Selector).AsSelector()
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -145,8 +144,8 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
|
|||
// Do the same for all deployments.
|
||||
for _, rc := range deploymentList.Items {
|
||||
selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector()
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -160,8 +159,8 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
|
|||
// And for pet sets.
|
||||
for _, ps := range psList.Items {
|
||||
selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -177,7 +176,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
|
|||
}
|
||||
|
||||
func expectedServicesExist(c clientset.Interface) error {
|
||||
serviceList, err := c.Core().Services(api.NamespaceSystem).List(v1.ListOptions{})
|
||||
serviceList, err := c.Core().Services(metav1.NamespaceSystem).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -196,7 +195,7 @@ func expectedServicesExist(c clientset.Interface) error {
|
|||
|
||||
func getAllNodesInCluster(c clientset.Interface) ([]string, error) {
|
||||
// It should be OK to list unschedulable Nodes here.
|
||||
nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
|
||||
nodeList, err := c.Core().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -290,8 +289,8 @@ func testMonitoringUsingHeapsterInfluxdb(c clientset.Interface) {
|
|||
|
||||
func printDebugInfo(c clientset.Interface) {
|
||||
set := labels.Set{"k8s-app": "heapster"}
|
||||
options := v1.ListOptions{LabelSelector: set.AsSelector().String()}
|
||||
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
|
||||
options := metav1.ListOptions{LabelSelector: set.AsSelector().String()}
|
||||
podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options)
|
||||
if err != nil {
|
||||
framework.Logf("Error while listing pods %v", err)
|
||||
return
|
||||
|
|
6
vendor/k8s.io/kubernetes/test/e2e/namespace.go
generated
vendored
6
vendor/k8s.io/kubernetes/test/e2e/namespace.go
generated
vendored
|
@ -61,7 +61,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
|
|||
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
|
||||
func() (bool, error) {
|
||||
var cnt = 0
|
||||
nsList, err := f.ClientSet.Core().Namespaces().List(v1.ListOptions{})
|
||||
nsList, err := f.ClientSet.Core().Namespaces().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -91,7 +91,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
|
|||
|
||||
By("Creating a pod in the namespace")
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pod",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -147,7 +147,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
|
|||
"baz": "blah",
|
||||
}
|
||||
service := &v1.Service{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceName,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
|
|
24
vendor/k8s.io/kubernetes/test/e2e/network_partition.go
generated
vendored
24
vendor/k8s.io/kubernetes/test/e2e/network_partition.go
generated
vendored
|
@ -22,6 +22,7 @@ import (
|
|||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
@ -30,7 +31,6 @@ import (
|
|||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
|
@ -93,7 +93,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
|
|||
|
||||
func podOnNode(podName, nodeName string, image string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{
|
||||
"name": podName,
|
||||
|
@ -160,16 +160,16 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
It("All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
|
||||
"AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout", func() {
|
||||
By("choose a node - we will block all network traffic on this node")
|
||||
var podOpts v1.ListOptions
|
||||
nodeOpts := v1.ListOptions{}
|
||||
var podOpts metav1.ListOptions
|
||||
nodeOpts := metav1.ListOptions{}
|
||||
nodes, err := c.Core().Nodes().List(nodeOpts)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.FilterNodes(nodes, func(node v1.Node) bool {
|
||||
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
|
||||
return false
|
||||
}
|
||||
podOpts = v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
pods, err := c.Core().Pods(v1.NamespaceAll).List(podOpts)
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts)
|
||||
if err != nil || len(pods.Items) <= 0 {
|
||||
return false
|
||||
}
|
||||
|
@ -179,7 +179,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
framework.Failf("No eligible node were found: %d", len(nodes.Items))
|
||||
}
|
||||
node := nodes.Items[0]
|
||||
podOpts = v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
|
||||
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
|
||||
}
|
||||
|
@ -191,12 +191,12 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
var controller cache.Controller
|
||||
_, controller = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
obj, err := f.ClientSet.Core().Nodes().List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
return f.ClientSet.Core().Nodes().Watch(options)
|
||||
},
|
||||
|
@ -264,7 +264,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := v1.ListOptions{LabelSelector: label.String()}
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
@ -329,7 +329,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := v1.ListOptions{LabelSelector: label.String()}
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
@ -449,7 +449,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
options := v1.ListOptions{LabelSelector: label.String()}
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
|
34
vendor/k8s.io/kubernetes/test/e2e/node_problem_detector.go
generated
vendored
34
vendor/k8s.io/kubernetes/test/e2e/node_problem_detector.go
generated
vendored
|
@ -23,15 +23,15 @@ import (
|
|||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
|
||||
rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/serviceaccount"
|
||||
"k8s.io/kubernetes/pkg/util/system"
|
||||
"k8s.io/kubernetes/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
@ -59,12 +59,12 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
|||
name = "node-problem-detector-" + uid
|
||||
configName = "node-problem-detector-config-" + uid
|
||||
// There is no namespace for Node, event recorder will set default namespace for node events.
|
||||
eventNamespace = v1.NamespaceDefault
|
||||
eventNamespace = metav1.NamespaceDefault
|
||||
|
||||
// this test wants extra permissions. Since the namespace names are unique, we can leave this
|
||||
// lying around so we don't have to race any caches
|
||||
framework.BindClusterRole(f.ClientSet.Rbac(), "cluster-admin", f.Namespace.Name,
|
||||
rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
|
||||
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
|
||||
|
||||
err := framework.WaitForAuthorizationUpdate(f.ClientSet.Authorization(),
|
||||
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
|
||||
|
@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
|||
)
|
||||
var source, config, tmpDir string
|
||||
var node *v1.Node
|
||||
var eventListOptions v1.ListOptions
|
||||
var eventListOptions metav1.ListOptions
|
||||
injectCommand := func(timestamp time.Time, log string, num int) string {
|
||||
var commands []string
|
||||
for i := 0; i < num; i++ {
|
||||
|
@ -147,7 +147,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
|||
]
|
||||
}`
|
||||
By("Get a non master node to run the pod")
|
||||
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
|
||||
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
node = nil
|
||||
for _, n := range nodes.Items {
|
||||
|
@ -161,17 +161,17 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
|||
selector := fields.Set{
|
||||
"involvedObject.kind": "Node",
|
||||
"involvedObject.name": node.Name,
|
||||
"involvedObject.namespace": v1.NamespaceAll,
|
||||
"involvedObject.namespace": metav1.NamespaceAll,
|
||||
"source": source,
|
||||
}.AsSelector().String()
|
||||
eventListOptions = v1.ListOptions{FieldSelector: selector}
|
||||
eventListOptions = metav1.ListOptions{FieldSelector: selector}
|
||||
By("Create the test log file")
|
||||
tmpDir = "/tmp/" + name
|
||||
cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile)
|
||||
Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed())
|
||||
By("Create config map for the node problem detector")
|
||||
_, err = c.Core().ConfigMaps(ns).Create(&v1.ConfigMap{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
},
|
||||
Data: map[string]string{configFile: config},
|
||||
|
@ -179,7 +179,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
|||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Create the node problem detector")
|
||||
_, err = c.Core().Pods(ns).Create(&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -376,16 +376,16 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
|||
framework.Logf("Node Problem Detector logs:\n %s", log)
|
||||
}
|
||||
By("Delete the node problem detector")
|
||||
c.Core().Pods(ns).Delete(name, v1.NewDeleteOptions(0))
|
||||
c.Core().Pods(ns).Delete(name, metav1.NewDeleteOptions(0))
|
||||
By("Wait for the node problem detector to disappear")
|
||||
Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
|
||||
By("Delete the config map")
|
||||
c.Core().ConfigMaps(ns).Delete(configName, nil)
|
||||
By("Clean up the events")
|
||||
Expect(c.Core().Events(eventNamespace).DeleteCollection(v1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
|
||||
Expect(c.Core().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
|
||||
By("Clean up the node condition")
|
||||
patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
|
||||
c.Core().RESTClient().Patch(api.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do()
|
||||
c.Core().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do()
|
||||
By("Clean up the temporary directory")
|
||||
framework.IssueSSHCommand(fmt.Sprintf("rm -r %s", tmpDir), framework.TestContext.Provider, node)
|
||||
})
|
||||
|
@ -393,7 +393,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
|
|||
})
|
||||
|
||||
// verifyEvents verifies there are num specific events generated
|
||||
func verifyEvents(e coreclientset.EventInterface, options v1.ListOptions, num int, reason, message string) error {
|
||||
func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int, reason, message string) error {
|
||||
events, err := e.List(options)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -412,7 +412,7 @@ func verifyEvents(e coreclientset.EventInterface, options v1.ListOptions, num in
|
|||
}
|
||||
|
||||
// verifyNoEvents verifies there is no event generated
|
||||
func verifyNoEvents(e coreclientset.EventInterface, options v1.ListOptions) error {
|
||||
func verifyNoEvents(e coreclientset.EventInterface, options metav1.ListOptions) error {
|
||||
events, err := e.List(options)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
10
vendor/k8s.io/kubernetes/test/e2e/nodeoutofdisk.go
generated
vendored
10
vendor/k8s.io/kubernetes/test/e2e/nodeoutofdisk.go
generated
vendored
|
@ -23,11 +23,11 @@ import (
|
|||
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
@ -140,7 +140,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
|
|||
"source": v1.DefaultSchedulerName,
|
||||
"reason": "FailedScheduling",
|
||||
}.AsSelector().String()
|
||||
options := v1.ListOptions{FieldSelector: selector}
|
||||
options := metav1.ListOptions{FieldSelector: selector}
|
||||
schedEvents, err := c.Core().Events(ns).List(options)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
|
@ -173,7 +173,7 @@ func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64)
|
|||
podClient := c.Core().Pods(ns)
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -199,10 +199,10 @@ func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64)
|
|||
// availCpu calculates the available CPU on a given node by subtracting the CPU requested by
|
||||
// all the pods from the total available CPU capacity on the node.
|
||||
func availCpu(c clientset.Interface, node *v1.Node) (int64, error) {
|
||||
podClient := c.Core().Pods(v1.NamespaceAll)
|
||||
podClient := c.Core().Pods(metav1.NamespaceAll)
|
||||
|
||||
selector := fields.Set{"spec.nodeName": node.Name}.AsSelector().String()
|
||||
options := v1.ListOptions{FieldSelector: selector}
|
||||
options := metav1.ListOptions{FieldSelector: selector}
|
||||
pods, err := podClient.List(options)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to retrieve all the pods on node %s: %v", node.Name, err)
|
||||
|
|
23
vendor/k8s.io/kubernetes/test/e2e/opaque_resource.go
generated
vendored
23
vendor/k8s.io/kubernetes/test/e2e/opaque_resource.go
generated
vendored
|
@ -22,14 +22,15 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/util/system"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
|
@ -45,7 +46,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
|
|||
BeforeEach(func() {
|
||||
if node == nil {
|
||||
// Priming invocation; select the first non-master node.
|
||||
nodes, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
|
||||
nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, n := range nodes.Items {
|
||||
if !system.IsMasterNode(n.Name) {
|
||||
|
@ -139,7 +140,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
|
|||
image := framework.GetPauseImageName(f.ClientSet)
|
||||
// This pod consumes 2 "foo" resources.
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "mult-container-oir",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -183,7 +184,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
|
|||
limits = v1.ResourceList{}
|
||||
// This pod consumes 6 "foo" resources.
|
||||
pod = &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "mult-container-over-max-oir",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
@ -228,7 +229,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
|
|||
func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1.ResourceName) {
|
||||
action := func() error {
|
||||
patch := []byte(fmt.Sprintf(`[{"op": "add", "path": "/status/capacity/%s", "value": "5"}]`, escapeForJSONPatch(opaqueResName)))
|
||||
return f.ClientSet.Core().RESTClient().Patch(api.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do().Error()
|
||||
return f.ClientSet.Core().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do().Error()
|
||||
}
|
||||
predicate := func(n *v1.Node) bool {
|
||||
capacity, foundCap := n.Status.Capacity[opaqueResName]
|
||||
|
@ -245,7 +246,7 @@ func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1
|
|||
func removeOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1.ResourceName) {
|
||||
action := func() error {
|
||||
patch := []byte(fmt.Sprintf(`[{"op": "remove", "path": "/status/capacity/%s"}]`, escapeForJSONPatch(opaqueResName)))
|
||||
f.ClientSet.Core().RESTClient().Patch(api.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do()
|
||||
f.ClientSet.Core().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do()
|
||||
return nil // Ignore error -- the opaque resource may not exist.
|
||||
}
|
||||
predicate := func(n *v1.Node) bool {
|
||||
|
@ -274,12 +275,12 @@ func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodeP
|
|||
|
||||
_, controller := cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
ls, err := f.ClientSet.Core().Nodes().List(options)
|
||||
return ls, err
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
w, err := f.ClientSet.Core().Nodes().Watch(options)
|
||||
// Signal parent goroutine that watching has begun.
|
||||
|
@ -330,11 +331,11 @@ func observeEventAfterAction(f *framework.Framework, eventPredicate func(*v1.Eve
|
|||
// Create an informer to list/watch events from the test framework namespace.
|
||||
_, controller := cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
ls, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options)
|
||||
return ls, err
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
w, err := f.ClientSet.Core().Events(f.Namespace.Name).Watch(options)
|
||||
return w, err
|
||||
},
|
||||
|
|
56
vendor/k8s.io/kubernetes/test/e2e/pd.go
generated
vendored
56
vendor/k8s.io/kubernetes/test/e2e/pd.go
generated
vendored
|
@ -93,8 +93,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
// Teardown pods, PD. Ignore errors.
|
||||
// Teardown should do nothing unless test failed.
|
||||
By("cleaning up PD-RW test environment")
|
||||
podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
|
||||
podClient.Delete(host1Pod.Name, metav1.NewDeleteOptions(0))
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
|
@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
|
||||
By("deleting host0Pod")
|
||||
// Delete pod with 0 grace period
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
|
||||
By("submitting host1Pod to kubernetes")
|
||||
_, err = podClient.Create(host1Pod)
|
||||
|
@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
|
||||
|
||||
By("deleting host1Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host1Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host1Pod")
|
||||
|
||||
By("Test completed successfully, waiting for PD to safely detach")
|
||||
waitForPDDetach(diskName, host0Name)
|
||||
|
@ -157,8 +157,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
// Teardown pods, PD. Ignore errors.
|
||||
// Teardown should do nothing unless test failed.
|
||||
By("cleaning up PD-RW test environment")
|
||||
podClient.Delete(host0Pod.Name, &v1.DeleteOptions{})
|
||||
podClient.Delete(host1Pod.Name, &v1.DeleteOptions{})
|
||||
podClient.Delete(host0Pod.Name, &metav1.DeleteOptions{})
|
||||
podClient.Delete(host1Pod.Name, &metav1.DeleteOptions{})
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
|
@ -179,7 +179,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
|
||||
By("deleting host0Pod")
|
||||
// Delete pod with default grace period 30s
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, &v1.DeleteOptions{}), "Failed to delete host0Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, &metav1.DeleteOptions{}), "Failed to delete host0Pod")
|
||||
|
||||
By("submitting host1Pod to kubernetes")
|
||||
_, err = podClient.Create(host1Pod)
|
||||
|
@ -197,7 +197,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
|
||||
|
||||
By("deleting host1Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host1Pod.Name, &v1.DeleteOptions{}), "Failed to delete host1Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host1Pod.Name, &metav1.DeleteOptions{}), "Failed to delete host1Pod")
|
||||
|
||||
By("Test completed successfully, waiting for PD to safely detach")
|
||||
waitForPDDetach(diskName, host0Name)
|
||||
|
@ -221,9 +221,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
By("cleaning up PD-RO test environment")
|
||||
// Teardown pods, PD. Ignore errors.
|
||||
// Teardown should do nothing unless test failed.
|
||||
podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(rwPod.Name, metav1.NewDeleteOptions(0))
|
||||
podClient.Delete(host0ROPod.Name, metav1.NewDeleteOptions(0))
|
||||
podClient.Delete(host1ROPod.Name, metav1.NewDeleteOptions(0))
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
|
@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
framework.ExpectNoError(err, "Failed to create rwPod")
|
||||
framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
|
||||
// Delete pod with 0 grace period
|
||||
framework.ExpectNoError(podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
framework.ExpectNoError(podClient.Delete(rwPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
|
||||
|
||||
By("submitting host0ROPod to kubernetes")
|
||||
|
@ -248,10 +248,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))
|
||||
|
||||
By("deleting host0ROPod")
|
||||
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0ROPod")
|
||||
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0ROPod")
|
||||
|
||||
By("deleting host1ROPod")
|
||||
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1ROPod")
|
||||
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host1ROPod")
|
||||
|
||||
By("Test completed successfully, waiting for PD to safely detach")
|
||||
waitForPDDetach(diskName, host0Name)
|
||||
|
@ -273,9 +273,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
By("cleaning up PD-RO test environment")
|
||||
// Teardown pods, PD. Ignore errors.
|
||||
// Teardown should do nothing unless test failed.
|
||||
podClient.Delete(rwPod.Name, &v1.DeleteOptions{})
|
||||
podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{})
|
||||
podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{})
|
||||
podClient.Delete(rwPod.Name, &metav1.DeleteOptions{})
|
||||
podClient.Delete(host0ROPod.Name, &metav1.DeleteOptions{})
|
||||
podClient.Delete(host1ROPod.Name, &metav1.DeleteOptions{})
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
|
||||
}()
|
||||
|
||||
|
@ -284,7 +284,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
framework.ExpectNoError(err, "Failed to create rwPod")
|
||||
framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
|
||||
// Delete pod with default grace period 30s
|
||||
framework.ExpectNoError(podClient.Delete(rwPod.Name, &v1.DeleteOptions{}), "Failed to delete host0Pod")
|
||||
framework.ExpectNoError(podClient.Delete(rwPod.Name, &metav1.DeleteOptions{}), "Failed to delete host0Pod")
|
||||
framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
|
||||
|
||||
By("submitting host0ROPod to kubernetes")
|
||||
|
@ -300,10 +300,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))
|
||||
|
||||
By("deleting host0ROPod")
|
||||
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host0ROPod")
|
||||
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, &metav1.DeleteOptions{}), "Failed to delete host0ROPod")
|
||||
|
||||
By("deleting host1ROPod")
|
||||
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host1ROPod")
|
||||
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, &metav1.DeleteOptions{}), "Failed to delete host1ROPod")
|
||||
|
||||
By("Test completed successfully, waiting for PD to safely detach")
|
||||
waitForPDDetach(diskName, host0Name)
|
||||
|
@ -324,7 +324,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
// Teardown pods, PD. Ignore errors.
|
||||
// Teardown should do nothing unless test failed.
|
||||
if host0Pod != nil {
|
||||
podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
|
||||
}
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
|
||||
}()
|
||||
|
@ -356,7 +356,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
|
||||
|
||||
By("deleting host0Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
}
|
||||
|
||||
By("Test completed successfully, waiting for PD to safely detach")
|
||||
|
@ -379,7 +379,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
// Teardown pods, PD. Ignore errors.
|
||||
// Teardown should do nothing unless test failed.
|
||||
if host0Pod != nil {
|
||||
podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
|
||||
}
|
||||
detachAndDeletePDs(disk1Name, []types.NodeName{host0Name})
|
||||
detachAndDeletePDs(disk2Name, []types.NodeName{host0Name})
|
||||
|
@ -415,7 +415,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
|
||||
|
||||
By("deleting host0Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
|
||||
}
|
||||
|
||||
By("Test completed successfully, waiting for PD to safely detach")
|
||||
|
@ -439,7 +439,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
|
||||
defer func() {
|
||||
By("Cleaning up PD-RW test env")
|
||||
podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
|
||||
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
|
||||
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
|
||||
}()
|
||||
|
||||
|
@ -521,10 +521,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/))
|
||||
|
||||
By("deleting api object of host0")
|
||||
framework.ExpectNoError(nodeClient.Delete(string(host0Name), v1.NewDeleteOptions(0)), "Unable to delete host0")
|
||||
framework.ExpectNoError(nodeClient.Delete(string(host0Name), metav1.NewDeleteOptions(0)), "Unable to delete host0")
|
||||
|
||||
By("deleting host0pod")
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Unable to delete host0Pod")
|
||||
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")
|
||||
// The disk should be detached from host0 on its deletion
|
||||
By("Waiting for pd to detach from host0")
|
||||
framework.ExpectNoError(waitForPDDetach(diskName, host0Name), "Timed out waiting for detach pd")
|
||||
|
@ -727,7 +727,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
|
|||
Kind: "Pod",
|
||||
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pd-test-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
|
|
2 vendor/k8s.io/kubernetes/test/e2e/persistent_volumes-disruptive.go generated vendored
@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Disruptive]", func() {
)

nfsServerConfig := VolumeTestConfig{
namespace: v1.NamespaceDefault,
namespace: metav1.NamespaceDefault,
prefix: "nfs",
serverImage: NfsServerImage,
serverPorts: []int{2049},
8
vendor/k8s.io/kubernetes/test/e2e/persistent_volumes.go
generated
vendored
8
vendor/k8s.io/kubernetes/test/e2e/persistent_volumes.go
generated
vendored
|
@ -507,7 +507,7 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
|
|||
|
||||
// config for the nfs-server pod in the default namespace
|
||||
NFSconfig = VolumeTestConfig{
|
||||
namespace: v1.NamespaceDefault,
|
||||
namespace: metav1.NamespaceDefault,
|
||||
prefix: "nfs",
|
||||
serverImage: NfsServerImage,
|
||||
serverPorts: []int{2049},
|
||||
|
@ -775,7 +775,7 @@ func makePersistentVolume(pvConfig persistentVolumeConfig) *v1.PersistentVolume
|
|||
}
|
||||
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: pvConfig.namePrefix,
|
||||
Annotations: map[string]string{
|
||||
volumehelper.VolumeGidAnnotationKey: "777",
|
||||
|
@ -805,7 +805,7 @@ func makePersistentVolumeClaim(ns string) *v1.PersistentVolumeClaim {
|
|||
// Specs are expected to match this test's PersistentVolume
|
||||
|
||||
return &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-",
|
||||
Namespace: ns,
|
||||
Annotations: map[string]string{
|
||||
|
@ -846,7 +846,7 @@ func makePod(ns string, pvcName string, command ...string) *v1.Pod {
|
|||
Kind: "Pod",
|
||||
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "client-",
|
||||
Namespace: ns,
|
||||
},
|
||||
|
|
9
vendor/k8s.io/kubernetes/test/e2e/pod_gc.go
generated
vendored
9
vendor/k8s.io/kubernetes/test/e2e/pod_gc.go
generated
vendored
|
@ -22,6 +22,7 @@ import (
|
|||
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/util/uuid"
|
||||
|
@ -61,7 +62,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
|
|||
|
||||
By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold))
|
||||
pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
|
||||
pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
|
||||
pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Failed to list pod %v", err)
|
||||
return false, nil
|
||||
|
@ -81,11 +82,8 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
|
|||
func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) {
|
||||
uuid := uuid.NewUUID()
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: string(uuid),
|
||||
Annotations: map[string]string{
|
||||
"scheduler.alpha.kubernetes.io/name": "please don't schedule my pods",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
|
@ -94,6 +92,7 @@ func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) {
|
|||
Image: "gcr.io/google_containers/busybox:1.24",
|
||||
},
|
||||
},
|
||||
SchedulerName: "please don't schedule my pods",
|
||||
},
|
||||
}
|
||||
return f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
|
||||
|
|
12
vendor/k8s.io/kubernetes/test/e2e/pods.go
generated
vendored
12
vendor/k8s.io/kubernetes/test/e2e/pods.go
generated
vendored
|
@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
|
|||
name := "pod-submit-remove-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
|
@ -69,11 +69,11 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
|
|||
|
||||
By("setting up watch")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err := podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
|
||||
Expect(len(pods.Items)).To(Equal(0))
|
||||
options = v1.ListOptions{
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector.String(),
|
||||
ResourceVersion: pods.ListMeta.ResourceVersion,
|
||||
}
|
||||
|
@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
|
|||
|
||||
By("verifying the pod is in kubernetes")
|
||||
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options = v1.ListOptions{LabelSelector: selector.String()}
|
||||
options = metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err = podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
|
||||
Expect(len(pods.Items)).To(Equal(1))
|
||||
|
@ -184,7 +184,7 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
|
|||
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
|
||||
|
||||
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options = v1.ListOptions{LabelSelector: selector.String()}
|
||||
options = metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err = podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
|
||||
Expect(len(pods.Items)).To(Equal(0))
|
||||
|
@ -201,7 +201,7 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
|
|||
By("creating the pod")
|
||||
name := "pod-qos-class-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": name,
|
||||
|
|
3
vendor/k8s.io/kubernetes/test/e2e/portforward.go
generated
vendored
3
vendor/k8s.io/kubernetes/test/e2e/portforward.go
generated
vendored
|
@ -28,6 +28,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
|
@ -49,7 +50,7 @@ var (
|
|||
|
||||
func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string, bindAddress string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
|
|
10 vendor/k8s.io/kubernetes/test/e2e/pre_stop.go generated vendored

@@ -25,10 +25,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/pkg/api/v1"
rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"

@@ -42,7 +42,7 @@ type State struct {
func testPreStop(c clientset.Interface, ns string) {
// This is the server that will receive the preStop notification
podDescr := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "server",
},
Spec: v1.PodSpec{

@@ -75,7 +75,7 @@ func testPreStop(c clientset.Interface, ns string) {
framework.ExpectNoError(err, "getting pod info")
preStopDescr := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "tester",
},
Spec: v1.PodSpec{

@@ -181,7 +181,7 @@ var _ = framework.KubeDescribe("PreStop", func() {
// this test wants extra permissions. Since the namespace names are unique, we can leave this
// lying around so we don't have to race any caches
framework.BindClusterRole(f.ClientSet.Rbac(), "cluster-admin", f.Namespace.Name,
rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
err := framework.WaitForAuthorizationUpdate(f.ClientSet.Authorization(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
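The RBAC part of the pre_stop change moves the test setup from the v1alpha1 to the v1beta1 RBAC types. Roughly, the call site ends up as in the sketch below; the helper call, role name, and "default" service account are taken from the hunk, while the surrounding function is illustrative only.

package e2e

import (
	rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// bindClusterAdmin grants cluster-admin to the namespace's default service
// account using the v1beta1 RBAC types this commit switches the tests to.
func bindClusterAdmin(f *framework.Framework) {
	framework.BindClusterRole(f.ClientSet.Rbac(), "cluster-admin", f.Namespace.Name,
		rbacv1beta1.Subject{
			Kind:      rbacv1beta1.ServiceAccountKind,
			Namespace: f.Namespace.Name,
			Name:      "default",
		})
}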
3 vendor/k8s.io/kubernetes/test/e2e/proxy.go generated vendored

@@ -25,6 +25,7 @@ import (
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"

@@ -72,7 +73,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
start := time.Now()
labels := map[string]string{"proxy-service-target": "true"}
service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "proxy-service-",
},
Spec: v1.ServiceSpec{
8 vendor/k8s.io/kubernetes/test/e2e/rc.go generated vendored

@@ -55,13 +55,13 @@ var _ = framework.KubeDescribe("ReplicationController", func() {
func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string) *v1.ReplicationController {
zero := int64(0)
return &v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: rsName,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Labels: rcPodLabels,
},
Spec: v1.PodSpec{

@@ -91,7 +91,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
// in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating replication controller %s", name))
controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ReplicationControllerSpec{

@@ -100,7 +100,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
"name": name,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: v1.PodSpec{
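For controller objects the same metadata migration appears twice per literal: once on the controller and once on its embedded pod template. A sketch under that assumption (the helper name, selector wiring, and arguments are illustrative, not from this diff):

package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// exampleRC mirrors the migrated newRC helper: metav1.ObjectMeta appears both
// on the ReplicationController itself and on its embedded pod template.
func exampleRC(name string, replicas int32, podLabels map[string]string, image string) *v1.ReplicationController {
	return &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int32) *int32 { return &i }(replicas),
			Selector: podLabels,
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: podLabels,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Name: name, Image: image},
					},
				},
			},
		},
	}
}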
8 vendor/k8s.io/kubernetes/test/e2e/reboot.go generated vendored

@@ -23,12 +23,12 @@ import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"

@@ -65,9 +65,9 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
if CurrentGinkgoTestDescription().Failed {
// Most of the reboot tests just make sure that addon/system pods are running, so dump
// events for the kube-system namespace on failures
namespaceName := api.NamespaceSystem
namespaceName := metav1.NamespaceSystem
By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
events, err := f.ClientSet.Core().Events(namespaceName).List(v1.ListOptions{})
events, err := f.ClientSet.Core().Events(namespaceName).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, e := range events.Items {

@@ -218,7 +218,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
// failed step, it will return false through result and not run the rest.
func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
// Setup
ns := api.NamespaceSystem
ns := metav1.NamespaceSystem
ps := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
defer ps.Stop()
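The reboot hunks also show the namespace-constant move: api.NamespaceSystem becomes metav1.NamespaceSystem, and the fields package now comes from apimachinery. A sketch of the post-migration event dump, with the clientset `c` as an assumed parameter and plain fmt output standing in for the framework logger:

package e2e

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// dumpSystemEvents lists events in kube-system the way the migrated reboot
// test does: the namespace constant and the list options both come from metav1.
func dumpSystemEvents(c clientset.Interface) error {
	events, err := c.Core().Events(metav1.NamespaceSystem).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, e := range events.Items {
		fmt.Printf("event for %v: %v %v: %v\n", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
	}
	return nil
}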
10 vendor/k8s.io/kubernetes/test/e2e/replica_set.go generated vendored

@@ -37,13 +37,13 @@ import (
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
zero := int64(0)
return &extensions.ReplicaSet{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: rsName,
},
Spec: extensions.ReplicaSetSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Labels: rsPodLabels,
},
Spec: v1.PodSpec{

@@ -62,7 +62,7 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa
func newPodQuota(name, number string) *v1.ResourceQuota {
return &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ResourceQuotaSpec{

@@ -103,7 +103,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
// in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating ReplicaSet %s", name))
rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: extensions.ReplicaSetSpec{

@@ -112,7 +112,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
"name": name,
}},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: v1.PodSpec{
10 vendor/k8s.io/kubernetes/test/e2e/rescheduler.go generated vendored

@@ -20,8 +20,8 @@ import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"

@@ -55,16 +55,16 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
listOpts := v1.ListOptions{LabelSelector: label.String()}
deployments, err := f.ClientSet.Extensions().Deployments(api.NamespaceSystem).List(listOpts)
listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := f.ClientSet.Extensions().Deployments(metav1.NamespaceSystem).List(listOpts)
framework.ExpectNoError(err)
Expect(len(deployments.Items)).Should(Equal(1))
deployment := deployments.Items[0]
replicas := uint(*(deployment.Spec.Replicas))
err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas, true))
err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, metav1.NamespaceSystem, deployment.Name, replicas, true))
framework.ExpectNoError(err)
})
7 vendor/k8s.io/kubernetes/test/e2e/resize_nodes.go generated vendored

@@ -24,7 +24,6 @@ import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/util/intstr"

@@ -138,7 +137,7 @@ func WaitForGroupSize(group string, size int32) error {
func svcByName(name string, port int) *v1.Service {
return &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{

@@ -238,10 +237,10 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
By("waiting for system pods to successfully restart")
err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels, true)
err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels, true)
Expect(err).NotTo(HaveOccurred())
By("waiting for image prepulling pods to complete")
framework.WaitForPodsSuccess(c, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout)
framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout)
})
It("should be able to delete nodes", func() {
30 vendor/k8s.io/kubernetes/test/e2e/resource_quota.go generated vendored

@@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
It("should create a ResourceQuota and capture the life of a secret.", func() {
By("Discovering how many secrets are in namespace by default")
secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(v1.ListOptions{})
secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
defaultSecrets := fmt.Sprintf("%d", len(secrets.Items))
hardSecrets := fmt.Sprintf("%d", len(secrets.Items)+1)

@@ -188,7 +188,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")

@@ -410,7 +410,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")

@@ -449,7 +449,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")

@@ -497,7 +497,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")

@@ -527,7 +527,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")

@@ -549,7 +549,7 @@ func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1
hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
}
return &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{Name: name},
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard, Scopes: []v1.ResourceQuotaScope{scope}},
}
}

@@ -572,7 +572,7 @@ func newTestResourceQuota(name string) *v1.ResourceQuota {
hard[core.V1ResourceByStorageClass("gold", v1.ResourcePersistentVolumeClaims)] = resource.MustParse("10")
hard[core.V1ResourceByStorageClass("gold", v1.ResourceRequestsStorage)] = resource.MustParse("10Gi")
return &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{Name: name},
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard},
}
}

@@ -580,7 +580,7 @@ func newTestResourceQuota(name string) *v1.ResourceQuota {
// newTestPodForQuota returns a pod that has the specified requests and limits
func newTestPodForQuota(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{

@@ -601,7 +601,7 @@ func newTestPodForQuota(f *framework.Framework, name string, requests v1.Resourc
// newTestPersistentVolumeClaimForQuota returns a simple persistent volume claim
func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PersistentVolumeClaimSpec{

@@ -622,7 +622,7 @@ func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim
// newTestReplicationControllerForQuota returns a simple replication controller
func newTestReplicationControllerForQuota(name, image string, replicas int32) *v1.ReplicationController {
return &v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ReplicationControllerSpec{

@@ -631,7 +631,7 @@ func newTestReplicationControllerForQuota(name, image string, replicas int32) *v
"name": name,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: v1.PodSpec{

@@ -650,7 +650,7 @@ func newTestReplicationControllerForQuota(name, image string, replicas int32) *v
// newTestServiceForQuota returns a simple service
func newTestServiceForQuota(name string, serviceType v1.ServiceType) *v1.Service {
return &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{

@@ -665,7 +665,7 @@ func newTestServiceForQuota(name string, serviceType v1.ServiceType) *v1.Service
func newTestConfigMapForQuota(name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string]string{

@@ -676,7 +676,7 @@ func newTestConfigMapForQuota(name string) *v1.ConfigMap {
func newTestSecretForQuota(name string) *v1.Secret {
return &v1.Secret{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string][]byte{
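The repeated resource_quota change is the zero-grace-period delete helper moving to metav1. In sketch form, with the clientset, namespace, and pod name as assumed parameters:

package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// deletePodNow force-deletes a pod with a zero grace period, using the
// metav1.NewDeleteOptions helper the quota tests now call.
func deletePodNow(c clientset.Interface, ns, name string) error {
	return c.Core().Pods(ns).Delete(name, metav1.NewDeleteOptions(0))
}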
7 vendor/k8s.io/kubernetes/test/e2e/restart.go generated vendored

@@ -21,11 +21,10 @@ import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/fields"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"

@@ -65,7 +64,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
// check must be identical to that call.
framework.SkipUnlessProviderIs("gce", "gke")
ps = testutils.NewPodStore(f.ClientSet, api.NamespaceSystem, labels.Everything(), fields.Everything())
ps = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
})
AfterEach(func() {

@@ -90,7 +89,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
for i, p := range pods {
podNamesBefore[i] = p.ObjectMeta.Name
}
ns := api.NamespaceSystem
ns := metav1.NamespaceSystem
if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
}
13 vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go generated vendored

@@ -23,7 +23,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"

@@ -91,7 +90,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
}
}
err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels, true)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels, true)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {

@@ -158,7 +157,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
}
framework.WaitForStableCluster(cs, masterNodes)
pods, err := cs.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
pods, err := cs.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, pod := range pods.Items {
_, found := nodeToCapacityMap[pod.Spec.NodeName]

@@ -506,7 +505,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
By("Launching two pods on two distinct nodes to get two node names")
CreateHostPortPods(f, "host-port", 2, true)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "host-port")
podList, err := cs.Core().Pods(ns).List(v1.ListOptions{})
podList, err := cs.Core().Pods(ns).List(metav1.ListOptions{})
framework.ExpectNoError(err)
Expect(len(podList.Items)).To(Equal(2))
nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}

@@ -759,7 +758,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: conf.Name,
Labels: conf.Labels,
Annotations: conf.Annotations,

@@ -803,7 +802,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
pod := runPausePod(f, conf)
By("Explicitly delete pod here to free the resource it takes.")
err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
return pod.Spec.NodeName

@@ -917,7 +916,7 @@ func waitForScheduler() {
// TODO: upgrade calls in PodAffinity tests when we're able to run them
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
allPods, err := c.Core().Pods(ns).List(v1.ListOptions{})
allPods, err := c.Core().Pods(ns).List(metav1.ListOptions{})
framework.ExpectNoError(err)
scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)
2 vendor/k8s.io/kubernetes/test/e2e/security_context.go generated vendored

@@ -37,7 +37,7 @@ import (
func scTestPod(hostIPC bool, hostPID bool) *v1.Pod {
podName := "security-context-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
Annotations: map[string]string{},
14 vendor/k8s.io/kubernetes/test/e2e/service.go generated vendored

@@ -66,7 +66,7 @@ var _ = framework.KubeDescribe("Services", func() {
// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
It("should provide secure master service [Conformance]", func() {
_, err := cs.Core().Services(v1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
_, err := cs.Core().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
})

@@ -86,7 +86,7 @@ var _ = framework.KubeDescribe("Services", func() {
}()
service := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
},
Spec: v1.ServiceSpec{

@@ -148,7 +148,7 @@ var _ = framework.KubeDescribe("Services", func() {
By("creating service " + serviceName + " in namespace " + ns)
service := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
},
Spec: v1.ServiceSpec{

@@ -791,7 +791,7 @@ var _ = framework.KubeDescribe("Services", func() {
By("creating service " + serviceName + " with same NodePort but different protocols in namespace " + ns)
service := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: t.ServiceName,
Namespace: t.Namespace,
},

@@ -1027,7 +1027,7 @@ var _ = framework.KubeDescribe("Services", func() {
terminateSeconds := int64(600)
service := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: t.ServiceName,
Namespace: t.Namespace,
Annotations: map[string]string{endpoint.TolerateUnreadyEndpointsAnnotation: "true"},

@@ -1137,7 +1137,7 @@ var _ = framework.KubeDescribe("Services", func() {
By("Remove pods immediately")
label := labels.SelectorFromSet(labels.Set(t.Labels))
options := v1.ListOptions{LabelSelector: label.String()}
options := metav1.ListOptions{LabelSelector: label.String()}
podClient := t.Client.Core().Pods(f.Namespace.Name)
pods, err := podClient.List(options)
if err != nil {

@@ -1145,7 +1145,7 @@ var _ = framework.KubeDescribe("Services", func() {
} else {
for _, pod := range pods.Items {
var gracePeriodSeconds int64 = 0
err := podClient.Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
err := podClient.Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
if err != nil {
framework.Logf("warning: error force deleting pod '%s': %s", pod.Name, err)
}
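The last two service hunks combine the list-options and delete-options moves: pods are selected with metav1.ListOptions and removed with an explicit metav1.DeleteOptions carrying a zero grace period. A sketch of that combination, with the clientset, namespace, and labels as assumed parameters:

package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// forceDeletePods removes every pod matching the given labels immediately,
// using the metav1 list and delete option types the service tests now use.
func forceDeletePods(c clientset.Interface, ns string, podLabels map[string]string) error {
	selector := labels.SelectorFromSet(labels.Set(podLabels))
	pods, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return err
	}
	var gracePeriodSeconds int64 = 0
	for _, pod := range pods.Items {
		if err := c.Core().Pods(ns).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}); err != nil {
			return err
		}
	}
	return nil
}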
2 vendor/k8s.io/kubernetes/test/e2e/service_accounts.go generated vendored

@@ -191,7 +191,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
}))
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pod-service-account-" + string(uuid.NewUUID()) + "-",
},
Spec: v1.PodSpec{
11 vendor/k8s.io/kubernetes/test/e2e/service_latency.go generated vendored

@@ -22,13 +22,14 @@ import (
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"

@@ -278,11 +279,11 @@ func (eq *endpointQueries) added(e *v1.Endpoints) {
func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
},
},

@@ -317,7 +318,7 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) {
// Make a service that points to that pod.
svc := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "latency-svc-",
},
Spec: v1.ServiceSpec{
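service_latency.go shows the informer plumbing after the move: both ListWatch callbacks take metav1.ListOptions, and the REST client and flow-control utilities now come from client-go. A compact sketch of an endpoints informer wired the same way; the function name, resync period, and handler body are illustrative, not part of the diff.

package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/test/e2e/framework"
)

// startEndpointsInformer wires up an endpoints informer the way the migrated
// latency test does: both ListWatch callbacks now take metav1.ListOptions.
func startEndpointsInformer(f *framework.Framework, stopCh <-chan struct{}) {
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options)
				return runtime.Object(obj), err
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
			},
		},
		&v1.Endpoints{},
		0, // no resync; purely illustrative
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if ep, ok := obj.(*v1.Endpoints); ok {
					framework.Logf("observed endpoints %q", ep.Name)
				}
			},
		},
	)
	go controller.Run(stopCh)
}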
30 vendor/k8s.io/kubernetes/test/e2e/serviceloadbalancers.go generated vendored

@@ -18,9 +18,9 @@ package e2e
import (
"fmt"
"io/ioutil"
"net/http"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"

@@ -113,7 +113,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) {
// Find the pods of the rc we just created.
labelSelector := labels.SelectorFromSet(
labels.Set(map[string]string{"name": h.rcName}))
options := v1.ListOptions{LabelSelector: labelSelector.String()}
options := metav1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := h.client.Core().Pods(h.rcNamespace).List(options)
if err != nil {
return err

@@ -197,7 +197,7 @@ func (s *ingManager) test(path string) error {
url := fmt.Sprintf("%v/hostName", path)
httpClient := &http.Client{}
return wait.Poll(pollInterval, framework.ServiceRespondingTimeout, func() (bool, error) {
body, err := simpleGET(httpClient, url, "")
body, err := framework.SimpleGET(httpClient, url, "")
if err != nil {
framework.Logf("%v\n%v\n%v", url, body, err)
return false, nil

@@ -239,30 +239,6 @@ var _ = framework.KubeDescribe("ServiceLoadBalancer [Feature:ServiceLoadBalancer
})
})
// simpleGET executes a get on the given url, returns error if non-200 returned.
func simpleGET(c *http.Client, url, host string) (string, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
req.Host = host
res, err := c.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
rawBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
body := string(rawBody)
if res.StatusCode != http.StatusOK {
err = fmt.Errorf(
"GET returned http error %v", res.StatusCode)
}
return body, err
}
// rcFromManifest reads a .json/yaml file and returns the rc in it.
func rcFromManifest(fileName string) *v1.ReplicationController {
var controller v1.ReplicationController
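The final hunk drops the file-local simpleGET helper in favor of framework.SimpleGET, which the removed code suggests has the same contract (returns the response body, and a non-nil error for non-200 status). A usage sketch; the function name, URL handling, and parameters are illustrative:

package e2e

import (
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// pollHostName polls url+"/hostName" until framework.SimpleGET gets a 200
// response, mirroring the shape of the migrated ingManager.test method.
func pollHostName(url string, interval, timeout time.Duration) error {
	httpClient := &http.Client{}
	return wait.Poll(interval, timeout, func() (bool, error) {
		body, err := framework.SimpleGET(httpClient, url+"/hostName", "")
		if err != nil {
			framework.Logf("%v\n%v\n%v", url, body, err)
			return false, nil
		}
		return true, nil
	})
}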
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff