Vendor: Update k8s version

Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>

parent dfa93414c5
commit 52baf68d50

3756 changed files with 113013 additions and 92675 deletions
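The hunks below repeat a handful of mechanical moves from the upstream rebase: ObjectMeta, ListOptions, DeleteOptions, GetOptions and the Namespace* constants now come from k8s.io/apimachinery/pkg/apis/meta/v1, the REST client moved from pkg/client/restclient to k8s.io/client-go/rest, clientcmd moved to k8s.io/client-go/tools/clientcmd, and the patch-type constants moved from pkg/api to k8s.io/apimachinery/pkg/types. A minimal sketch of the before/after shape, assuming the post-rebase vendored tree (the package, helper name and body are illustrative, not part of this diff):

    package example

    import (
        // was "k8s.io/kubernetes/pkg/api/v1" for these meta types
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        // was "k8s.io/kubernetes/pkg/client/restclient"
        restclient "k8s.io/client-go/rest"
        "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    )

    // listDefaultPods shows the recurring edit: list options and
    // namespace constants are metav1 types after the rebase.
    func listDefaultPods(host string) error {
        c := clientset.NewForConfigOrDie(&restclient.Config{Host: host})
        _, err := c.Core().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{})
        return err
    }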
vendor/k8s.io/kubernetes/test/integration/auth/accessreview_test.go (generated, vendored, 11 changes)

@@ -24,13 +24,14 @@ import (
 	"strings"
 	"testing"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apiserver/pkg/authentication/authenticator"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	authorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/plugin/pkg/admission/admit"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -248,7 +249,7 @@ func TestLocalSubjectAccessReview(t *testing.T) {
 			name:      "simple allow",
 			namespace: "foo",
 			sar: &authorizationapi.LocalSubjectAccessReview{
-				ObjectMeta: api.ObjectMeta{Namespace: "foo"},
+				ObjectMeta: metav1.ObjectMeta{Namespace: "foo"},
 				Spec: authorizationapi.SubjectAccessReviewSpec{
 					ResourceAttributes: &authorizationapi.ResourceAttributes{
 						Verb: "list",
@@ -269,7 +270,7 @@ func TestLocalSubjectAccessReview(t *testing.T) {
 			name:      "simple deny",
 			namespace: "foo",
 			sar: &authorizationapi.LocalSubjectAccessReview{
-				ObjectMeta: api.ObjectMeta{Namespace: "foo"},
+				ObjectMeta: metav1.ObjectMeta{Namespace: "foo"},
 				Spec: authorizationapi.SubjectAccessReviewSpec{
 					ResourceAttributes: &authorizationapi.ResourceAttributes{
 						Verb: "list",
@@ -291,7 +292,7 @@ func TestLocalSubjectAccessReview(t *testing.T) {
 			name:      "conflicting namespace",
 			namespace: "foo",
 			sar: &authorizationapi.LocalSubjectAccessReview{
-				ObjectMeta: api.ObjectMeta{Namespace: "foo"},
+				ObjectMeta: metav1.ObjectMeta{Namespace: "foo"},
 				Spec: authorizationapi.SubjectAccessReviewSpec{
 					ResourceAttributes: &authorizationapi.ResourceAttributes{
 						Verb: "list",
@@ -309,7 +310,7 @@ func TestLocalSubjectAccessReview(t *testing.T) {
 			name:      "missing namespace",
 			namespace: "foo",
 			sar: &authorizationapi.LocalSubjectAccessReview{
-				ObjectMeta: api.ObjectMeta{Namespace: "foo"},
+				ObjectMeta: metav1.ObjectMeta{Namespace: "foo"},
 				Spec: authorizationapi.SubjectAccessReviewSpec{
 					ResourceAttributes: &authorizationapi.ResourceAttributes{
 						Verb: "list",
vendor/k8s.io/kubernetes/test/integration/auth/auth_test.go (generated, vendored, 13 changes)

@@ -36,20 +36,21 @@ import (
 	"testing"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apiserver/pkg/authentication/authenticator"
 	"k8s.io/apiserver/pkg/authentication/group"
 	"k8s.io/apiserver/pkg/authentication/request/bearertoken"
+	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
+	"k8s.io/client-go/tools/clientcmd/api/v1"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	authenticationv1beta1 "k8s.io/kubernetes/pkg/apis/authentication/v1beta1"
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/auth/authorizer/abac"
-	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1"
 	apiserverauthorizer "k8s.io/kubernetes/pkg/genericapiserver/authorizer"
-	"k8s.io/kubernetes/pkg/serviceaccount"
 	"k8s.io/kubernetes/plugin/pkg/admission/admit"
 	"k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/tokentest"
 	"k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook"
@@ -968,10 +969,10 @@ func TestNamespaceAuthorization(t *testing.T) {
 		{"GET", path("pods", "foo", "a"), "bar", "", integration.Code403},
 		{"DELETE", timeoutPath("pods", "foo", "a"), "bar", "", integration.Code403},
 
-		{"POST", timeoutPath("pods", api.NamespaceDefault, ""), "", aPod, integration.Code403},
+		{"POST", timeoutPath("pods", metav1.NamespaceDefault, ""), "", aPod, integration.Code403},
 		{"GET", path("pods", "", ""), "", "", integration.Code403},
-		{"GET", path("pods", api.NamespaceDefault, "a"), "", "", integration.Code403},
-		{"DELETE", timeoutPath("pods", api.NamespaceDefault, "a"), "", "", integration.Code403},
+		{"GET", path("pods", metav1.NamespaceDefault, "a"), "", "", integration.Code403},
+		{"DELETE", timeoutPath("pods", metav1.NamespaceDefault, "a"), "", "", integration.Code403},
 	}
 
 	for _, r := range requests {
@@ -1139,7 +1140,7 @@ func TestReadOnlyAuthorization(t *testing.T) {
 	}{
 		{"POST", path("pods", ns.Name, ""), aPod, integration.Code403},
 		{"GET", path("pods", ns.Name, ""), "", integration.Code200},
-		{"GET", path("pods", api.NamespaceDefault, "a"), "", integration.Code404},
+		{"GET", path("pods", metav1.NamespaceDefault, "a"), "", integration.Code404},
 	}
 
 	for _, r := range requests {
vendor/k8s.io/kubernetes/test/integration/auth/rbac_test.go (generated, vendored, 35 changes)

@@ -30,19 +30,20 @@ import (
 
 	"github.com/golang/glog"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/apiserver/pkg/authentication/authenticator"
 	"k8s.io/apiserver/pkg/authentication/request/bearertoken"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/transport"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
-	"k8s.io/kubernetes/pkg/client/transport"
+	"k8s.io/kubernetes/pkg/genericapiserver/registry/generic"
 	"k8s.io/kubernetes/pkg/master"
-	"k8s.io/kubernetes/pkg/registry/generic"
 	"k8s.io/kubernetes/pkg/registry/rbac/clusterrole"
 	clusterrolestore "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage"
 	"k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding"
@@ -169,7 +170,7 @@ func (s statusCode) String() string {
 var (
 	writeJobsRoleBinding = `
 {
-  "apiVersion": "rbac.authorization.k8s.io/v1alpha1",
+  "apiVersion": "rbac.authorization.k8s.io/v1beta1",
   "kind": "RoleBinding",
   "metadata": {
     "name": "pi"%s
@@ -268,17 +269,17 @@ func TestRBAC(t *testing.T) {
 			bootstrapRoles: bootstrapRoles{
 				clusterRoles: []rbacapi.ClusterRole{
 					{
-						ObjectMeta: api.ObjectMeta{Name: "allow-all"},
+						ObjectMeta: metav1.ObjectMeta{Name: "allow-all"},
 						Rules: []rbacapi.PolicyRule{ruleAllowAll},
 					},
 					{
-						ObjectMeta: api.ObjectMeta{Name: "read-pods"},
+						ObjectMeta: metav1.ObjectMeta{Name: "read-pods"},
 						Rules: []rbacapi.PolicyRule{ruleReadPods},
 					},
 				},
 				clusterRoleBindings: []rbacapi.ClusterRoleBinding{
 					{
-						ObjectMeta: api.ObjectMeta{Name: "read-pods"},
+						ObjectMeta: metav1.ObjectMeta{Name: "read-pods"},
 						Subjects: []rbacapi.Subject{
 							{Kind: "User", Name: "pod-reader"},
 						},
@@ -306,17 +307,17 @@ func TestRBAC(t *testing.T) {
 			bootstrapRoles: bootstrapRoles{
 				clusterRoles: []rbacapi.ClusterRole{
 					{
-						ObjectMeta: api.ObjectMeta{Name: "write-jobs"},
+						ObjectMeta: metav1.ObjectMeta{Name: "write-jobs"},
 						Rules: []rbacapi.PolicyRule{ruleWriteJobs},
 					},
 					{
-						ObjectMeta: api.ObjectMeta{Name: "create-rolebindings"},
+						ObjectMeta: metav1.ObjectMeta{Name: "create-rolebindings"},
 						Rules: []rbacapi.PolicyRule{
 							rbacapi.NewRule("create").Groups("rbac.authorization.k8s.io").Resources("rolebindings").RuleOrDie(),
 						},
 					},
 					{
-						ObjectMeta: api.ObjectMeta{Name: "bind-any-clusterrole"},
+						ObjectMeta: metav1.ObjectMeta{Name: "bind-any-clusterrole"},
 						Rules: []rbacapi.PolicyRule{
 							rbacapi.NewRule("bind").Groups("rbac.authorization.k8s.io").Resources("clusterroles").RuleOrDie(),
 						},
@@ -324,12 +325,12 @@ func TestRBAC(t *testing.T) {
 			},
 			clusterRoleBindings: []rbacapi.ClusterRoleBinding{
 				{
-					ObjectMeta: api.ObjectMeta{Name: "write-jobs"},
+					ObjectMeta: metav1.ObjectMeta{Name: "write-jobs"},
 					Subjects: []rbacapi.Subject{{Kind: "User", Name: "job-writer"}},
 					RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "write-jobs"},
 				},
 				{
-					ObjectMeta: api.ObjectMeta{Name: "create-rolebindings"},
+					ObjectMeta: metav1.ObjectMeta{Name: "create-rolebindings"},
 					Subjects: []rbacapi.Subject{
 						{Kind: "User", Name: "job-writer"},
 						{Kind: "User", Name: "nonescalating-rolebinding-writer"},
@@ -338,19 +339,19 @@ func TestRBAC(t *testing.T) {
 					RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "create-rolebindings"},
 				},
 				{
-					ObjectMeta: api.ObjectMeta{Name: "bind-any-clusterrole"},
+					ObjectMeta: metav1.ObjectMeta{Name: "bind-any-clusterrole"},
 					Subjects: []rbacapi.Subject{{Kind: "User", Name: "any-rolebinding-writer"}},
 					RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "bind-any-clusterrole"},
 				},
 			},
 			roleBindings: []rbacapi.RoleBinding{
 				{
-					ObjectMeta: api.ObjectMeta{Name: "write-jobs", Namespace: "job-namespace"},
+					ObjectMeta: metav1.ObjectMeta{Name: "write-jobs", Namespace: "job-namespace"},
 					Subjects: []rbacapi.Subject{{Kind: "User", Name: "job-writer-namespace"}},
 					RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "write-jobs"},
 				},
 				{
-					ObjectMeta: api.ObjectMeta{Name: "create-rolebindings", Namespace: "job-namespace"},
+					ObjectMeta: metav1.ObjectMeta{Name: "create-rolebindings", Namespace: "job-namespace"},
 					Subjects: []rbacapi.Subject{{Kind: "User", Name: "job-writer-namespace"}},
 					RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "create-rolebindings"},
 				},
@@ -507,7 +508,7 @@ func TestBootstrapping(t *testing.T) {
 
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
 
-	watcher, err := clientset.Rbac().ClusterRoles().Watch(api.ListOptions{ResourceVersion: "0"})
+	watcher, err := clientset.Rbac().ClusterRoles().Watch(metav1.ListOptions{ResourceVersion: "0"})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -521,7 +522,7 @@ func TestBootstrapping(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	clusterRoles, err := clientset.Rbac().ClusterRoles().List(api.ListOptions{})
+	clusterRoles, err := clientset.Rbac().ClusterRoles().List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
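Besides the metav1 moves, two rbac-specific changes run through this file: the inline RoleBinding fixtures now declare apiVersion rbac.authorization.k8s.io/v1beta1 instead of v1alpha1, and list/watch calls take metav1 options. A minimal sketch of the latter, assuming the post-rebase internal clientset (package and helper name are illustrative):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    )

    // watchClusterRoles mirrors the TestBootstrapping hunk: Watch now
    // takes metav1.ListOptions rather than pkg/api's ListOptions.
    func watchClusterRoles(c *clientset.Clientset) error {
        w, err := c.Rbac().ClusterRoles().Watch(metav1.ListOptions{ResourceVersion: "0"})
        if err != nil {
            return err
        }
        w.Stop()
        return nil
    }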
vendor/k8s.io/kubernetes/test/integration/client/client_test.go (generated, vendored, 43 changes)

@@ -32,12 +32,13 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/version"
 	e2e "k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/integration/framework"
@@ -60,7 +61,7 @@ func TestClient(t *testing.T) {
 		t.Errorf("expected %#v, got %#v", e, a)
 	}
 
-	pods, err := client.Core().Pods(ns.Name).List(v1.ListOptions{})
+	pods, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -70,7 +71,7 @@ func TestClient(t *testing.T) {
 
 	// get a validation error
 	pod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "test",
 			Namespace: ns.Name,
 		},
@@ -99,7 +100,7 @@ func TestClient(t *testing.T) {
 	}
 
 	// pod is shown, but not scheduled
-	pods, err = client.Core().Pods(ns.Name).List(v1.ListOptions{})
+	pods, err = client.Core().Pods(ns.Name).List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -128,7 +129,7 @@ func TestAtomicPut(t *testing.T) {
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: c.Core().RESTClient().APIVersion().String(),
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "atomicrc",
 			Namespace: ns.Name,
 			Labels: map[string]string{
@@ -141,7 +142,7 @@ func TestAtomicPut(t *testing.T) {
 				"foo": "bar",
 			},
 			Template: &v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
 						"foo": "bar",
 					},
@@ -222,7 +223,7 @@ func TestPatch(t *testing.T) {
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: c.Core().RESTClient().APIVersion().String(),
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 			Namespace: ns.Name,
 			Labels: map[string]string{},
@@ -239,23 +240,23 @@ func TestPatch(t *testing.T) {
 		t.Fatalf("Failed creating patchpods: %v", err)
 	}
 
-	patchBodies := map[schema.GroupVersion]map[api.PatchType]struct {
+	patchBodies := map[schema.GroupVersion]map[types.PatchType]struct {
 		AddLabelBody []byte
 		RemoveLabelBody []byte
 		RemoveAllLabelsBody []byte
 	}{
 		v1.SchemeGroupVersion: {
-			api.JSONPatchType: {
+			types.JSONPatchType: {
 				[]byte(`[{"op":"add","path":"/metadata/labels","value":{"foo":"bar","baz":"qux"}}]`),
 				[]byte(`[{"op":"remove","path":"/metadata/labels/foo"}]`),
 				[]byte(`[{"op":"remove","path":"/metadata/labels"}]`),
 			},
-			api.MergePatchType: {
+			types.MergePatchType: {
 				[]byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
 				[]byte(`{"metadata":{"labels":{"foo":null}}}`),
 				[]byte(`{"metadata":{"labels":null}}`),
 			},
-			api.StrategicMergePatchType: {
+			types.StrategicMergePatchType: {
 				[]byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
 				[]byte(`{"metadata":{"labels":{"foo":null}}}`),
 				[]byte(`{"metadata":{"labels":{"$patch":"replace"}}}`),
@@ -265,7 +266,7 @@ func TestPatch(t *testing.T) {
 
 	pb := patchBodies[c.Core().RESTClient().APIVersion()]
 
-	execPatch := func(pt api.PatchType, body []byte) error {
+	execPatch := func(pt types.PatchType, body []byte) error {
 		return c.Core().RESTClient().Patch(pt).
 			Resource(resource).
 			Namespace(ns.Name).
@@ -326,7 +327,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
 	defer framework.DeleteTestingNamespace(ns, s, t)
 
 	endpointTemplate := &v1.Endpoints{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "patchendpoint",
 			Namespace: ns.Name,
 		},
@@ -339,7 +340,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) {
 	}
 
 	patchEndpoint := func(json []byte) (runtime.Object, error) {
-		return c.Core().RESTClient().Patch(api.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get()
+		return c.Core().RESTClient().Patch(types.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get()
 	}
 
 	// Make sure patch doesn't get to CreateOnUpdate
@@ -461,7 +462,7 @@ func TestSingleWatch(t *testing.T) {
 	mkEvent := func(i int) *v1.Event {
 		name := fmt.Sprintf("event-%v", i)
 		return &v1.Event{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Namespace: ns.Name,
 				Name: name,
 			},
@@ -546,7 +547,7 @@ func TestMultiWatch(t *testing.T) {
 	dummyEvent := func(i int) *v1.Event {
 		name := fmt.Sprintf("unrelated-%v", i)
 		return &v1.Event{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Name: fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
 				Namespace: ns.Name,
 			},
@@ -571,7 +572,7 @@ func TestMultiWatch(t *testing.T) {
 		watchesStarted.Add(1)
 		name := fmt.Sprintf("multi-watch-%v", i)
 		got, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Name: name,
 				Labels: labels.Set{"watchlabel": name},
 			},
@@ -587,7 +588,7 @@ func TestMultiWatch(t *testing.T) {
 			t.Fatalf("Couldn't make %v: %v", name, err)
 		}
 		go func(name, rv string) {
-			options := v1.ListOptions{
+			options := metav1.ListOptions{
 				LabelSelector: labels.Set{"watchlabel": name}.AsSelector().String(),
 				ResourceVersion: rv,
 			}
@@ -678,7 +679,7 @@ func TestMultiWatch(t *testing.T) {
 		}
 		name := fmt.Sprintf("unrelated-%v", i)
 		_, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Name: name,
 			},
 			Spec: v1.PodSpec{
@@ -742,7 +743,7 @@ func TestMultiWatch(t *testing.T) {
 
 func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace string) {
 	podBody := v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "selflinktest",
 			Namespace: namespace,
 			Labels: map[string]string{
@@ -763,7 +764,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s
 		t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err)
 	}
 
-	podList, err := c.Core().Pods(namespace).List(v1.ListOptions{})
+	podList, err := c.Core().Pods(namespace).List(metav1.ListOptions{})
 	if err != nil {
 		t.Errorf("Failed listing pods: %v", err)
 	}
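The TestPatch hunks capture one more relocation: api.JSONPatchType, api.MergePatchType and api.StrategicMergePatchType become the types.*PatchType constants from k8s.io/apimachinery/pkg/types. A sketch of a caller after that move (hypothetical helper; assumes the generated clientset's Patch signature at this snapshot):

    package example

    import (
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    )

    // addLabel applies a strategic-merge patch that adds one label,
    // using the relocated patch-type constant.
    func addLabel(c clientset.Interface, ns, podName string) error {
        body := []byte(`{"metadata":{"labels":{"foo":"bar"}}}`)
        _, err := c.Core().Pods(ns).Patch(podName, types.StrategicMergePatchType, body)
        return err
    }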
vendor/k8s.io/kubernetes/test/integration/client/dynamic_client_test.go (generated, vendored, 8 changes)

@@ -25,11 +25,11 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -74,7 +74,7 @@ func TestDynamicClient(t *testing.T) {
 
 	// Create a Pod with the normal client
 	pod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "test",
 		},
 		Spec: v1.PodSpec{
@@ -93,7 +93,7 @@ func TestDynamicClient(t *testing.T) {
 	}
 
 	// check dynamic list
-	obj, err := dynamicClient.Resource(&resource, ns.Name).List(&v1.ListOptions{})
+	obj, err := dynamicClient.Resource(&resource, ns.Name).List(&metav1.ListOptions{})
 	unstructuredList, ok := obj.(*unstructured.UnstructuredList)
 	if !ok {
 		t.Fatalf("expected *unstructured.UnstructuredList, got %#v", obj)
@@ -136,7 +136,7 @@ func TestDynamicClient(t *testing.T) {
 		t.Fatalf("unexpected error when deleting pod: %v", err)
 	}
 
-	list, err := client.Core().Pods(ns.Name).List(v1.ListOptions{})
+	list, err := client.Core().Pods(ns.Name).List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("unexpected error when listing pods: %v", err)
 	}
vendor/k8s.io/kubernetes/test/integration/configmap/configmap_test.go (generated, vendored, 7 changes)

@@ -23,10 +23,11 @@ package configmap
 import (
 	"testing"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -46,7 +47,7 @@ func TestConfigMap(t *testing.T) {
 
 func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
 	cfg := v1.ConfigMap{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "configmap",
 			Namespace: ns.Name,
 		},
@@ -63,7 +64,7 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace)
 	defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name)
 
 	pod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "XXX",
 			Namespace: ns.Name,
 		},
vendor/k8s.io/kubernetes/test/integration/evictions/evictions_test.go (generated, vendored, 12 changes)

@@ -30,11 +30,11 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/policy/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller/disruption"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/util/intstr"
@@ -68,7 +68,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
 	}
 
 	var gracePeriodSeconds int64 = 30
-	deleteOption := &v1.DeleteOptions{
+	deleteOption := &metav1.DeleteOptions{
 		GracePeriodSeconds: &gracePeriodSeconds,
 	}
 
@@ -169,7 +169,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
 
 func newPod(podName string) *v1.Pod {
 	return &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: podName,
 			Labels: map[string]string{"app": "test-evictions"},
 		},
@@ -198,7 +198,7 @@ func addPodConditionReady(pod *v1.Pod) {
 
 func newPDB() *v1beta1.PodDisruptionBudget {
 	return &v1beta1.PodDisruptionBudget{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-pdb",
 		},
 		Spec: v1beta1.PodDisruptionBudgetSpec{
@@ -213,13 +213,13 @@ func newPDB() *v1beta1.PodDisruptionBudget {
 	}
 }
 
-func newEviction(ns, evictionName string, deleteOption *v1.DeleteOptions) *v1beta1.Eviction {
+func newEviction(ns, evictionName string, deleteOption *metav1.DeleteOptions) *v1beta1.Eviction {
 	return &v1beta1.Eviction{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "Policy/v1beta1",
 			Kind: "Eviction",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: evictionName,
 			Namespace: ns,
 		},
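newEviction's parameter type changes together with the struct field it populates: eviction bodies now carry a *metav1.DeleteOptions. A standalone sketch under that assumption (the helper name, the 30s grace period, and the DeleteOptions field wiring are illustrative, inferred from the hunk above):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
    )

    // evictionFor builds an Eviction the way newEviction does above,
    // with the delete options now a metav1 type.
    func evictionFor(ns, name string) *v1beta1.Eviction {
        grace := int64(30)
        return &v1beta1.Eviction{
            TypeMeta:      metav1.TypeMeta{APIVersion: "Policy/v1beta1", Kind: "Eviction"},
            ObjectMeta:    metav1.ObjectMeta{Name: name, Namespace: ns},
            DeleteOptions: &metav1.DeleteOptions{GracePeriodSeconds: &grace},
        }
    }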
vendor/k8s.io/kubernetes/test/integration/federation/BUILD (generated, vendored, 1 change)

@@ -16,6 +16,7 @@ go_test(
         "//federation/cmd/federation-apiserver/app:go_default_library",
         "//federation/cmd/federation-apiserver/app/options:go_default_library",
         "//pkg/api/v1:go_default_library",
+        "//pkg/apis/autoscaling/v1:go_default_library",
         "//pkg/apis/batch/v1:go_default_library",
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//vendor:github.com/stretchr/testify/assert",
vendor/k8s.io/kubernetes/test/integration/federation/server_test.go (generated, vendored, 29 changes)

@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/federation/cmd/federation-apiserver/app"
 	"k8s.io/kubernetes/federation/cmd/federation-apiserver/app/options"
 	"k8s.io/kubernetes/pkg/api/v1"
+	autoscaling_v1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
 	batch_v1 "k8s.io/kubernetes/pkg/apis/batch/v1"
 	ext_v1b1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 )
@@ -44,6 +45,7 @@ var groupVersions = []schema.GroupVersion{
 	fed_v1b1.SchemeGroupVersion,
 	ext_v1b1.SchemeGroupVersion,
 	batch_v1.SchemeGroupVersion,
+	autoscaling_v1.SchemeGroupVersion,
 }
 
 func TestRun(t *testing.T) {
@@ -214,6 +216,7 @@ func testAPIResourceList(t *testing.T) {
 	testCoreResourceList(t)
 	testExtensionsResourceList(t)
 	testBatchResourceList(t)
+	testAutoscalingResourceList(t)
 }
 
 func testFederationResourceList(t *testing.T) {
@@ -373,3 +376,29 @@ func testBatchResourceList(t *testing.T) {
 	assert.NotNil(t, found)
 	assert.True(t, found.Namespaced)
 }
+
+func testAutoscalingResourceList(t *testing.T) {
+	serverURL := serverIP + "/apis/" + autoscaling_v1.SchemeGroupVersion.String()
+	contents, err := readResponse(serverURL)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+	var apiResourceList metav1.APIResourceList
+	err = json.Unmarshal(contents, &apiResourceList)
+	if err != nil {
+		t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
+	}
+	// empty APIVersion for extensions group
+	assert.Equal(t, "v1", apiResourceList.APIVersion)
+	assert.Equal(t, autoscaling_v1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
+	// Assert that there are exactly this number of resources.
+	assert.Equal(t, 2, len(apiResourceList.APIResources))
+
+	// Verify hpa
+	found := findResource(apiResourceList.APIResources, "horizontalpodautoscalers")
+	assert.NotNil(t, found)
+	assert.True(t, found.Namespaced)
+	found = findResource(apiResourceList.APIResources, "horizontalpodautoscalers/status")
+	assert.NotNil(t, found)
+	assert.True(t, found.Namespaced)
+}
vendor/k8s.io/kubernetes/test/integration/framework/BUILD (generated, vendored, 6 changes)

@@ -23,7 +23,7 @@ go_library(
         "//pkg/apis/apps/v1beta1:go_default_library",
         "//pkg/apis/autoscaling/v1:go_default_library",
        "//pkg/apis/batch:go_default_library",
-        "//pkg/apis/certificates/v1alpha1:go_default_library",
+        "//pkg/apis/certificates/v1beta1:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//pkg/apis/policy/v1alpha1:go_default_library",
        "//pkg/apis/rbac/v1alpha1:go_default_library",
@@ -32,13 +32,12 @@ go_library(
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/record:go_default_library",
-        "//pkg/client/restclient:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/replication:go_default_library",
        "//pkg/generated/openapi:go_default_library",
-        "//pkg/genericapiserver:go_default_library",
        "//pkg/genericapiserver/authenticator:go_default_library",
        "//pkg/genericapiserver/authorizer:go_default_library",
+        "//pkg/genericapiserver/server:go_default_library",
        "//pkg/kubectl:go_default_library",
        "//pkg/kubelet/client:go_default_library",
        "//pkg/master:go_default_library",
@@ -62,6 +61,7 @@ go_library(
        "//vendor:k8s.io/apiserver/pkg/authentication/user",
        "//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
        "//vendor:k8s.io/apiserver/pkg/authorization/union",
+        "//vendor:k8s.io/client-go/rest",
    ],
)
vendor/k8s.io/kubernetes/test/integration/framework/master_utils.go (generated, vendored, 10 changes)

@@ -37,13 +37,14 @@ import (
 	"k8s.io/apiserver/pkg/authentication/user"
 	authauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
 	authorizerunion "k8s.io/apiserver/pkg/authorization/union"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/v1"
 	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
 	"k8s.io/kubernetes/pkg/apis/batch"
-	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
+	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	policy "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
 	rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
@@ -52,13 +53,12 @@ import (
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/record"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller"
 	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
 	"k8s.io/kubernetes/pkg/generated/openapi"
-	"k8s.io/kubernetes/pkg/genericapiserver"
 	"k8s.io/kubernetes/pkg/genericapiserver/authenticator"
 	"k8s.io/kubernetes/pkg/genericapiserver/authorizer"
+	genericapiserver "k8s.io/kubernetes/pkg/genericapiserver/server"
 	"k8s.io/kubernetes/pkg/kubectl"
 	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
 	"k8s.io/kubernetes/pkg/master"
@@ -267,7 +267,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
 	if masterConfig.EnableCoreControllers {
 		// TODO Once /healthz is updated for posthooks, we'll wait for good health
 		coreClient := coreclient.NewForConfigOrDie(&cfg)
-		svcWatch, err := coreClient.Services(v1.NamespaceDefault).Watch(v1.ListOptions{})
+		svcWatch, err := coreClient.Services(metav1.NamespaceDefault).Watch(metav1.ListOptions{})
 		if err != nil {
 			glog.Fatal(err)
 		}
@@ -403,7 +403,7 @@ func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *test
 	// Currently we neither create the namespace nor delete all its contents at the end.
 	// But as long as tests are not using the same namespaces, this should work fine.
 	return &v1.Namespace{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			// TODO: Once we start creating namespaces, switch to GenerateName.
 			Name: baseName,
 		},
vendor/k8s.io/kubernetes/test/integration/framework/perf_utils.go (generated, vendored, 5 changes)

@@ -17,6 +17,7 @@ limitations under the License.
 package framework
 
 import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
@@ -52,7 +53,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 
 	glog.Infof("Making %d nodes", numNodes)
 	baseNode := &v1.Node{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: p.nodeNamePrefix,
 		},
 		Spec: v1.NodeSpec{
@@ -95,7 +96,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 func (p *IntegrationTestNodePreparer) CleanupNodes() error {
 	nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
 	for i := range nodes.Items {
-		if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &v1.DeleteOptions{}); err != nil {
+		if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil {
 			glog.Errorf("Error while deleting Node: %v", err)
 		}
 	}
vendor/k8s.io/kubernetes/test/integration/garbagecollector/garbage_collector_test.go (generated, vendored, 34 changes)

@@ -34,10 +34,10 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/typed/discovery"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector"
@@ -46,14 +46,14 @@ import (
 	"k8s.io/kubernetes/test/integration/framework"
 )
 
-func getOrphanOptions() *v1.DeleteOptions {
+func getOrphanOptions() *metav1.DeleteOptions {
 	var trueVar = true
-	return &v1.DeleteOptions{OrphanDependents: &trueVar}
+	return &metav1.DeleteOptions{OrphanDependents: &trueVar}
 }
 
-func getNonOrphanOptions() *v1.DeleteOptions {
+func getNonOrphanOptions() *metav1.DeleteOptions {
 	var falseVar = false
-	return &v1.DeleteOptions{OrphanDependents: &falseVar}
+	return &metav1.DeleteOptions{OrphanDependents: &falseVar}
 }
 
 const garbageCollectedPodName = "test.pod.1"
@@ -74,7 +74,7 @@ func newPod(podName, podNamespace string, ownerReferences []metav1.OwnerReferenc
 			Kind: "Pod",
 			APIVersion: "v1",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: podName,
 			Namespace: podNamespace,
 			OwnerReferences: ownerReferences,
@@ -96,14 +96,14 @@ func newOwnerRC(name, namespace string) *v1.ReplicationController {
 			Kind: "ReplicationController",
 			APIVersion: "v1",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name: name,
 		},
 		Spec: v1.ReplicationControllerSpec{
 			Selector: map[string]string{"name": "test"},
 			Template: &v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{"name": "test"},
 				},
 				Spec: v1.PodSpec{
@@ -172,7 +172,7 @@ func TestCascadingDeletion(t *testing.T) {
 		t.Fatalf("Failed to create replication controller: %v", err)
 	}
 
-	rcs, err := rcClient.List(v1.ListOptions{})
+	rcs, err := rcClient.List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list replication controllers: %v", err)
 	}
@@ -205,7 +205,7 @@ func TestCascadingDeletion(t *testing.T) {
 	}
 
 	// set up watch
-	pods, err := podClient.List(v1.ListOptions{})
+	pods, err := podClient.List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list pods: %v", err)
 	}
@@ -257,7 +257,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
 	}
 
 	// set up watch
-	pods, err := podClient.List(v1.ListOptions{})
+	pods, err := podClient.List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list pods: %v", err)
 	}
@@ -273,7 +273,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
 	}
 }
 
-func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *v1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
+func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
 	defer wg.Done()
 	rcClient := clientSet.Core().ReplicationControllers(namespace)
 	podClient := clientSet.Core().Pods(namespace)
@@ -313,7 +313,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
 func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
 	rcClient := clientSet.Core().ReplicationControllers(namespace)
 	podClient := clientSet.Core().Pods(namespace)
-	pods, err := podClient.List(v1.ListOptions{})
+	pods, err := podClient.List(metav1.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list pods: %v", err)
 	}
@@ -322,7 +322,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
 		ret = false
 		t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
 	}
-	rcs, err := rcClient.List(v1.ListOptions{})
+	rcs, err := rcClient.List(metav1.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list replication controllers: %v", err)
 	}
@@ -377,7 +377,7 @@ func TestStressingCascadingDeletion(t *testing.T) {
 
 	// verify the remaining pods all have "orphan" in their names.
 	podClient := clientSet.Core().Pods(ns.Name)
-	pods, err := podClient.List(v1.ListOptions{})
+	pods, err := podClient.List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -454,7 +454,7 @@ func TestOrphaning(t *testing.T) {
 	}
 	// verify the toBeDeleteRC is deleted
 	if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
-		rcs, err := rcClient.List(v1.ListOptions{})
+		rcs, err := rcClient.List(metav1.ListOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -468,7 +468,7 @@ func TestOrphaning(t *testing.T) {
 	}
 
 	// verify pods don't have the ownerPod as an owner anymore
-	pods, err := podClient.List(v1.ListOptions{})
+	pods, err := podClient.List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list pods: %v", err)
 	}
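getOrphanOptions/getNonOrphanOptions above show DeleteOptions becoming a metav1 type, with the OrphanDependents toggle riding along unchanged. A sketch of a delete-and-orphan call under that assumption (hypothetical helper, not part of the diff):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    )

    // deleteAndOrphan removes a replication controller but leaves its
    // pods behind, matching the orphan options used in the tests.
    func deleteAndOrphan(c clientset.Interface, ns, name string) error {
        orphan := true
        return c.Core().ReplicationControllers(ns).Delete(name, &metav1.DeleteOptions{OrphanDependents: &orphan})
    }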
vendor/k8s.io/kubernetes/test/integration/kubectl/kubectl_test.go (generated, vendored, 4 changes)

@@ -21,8 +21,8 @@ package kubectl
 import (
 	"testing"
 
-	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
-	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	"k8s.io/kubernetes/test/integration/framework"
 )
vendor/k8s.io/kubernetes/test/integration/master/master_benchmark_test.go (generated, vendored, 7 changes)

@@ -26,10 +26,11 @@ import (
 	"time"
 
 	"github.com/golang/glog"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/pkg/api"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/test/integration/framework"
 )
 
@@ -137,7 +138,7 @@ func BenchmarkPodList(b *testing.B) {
 		defer func() {
 			glog.V(3).Infof("Worker %d: Node %v listing pods took %v", id, host, time.Since(now))
 		}()
-		if pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{
+		if pods, err := m.ClientSet.Core().Pods(ns.Name).List(metav1.ListOptions{
 			LabelSelector: labels.Everything(),
 			FieldSelector: fields.OneTermEqualSelector(api.PodHostField, host),
 		}); err != nil {
@@ -180,7 +181,7 @@ func BenchmarkPodListEtcd(b *testing.B) {
 		defer func() {
 			glog.V(3).Infof("Worker %d: listing pods took %v", id, time.Since(now))
 		}()
-		pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{
+		pods, err := m.ClientSet.Core().Pods(ns.Name).List(metav1.ListOptions{
 			LabelSelector: labels.Everything(),
 			FieldSelector: fields.Everything(),
 		})
vendor/k8s.io/kubernetes/test/integration/master/master_test.go (generated, vendored, 36 changes)

@@ -36,12 +36,12 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/v1"
 	clienttypedv1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -162,9 +162,9 @@ func TestAutoscalingGroupBackwardCompatibility(t *testing.T) {
 		expectedStatusCodes map[int]bool
 		expectedVersion string
 	}{
-		{"POST", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), hpaV1, integration.Code201, ""},
-		{"GET", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Autoscaling.GroupVersion().String()},
-		{"GET", extensionsPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
+		{"POST", autoscalingPath("horizontalpodautoscalers", metav1.NamespaceDefault, ""), hpaV1, integration.Code201, ""},
+		{"GET", autoscalingPath("horizontalpodautoscalers", metav1.NamespaceDefault, ""), "", integration.Code200, testapi.Autoscaling.GroupVersion().String()},
+		{"GET", extensionsPath("horizontalpodautoscalers", metav1.NamespaceDefault, ""), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
 	}
 
 	for _, r := range requests {
@@ -281,7 +281,7 @@ func TestMasterService(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
 
 	err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
-		svcList, err := client.Core().Services(api.NamespaceDefault).List(api.ListOptions{})
+		svcList, err := client.Core().Services(metav1.NamespaceDefault).List(metav1.ListOptions{})
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 			return false, nil
@@ -294,7 +294,7 @@ func TestMasterService(t *testing.T) {
 			}
 		}
 		if found {
-			ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
+			ep, err := client.Core().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
 			if err != nil {
 				return false, nil
 			}
@@ -324,7 +324,7 @@ func TestServiceAlloc(t *testing.T) {
 
 	svc := func(i int) *api.Service {
 		return &api.Service{
-			ObjectMeta: api.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Name: fmt.Sprintf("svc-%v", i),
 			},
 			Spec: api.ServiceSpec{
@@ -338,7 +338,7 @@ func TestServiceAlloc(t *testing.T) {
 
 	// Wait until the default "kubernetes" service is created.
 	if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
-		_, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
+		_, err := client.Core().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
 		if err != nil && !errors.IsNotFound(err) {
 			return false, err
 		}
@@ -349,18 +349,18 @@ func TestServiceAlloc(t *testing.T) {
 
 	// make 5 more services to take up all IPs
 	for i := 0; i < 5; i++ {
-		if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(i)); err != nil {
+		if _, err := client.Core().Services(metav1.NamespaceDefault).Create(svc(i)); err != nil {
 			t.Error(err)
 		}
 	}
 
 	// Make another service. It will fail because we're out of cluster IPs
-	if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil {
+	if _, err := client.Core().Services(metav1.NamespaceDefault).Create(svc(8)); err != nil {
 		if !strings.Contains(err.Error(), "range is full") {
 			t.Errorf("unexpected error text: %v", err)
 		}
 	} else {
-		svcs, err := client.Core().Services(api.NamespaceAll).List(api.ListOptions{})
+		svcs, err := client.Core().Services(metav1.NamespaceAll).List(metav1.ListOptions{})
 		if err != nil {
 			t.Fatalf("unexpected success, and error getting the services: %v", err)
 		}
@@ -372,12 +372,12 @@ func TestServiceAlloc(t *testing.T) {
 	}
 
 	// Delete the first service.
-	if err := client.Core().Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil {
+	if err := client.Core().Services(metav1.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil {
 		t.Fatalf("got unexpected error: %v", err)
 	}
 
 	// This time creating the second service should work.
-	if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil {
+	if _, err := client.Core().Services(metav1.NamespaceDefault).Create(svc(8)); err != nil {
 		t.Fatalf("got unexpected error: %v", err)
 	}
 }
@@ -408,7 +408,7 @@ func TestUpdateNodeObjects(t *testing.T) {
 	for i := 0; i < nodes*6; i++ {
 		c.Nodes().Delete(fmt.Sprintf("node-%d", i), nil)
 		_, err := c.Nodes().Create(&v1.Node{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Name: fmt.Sprintf("node-%d", i),
 			},
 		})
@@ -420,7 +420,7 @@ func TestUpdateNodeObjects(t *testing.T) {
 	for k := 0; k < listers; k++ {
 		go func(lister int) {
 			for i := 0; i < iterations; i++ {
-				_, err := c.Nodes().List(v1.ListOptions{})
+				_, err := c.Nodes().List(metav1.ListOptions{})
 				if err != nil {
 					fmt.Printf("[list:%d] error after %d: %v\n", lister, i, err)
 					break
@@ -432,7 +432,7 @@ func TestUpdateNodeObjects(t *testing.T) {
 
 	for k := 0; k < watchers; k++ {
 		go func(lister int) {
-			w, err := c.Nodes().Watch(v1.ListOptions{})
+			w, err := c.Nodes().Watch(metav1.ListOptions{})
 			if err != nil {
 				fmt.Printf("[watch:%d] error: %v", k, err)
 				return
@@ -462,14 +462,14 @@ func TestUpdateNodeObjects(t *testing.T) {
 			fmt.Printf("[%d] iteration %d ...\n", node, i)
 		}
 		if i%20 == 0 {
-			_, err := c.Nodes().List(v1.ListOptions{})
+			_, err := c.Nodes().List(metav1.ListOptions{})
 			if err != nil {
 				fmt.Printf("[%d] error after %d: %v\n", node, i, err)
 				break
 			}
 		}
 
-		r, err := c.Nodes().List(v1.ListOptions{
+		r, err := c.Nodes().List(metav1.ListOptions{
 			FieldSelector: fmt.Sprintf("metadata.name=node-%d", node),
 			ResourceVersion: "0",
 		})
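master_test.go also picks up the namespace-constant move: api.NamespaceDefault and api.NamespaceAll become metav1.NamespaceDefault and metav1.NamespaceAll. A minimal sketch (hypothetical helper; internal clientset assumed, as in the test):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    )

    // listDefaultServices mirrors the TestMasterService hunk: both the
    // namespace constant and the list options live in metav1 now.
    func listDefaultServices(c *clientset.Clientset) error {
        _, err := c.Core().Services(metav1.NamespaceDefault).List(metav1.ListOptions{})
        return err
    }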
vendor/k8s.io/kubernetes/test/integration/metrics/metrics_test.go (generated, vendored, 5 changes)

@@ -25,10 +25,11 @@ import (
 	"net/http/httptest"
 	"testing"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration/framework"
 
 	"github.com/golang/glog"
@@ -109,7 +110,7 @@ func TestApiserverMetrics(t *testing.T) {
 	// Make a request to the apiserver to ensure there's at least one data point
 	// for the metrics we're expecting -- otherwise, they won't be exported.
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-	if _, err := client.Core().Pods(v1.NamespaceDefault).List(v1.ListOptions{}); err != nil {
+	if _, err := client.Core().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{}); err != nil {
 		t.Fatalf("unexpected error getting pods: %v", err)
 	}
 
vendor/k8s.io/kubernetes/test/integration/objectmeta/BUILD (generated, vendored, 3 changes)

@@ -15,9 +15,10 @@ go_test(
        "//pkg/api:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
-        "//pkg/client/restclient:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor:github.com/stretchr/testify/assert",
+        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
+        "//vendor:k8s.io/client-go/rest",
    ],
)
vendor/k8s.io/kubernetes/test/integration/objectmeta/objectmeta_test.go (generated, vendored, 5 changes)

@@ -21,10 +21,11 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration/framework"
 )
 
@@ -35,7 +36,7 @@ func TestIgnoreClusterName(t *testing.T) {
 
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 	ns := v1.Namespace{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-namespace",
 			ClusterName: "cluster-name-to-ignore",
 		},
vendor/k8s.io/kubernetes/test/integration/openshift/BUILD (generated, vendored, 2 changes)

@@ -12,7 +12,7 @@ go_test(
    srcs = ["openshift_test.go"],
    tags = ["automanaged"],
    deps = [
-        "//pkg/genericapiserver:go_default_library",
+        "//pkg/genericapiserver/server:go_default_library",
        "//pkg/master:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/test/integration/openshift/openshift_test.go (generated, vendored, 2 changes)

@@ -19,7 +19,7 @@ package openshift
 import (
 	"testing"
 
-	"k8s.io/kubernetes/pkg/genericapiserver"
+	genericapiserver "k8s.io/kubernetes/pkg/genericapiserver/server"
 	"k8s.io/kubernetes/pkg/master"
 )
 
vendor/k8s.io/kubernetes/test/integration/pods/pods_test.go (generated, vendored, 7 changes)

@@ -22,10 +22,11 @@ import (
 	"fmt"
 	"testing"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -48,7 +49,7 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
 
 	prototypePod := func() *v1.Pod {
 		return &v1.Pod{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Name: "xxx",
 			},
 			Spec: v1.PodSpec{
@@ -158,7 +159,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 
 	pod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "xxx",
 		},
 		Spec: v1.PodSpec{
vendor/k8s.io/kubernetes/test/integration/quota/quota_test.go
generated
vendored
17
vendor/k8s.io/kubernetes/test/integration/quota/quota_test.go
generated
vendored
|
@@ -25,21 +25,22 @@ import (
 	"testing"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/watch"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/record"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
 	resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
-	"k8s.io/kubernetes/pkg/fields"
 	kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
 	quotainstall "k8s.io/kubernetes/pkg/quota/install"
 	"k8s.io/kubernetes/plugin/pkg/admission/resourcequota"
@@ -111,7 +112,7 @@ func TestQuota(t *testing.T) {
 	t.Logf("Took %v to scale up without quota", endTime.Sub(startTime))
 
 	quota := &v1.ResourceQuota{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "quota",
 			Namespace: ns.Name,
 		},
@@ -130,7 +131,7 @@ func TestQuota(t *testing.T) {
 }
 
 func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
-	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: quota.Name}))
+	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name}))
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -163,7 +164,7 @@ func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
 func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
 	target := int32(100)
 	rc := &v1.ReplicationController{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "foo",
 			Namespace: namespace,
 		},
@@ -171,7 +172,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
 			Replicas: &target,
 			Selector: map[string]string{"foo": "bar"},
 			Template: &v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
 						"foo": "bar",
 					},
@@ -188,7 +189,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
 		},
 	}
 
-	w, err := clientset.Core().ReplicationControllers(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: rc.Name}))
+	w, err := clientset.Core().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name}))
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -215,7 +216,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
 		return false, nil
 	})
 	if err != nil {
-		pods, _ := clientset.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
+		pods, _ := clientset.Core().Pods(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
 		t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
 	}
 }
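The waitForQuota and scale hunks also move the single-object watch helper: metav1.SingleObject builds ListOptions scoped to one named object. A sketch of the wait idiom as these tests use it, assuming the same clientset vintage:

package example

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// waitForOneQuota blocks until the named ResourceQuota produces an event on
// a watch opened with metav1.SingleObject, which restricts the watch to it.
func waitForOneQuota(t *testing.T, c *clientset.Clientset, ns, name string) {
	w, err := c.Core().ResourceQuotas(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer w.Stop()
	<-w.ResultChan() // first event for the named object
}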
22 vendor/k8s.io/kubernetes/test/integration/replicaset/replicaset_test.go generated vendored
@@ -26,13 +26,13 @@ import (
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/kubernetes/pkg/api"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/controller/replicaset"
 	"k8s.io/kubernetes/test/integration/framework"
@@ -49,7 +49,7 @@ func newRS(name, namespace string, replicas int) *v1beta1.ReplicaSet {
 			Kind:       "ReplicaSet",
 			APIVersion: "extensions/v1beta1",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
 		},
@@ -59,7 +59,7 @@ func newRS(name, namespace string, replicas int) *v1beta1.ReplicaSet {
 			},
 			Replicas: &replicasCopy,
 			Template: v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: testLabels(),
 				},
 				Spec: v1.PodSpec{
@@ -81,7 +81,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
 			Kind:       "Pod",
 			APIVersion: "v1",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      podName,
 			Namespace: namespace,
 			Labels:    testLabels(),
@@ -106,7 +106,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
 func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) {
 	rsClient := clientSet.Extensions().ReplicaSets(namespace)
 	podClient := clientSet.Core().Pods(namespace)
-	pods, err := podClient.List(v1.ListOptions{})
+	pods, err := podClient.List(metav1.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list pods: %v", err)
 	}
@@ -115,7 +115,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) {
 		ret = false
 		t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
 	}
-	rss, err := rsClient.List(v1.ListOptions{})
+	rss, err := rsClient.List(metav1.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list replica sets: %v", err)
 	}
@@ -320,7 +320,7 @@ func TestUpdateSelectorToAdopt(t *testing.T) {
 	// change the rs's selector to match both pods
 	patch := `{"spec":{"selector":{"matchLabels": {"uniqueKey":null}}}}`
 	rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
-	rs, err := rsClient.Patch(rs.Name, api.StrategicMergePatchType, []byte(patch))
+	rs, err := rsClient.Patch(rs.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch replica set: %v", err)
 	}
@@ -359,7 +359,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 	// change the rs's selector to match both pods
 	patch := `{"spec":{"selector":{"matchLabels": {"uniqueKey":"1"}},"template":{"metadata":{"labels":{"uniqueKey":"1"}}}}}`
 	rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
-	rs, err := rsClient.Patch(rs.Name, api.StrategicMergePatchType, []byte(patch))
+	rs, err := rsClient.Patch(rs.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch replica set: %v", err)
 	}
@@ -403,7 +403,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	// change the rs's selector to match both pods
 	patch := `{"metadata":{"labels":{"name":null}}}`
 	podClient := clientSet.Core().Pods(ns.Name)
-	pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
+	pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch pod2: %v", err)
 	}
@@ -451,7 +451,7 @@ func TestUpdateLabelToBeAdopted(t *testing.T) {
 	// change the rs's selector to match both pods
 	patch := `{"metadata":{"labels":{"uniqueKey":"1"}}}`
 	podClient := clientSet.Core().Pods(ns.Name)
-	pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
+	pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch pod2: %v", err)
 	}
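The patch hunks swap the patch-type constant from pkg/api to apimachinery's types package; the wire value (strategic-merge-patch JSON) is unchanged. A sketch of the call shape with a clientset of this vintage, mirroring the test's adoption patch:

package example

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// adoptBothPods applies the test's strategic-merge patch: clearing the
// uniqueKey selector entry widens the ReplicaSet selector to both pods.
func adoptBothPods(c clientset.Interface, ns, name string) error {
	patch := `{"spec":{"selector":{"matchLabels": {"uniqueKey":null}}}}`
	_, err := c.Extensions().ReplicaSets(ns).Patch(name, types.StrategicMergePatchType, []byte(patch))
	return err
}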
@@ -26,12 +26,12 @@ import (
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/kubernetes/pkg/api"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/controller/replication"
 	"k8s.io/kubernetes/test/integration/framework"
@@ -48,7 +48,7 @@ func newRC(name, namespace string, replicas int) *v1.ReplicationController {
 			Kind:       "ReplicationController",
 			APIVersion: "v1",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
 		},
@@ -56,7 +56,7 @@ func newRC(name, namespace string, replicas int) *v1.ReplicationController {
 			Selector: testLabels(),
 			Replicas: &replicasCopy,
 			Template: &v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: testLabels(),
 				},
 				Spec: v1.PodSpec{
@@ -78,7 +78,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
 			Kind:       "Pod",
 			APIVersion: "v1",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      podName,
 			Namespace: namespace,
 			Labels:    testLabels(),
@@ -103,7 +103,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
 func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
 	rcClient := clientSet.Core().ReplicationControllers(namespace)
 	podClient := clientSet.Core().Pods(namespace)
-	pods, err := podClient.List(v1.ListOptions{})
+	pods, err := podClient.List(metav1.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list pods: %v", err)
 	}
@@ -112,7 +112,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
 		ret = false
 		t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
 	}
-	rcs, err := rcClient.List(v1.ListOptions{})
+	rcs, err := rcClient.List(metav1.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list replication controllers: %v", err)
 	}
@@ -307,7 +307,7 @@ func TestUpdateSelectorToAdopt(t *testing.T) {
 	// change the rc's selector to match both pods
 	patch := `{"spec":{"selector":{"uniqueKey":null}}}`
 	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
-	rc, err := rcClient.Patch(rc.Name, api.StrategicMergePatchType, []byte(patch))
+	rc, err := rcClient.Patch(rc.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch replication controller: %v", err)
 	}
@@ -344,7 +344,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 	// change the rc's selector to match both pods
 	patch := `{"spec":{"selector":{"uniqueKey":"1"},"template":{"metadata":{"labels":{"uniqueKey":"1"}}}}}`
 	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
-	rc, err := rcClient.Patch(rc.Name, api.StrategicMergePatchType, []byte(patch))
+	rc, err := rcClient.Patch(rc.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch replication controller: %v", err)
 	}
@@ -386,7 +386,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	// change the rc's selector to match both pods
 	patch := `{"metadata":{"labels":{"name":null}}}`
 	podClient := clientSet.Core().Pods(ns.Name)
-	pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
+	pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch pod2: %v", err)
 	}
@@ -432,7 +432,7 @@ func TestUpdateLabelToBeAdopted(t *testing.T) {
 	// change the rc's selector to match both pods
 	patch := `{"metadata":{"labels":{"uniqueKey":"1"}}}`
 	podClient := clientSet.Core().Pods(ns.Name)
-	pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
+	pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch pod2: %v", err)
 	}
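List and watch options follow the same move: v1.ListOptions becomes metav1.ListOptions wherever a test lists pods, ReplicaSets, or ReplicationControllers. A sketch with selectors flattened to strings, as the quota test above does; metav1.ListOptions carries selectors as strings rather than typed values:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// countPods lists every pod in ns; Everything() selectors are serialized
// with String() because the options struct holds plain strings.
func countPods(c clientset.Interface, ns string) (int, error) {
	pods, err := c.Core().Pods(ns).List(metav1.ListOptions{
		LabelSelector: labels.Everything().String(),
		FieldSelector: fields.Everything().String(),
	})
	if err != nil {
		return 0, err
	}
	return len(pods.Items), nil
}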
6 vendor/k8s.io/kubernetes/test/integration/scheduler/extender_test.go generated vendored
@@ -31,13 +31,13 @@ import (
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/record"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
 	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
 	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
@@ -256,7 +256,7 @@ func TestSchedulerExtender(t *testing.T) {
 func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
+	defer cs.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
 
 	goodCondition := v1.NodeCondition{
 		Type: v1.NodeReady,
@@ -282,7 +282,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
 	}
 
 	pod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{Name: "extender-test-pod"},
+		ObjectMeta: metav1.ObjectMeta{Name: "extender-test-pod"},
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
 		},
79 vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go generated vendored
@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
@@ -35,7 +36,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/record"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
 	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
@@ -71,7 +71,7 @@ func TestUnschedulableNodes(t *testing.T) {
 
 	defer close(schedulerConfig.StopEverything)
 
-	DoTestUnschedulableNodes(t, clientSet, ns, schedulerConfigFactory.NodeLister.Store)
+	DoTestUnschedulableNodes(t, clientSet, ns, schedulerConfigFactory.GetNodeStore())
 }
 
 func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
@@ -123,7 +123,7 @@ func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n
 func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Namespace, nodeStore cache.Store) {
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
+	defer cs.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
 
 	goodCondition := v1.NodeCondition{
 		Type: v1.NodeReady,
@@ -140,7 +140,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Namespace, nodeStore cache.Store) {
 	// Create a new schedulable node, since we're first going to apply
 	// the unschedulable condition and verify that pods aren't scheduled.
 	node := &v1.Node{
-		ObjectMeta: v1.ObjectMeta{Name: "node-scheduling-test-node"},
+		ObjectMeta: metav1.ObjectMeta{Name: "node-scheduling-test-node"},
 		Spec:       v1.NodeSpec{Unschedulable: false},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
@@ -246,7 +246,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Namespace, nodeStore cache.Store) {
 		// Create the new pod, note that this needs to happen post unschedulable
 		// modification or we have a race in the test.
 		pod := &v1.Pod{
-			ObjectMeta: v1.ObjectMeta{Name: "node-scheduling-test-pod"},
+			ObjectMeta: metav1.ObjectMeta{Name: "node-scheduling-test-pod"},
 			Spec: v1.PodSpec{
 				Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
 			},
@@ -282,7 +282,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Namespace, nodeStore cache.Store) {
 			t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
 		}
 
-		err = cs.Core().Pods(ns.Name).Delete(myPod.Name, v1.NewDeleteOptions(0))
+		err = cs.Core().Pods(ns.Name).Delete(myPod.Name, metav1.NewDeleteOptions(0))
 		if err != nil {
 			t.Errorf("Failed to delete pod: %v", err)
 		}
@@ -326,7 +326,7 @@ func TestMultiScheduler(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
+	defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
 
 	schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	schedulerConfig, err := schedulerConfigFactory.Create()
@@ -341,7 +341,7 @@ func TestMultiScheduler(t *testing.T) {
 
 	// 2. create a node
 	node := &v1.Node{
-		ObjectMeta: v1.ObjectMeta{Name: "node-multi-scheduler-test-node"},
+		ObjectMeta: metav1.ObjectMeta{Name: "node-multi-scheduler-test-node"},
 		Spec:       v1.NodeSpec{Unschedulable: false},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
@@ -352,48 +352,48 @@ func TestMultiScheduler(t *testing.T) {
 	clientSet.Core().Nodes().Create(node)
 
 	// 3. create 3 pods for testing
-	podWithNoAnnotation := createPod(clientSet, "pod-with-no-annotation", nil)
-	testPodNoAnnotation, err := clientSet.Core().Pods(ns.Name).Create(podWithNoAnnotation)
+	podWithoutSchedulerName := createPod(clientSet, "pod-without-scheduler-name", "")
+	testPod, err := clientSet.Core().Pods(ns.Name).Create(podWithoutSchedulerName)
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
 
-	schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"}
-	podWithAnnotationFitsDefault := createPod(clientSet, "pod-with-annotation-fits-default", schedulerAnnotationFitsDefault)
-	testPodWithAnnotationFitsDefault, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsDefault)
+	schedulerFitsDefault := "default-scheduler"
+	podFitsDefault := createPod(clientSet, "pod-fits-default", schedulerFitsDefault)
+	testPodFitsDefault, err := clientSet.Core().Pods(ns.Name).Create(podFitsDefault)
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
 
-	schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
-	podWithAnnotationFitsFoo := createPod(clientSet, "pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo)
-	testPodWithAnnotationFitsFoo, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsFoo)
+	schedulerFitsFoo := "foo-scheduler"
+	podFitsFoo := createPod(clientSet, "pod-fits-foo", schedulerFitsFoo)
+	testPodFitsFoo, err := clientSet.Core().Pods(ns.Name).Create(podFitsFoo)
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
 
 	// 4. **check point-1**:
-	//     - testPodNoAnnotation, testPodWithAnnotationFitsDefault should be scheduled
-	//     - testPodWithAnnotationFitsFoo should NOT be scheduled
-	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation.Namespace, testPodNoAnnotation.Name))
+	//     - testPod, testPodFitsDefault should be scheduled
+	//     - testPodFitsFoo should NOT be scheduled
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPod.Namespace, testPod.Name))
 	if err != nil {
-		t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodNoAnnotation.Name, err)
+		t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPod.Name, err)
 	} else {
-		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodNoAnnotation.Name)
+		t.Logf("Test MultiScheduler: %s Pod scheduled", testPod.Name)
 	}
 
-	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsDefault.Namespace, testPodWithAnnotationFitsDefault.Name))
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodFitsDefault.Namespace, testPodFitsDefault.Name))
 	if err != nil {
-		t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodWithAnnotationFitsDefault.Name, err)
+		t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodFitsDefault.Name, err)
 	} else {
-		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodWithAnnotationFitsDefault.Name)
+		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsDefault.Name)
 	}
 
-	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodFitsFoo.Namespace, testPodFitsFoo.Name))
 	if err == nil {
-		t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsFoo.Name, err)
+		t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodFitsFoo.Name, err)
 	} else {
-		t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodWithAnnotationFitsFoo.Name)
+		t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodFitsFoo.Name)
 	}
 
 	// 5. create and start a scheduler with name "foo-scheduler"
@@ -413,19 +413,19 @@ func TestMultiScheduler(t *testing.T) {
 
 	// 6. **check point-2**:
 	//     - testPodWithAnnotationFitsFoo should be scheduled
-	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodFitsFoo.Namespace, testPodFitsFoo.Name))
 	if err != nil {
-		t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodWithAnnotationFitsFoo.Name, err)
+		t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodFitsFoo.Name, err)
 	} else {
-		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodWithAnnotationFitsFoo.Name)
+		t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsFoo.Name)
 	}
 
 	// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
-	err = clientSet.Core().Pods(ns.Name).Delete(testPodNoAnnotation.Name, v1.NewDeleteOptions(0))
+	err = clientSet.Core().Pods(ns.Name).Delete(testPod.Name, metav1.NewDeleteOptions(0))
 	if err != nil {
 		t.Errorf("Failed to delete pod: %v", err)
 	}
-	err = clientSet.Core().Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, v1.NewDeleteOptions(0))
+	err = clientSet.Core().Pods(ns.Name).Delete(testPodFitsDefault.Name, metav1.NewDeleteOptions(0))
 	if err != nil {
 		t.Errorf("Failed to delete pod: %v", err)
 	}
@@ -469,11 +469,12 @@ func TestMultiScheduler(t *testing.T) {
 	*/
 }
 
-func createPod(client clientset.Interface, name string, annotation map[string]string) *v1.Pod {
+func createPod(client clientset.Interface, name string, scheduler string) *v1.Pod {
 	return &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{Name: name, Annotations: annotation},
+		ObjectMeta: metav1.ObjectMeta{Name: name},
 		Spec: v1.PodSpec{
-			Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
+			Containers:    []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
+			SchedulerName: scheduler,
 		},
 	}
 }
@@ -491,7 +492,7 @@ func TestAllocatable(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
+	defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
 
 	schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	schedulerConfig, err := schedulerConfigFactory.Create()
@@ -507,7 +508,7 @@ func TestAllocatable(t *testing.T) {
 
 	// 2. create a node without allocatable awareness
 	node := &v1.Node{
-		ObjectMeta: v1.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
+		ObjectMeta: metav1.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
 		Spec:       v1.NodeSpec{Unschedulable: false},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
@@ -525,7 +526,7 @@ func TestAllocatable(t *testing.T) {
 
 	// 3. create resource pod which requires less than Capacity
 	podResource := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{Name: "pod-test-allocatable"},
+		ObjectMeta: metav1.ObjectMeta{Name: "pod-test-allocatable"},
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{
 				{
@@ -573,7 +574,7 @@ func TestAllocatable(t *testing.T) {
 		t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
 	}
 
-	if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &v1.DeleteOptions{}); err != nil {
+	if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &metav1.DeleteOptions{}); err != nil {
 		t.Fatalf("Failed to remove first resource pod: %v", err)
 	}
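Beyond the metav1 moves, this file picks up the 1.6 change in how a pod selects its scheduler: the scheduler.alpha.kubernetes.io/name annotation gives way to the typed PodSpec.SchedulerName field, which is what the rewritten createPod threads through. A sketch of both shapes for contrast; the annotation form is the pre-1.6 one being deleted here:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// Old shape: scheduler chosen via an alpha annotation on the pod metadata.
func podForSchedulerOld(name, scheduler string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Annotations: map[string]string{"scheduler.alpha.kubernetes.io/name": scheduler},
		},
	}
}

// New shape: scheduler chosen via the first-class PodSpec field.
func podForScheduler(name, scheduler string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       v1.PodSpec{SchedulerName: scheduler},
	}
}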
5 vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD generated vendored
@@ -18,12 +18,12 @@ go_library(
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
         "//pkg/client/record:go_default_library",
-        "//pkg/client/restclient:go_default_library",
         "//plugin/pkg/scheduler:go_default_library",
         "//plugin/pkg/scheduler/algorithmprovider:go_default_library",
         "//plugin/pkg/scheduler/factory:go_default_library",
         "//test/integration/framework:go_default_library",
         "//vendor:github.com/golang/glog",
+        "//vendor:k8s.io/client-go/rest",
     ],
 )
@@ -37,11 +37,12 @@ go_test(
     tags = ["automanaged"],
     deps = [
         "//pkg/api/v1:go_default_library",
-        "//plugin/pkg/scheduler/factory:go_default_library",
+        "//plugin/pkg/scheduler:go_default_library",
         "//test/integration/framework:go_default_library",
         "//test/utils:go_default_library",
         "//vendor:github.com/golang/glog",
         "//vendor:github.com/renstrom/dedent",
+        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
     ],
 )
6 vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_bench_test.go generated vendored
@@ -56,7 +56,7 @@ func BenchmarkScheduling1000Nodes1000Pods(b *testing.B) {
 func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
 	schedulerConfigFactory, finalFunc := mustSetupScheduler()
 	defer finalFunc()
-	c := schedulerConfigFactory.Client
+	c := schedulerConfigFactory.GetClient()
 
 	nodePreparer := framework.NewIntegrationTestNodePreparer(
 		c,
@@ -74,7 +74,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
 	podCreator.CreatePods()
 
 	for {
-		scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
+		scheduled := schedulerConfigFactory.GetScheduledPodListerIndexer().List()
 		if len(scheduled) >= numScheduledPods {
 			break
 		}
@@ -89,7 +89,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
 	for {
 		// This can potentially affect performance of scheduler, since List() is done under mutex.
 		// TODO: Setup watch on apiserver and wait until all pods scheduled.
-		scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
+		scheduled := schedulerConfigFactory.GetScheduledPodListerIndexer().List()
 		if len(scheduled) >= numScheduledPods+b.N {
 			break
 		}
33 vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go generated vendored
@@ -22,13 +22,14 @@ import (
 	"testing"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/api/v1"
-	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
 	"k8s.io/kubernetes/test/integration/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 
 	"github.com/golang/glog"
 	"github.com/renstrom/dedent"
+	"k8s.io/kubernetes/plugin/pkg/scheduler"
 )
 
 const (
@@ -73,7 +74,7 @@ func TestSchedule100Node3KNodeAffinityPods(t *testing.T) {
 		})
 	}
 	config.nodePreparer = framework.NewIntegrationTestNodePreparer(
-		config.schedulerConfigFactory.Client,
+		config.schedulerSupportFunctions.GetClient(),
 		nodeStrategies,
 		"scheduler-perf-",
 	)
@@ -97,7 +98,7 @@ func TestSchedule100Node3KNodeAffinityPods(t *testing.T) {
 	for i := 0; i < numGroups; i++ {
 		podCreatorConfig.AddStrategy("sched-perf-node-affinity", config.numPods/numGroups,
 			testutils.NewCustomCreatePodStrategy(&v1.Pod{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					GenerateName: "sched-perf-node-affinity-pod-",
 					Annotations:  map[string]string{v1.AffinityAnnotationKey: fmt.Sprintf(affinityTemplate, i)},
 				},
@@ -105,7 +106,7 @@ func TestSchedule100Node3KNodeAffinityPods(t *testing.T) {
 			}),
 		)
 	}
-	config.podCreator = testutils.NewTestPodCreator(config.schedulerConfigFactory.Client, podCreatorConfig)
+	config.podCreator = testutils.NewTestPodCreator(config.schedulerSupportFunctions.GetClient(), podCreatorConfig)
 
 	if min := schedulePods(config); min < threshold30K {
 		t.Errorf("Too small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
@@ -143,19 +144,19 @@ func TestSchedule1000Node30KPods(t *testing.T) {
 // }
 
 type testConfig struct {
-	numPods                int
-	numNodes               int
-	nodePreparer           testutils.TestNodePreparer
-	podCreator             *testutils.TestPodCreator
-	schedulerConfigFactory *factory.ConfigFactory
-	destroyFunc            func()
+	numPods                   int
+	numNodes                  int
+	nodePreparer              testutils.TestNodePreparer
+	podCreator                *testutils.TestPodCreator
+	schedulerSupportFunctions scheduler.Configurator
+	destroyFunc               func()
 }
 
 func baseConfig() *testConfig {
 	schedulerConfigFactory, destroyFunc := mustSetupScheduler()
 	return &testConfig{
-		schedulerConfigFactory: schedulerConfigFactory,
-		destroyFunc:            destroyFunc,
+		schedulerSupportFunctions: schedulerConfigFactory,
+		destroyFunc:               destroyFunc,
 	}
 }
@@ -163,14 +164,14 @@ func defaultSchedulerBenchmarkConfig(numNodes, numPods int) *testConfig {
 	baseConfig := baseConfig()
 
 	nodePreparer := framework.NewIntegrationTestNodePreparer(
-		baseConfig.schedulerConfigFactory.Client,
+		baseConfig.schedulerSupportFunctions.GetClient(),
 		[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
 		"scheduler-perf-",
 	)
 
 	config := testutils.NewTestPodCreatorConfig()
 	config.AddStrategy("sched-test", numPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
-	podCreator := testutils.NewTestPodCreator(baseConfig.schedulerConfigFactory.Client, config)
+	podCreator := testutils.NewTestPodCreator(baseConfig.schedulerSupportFunctions.GetClient(), config)
 
 	baseConfig.nodePreparer = nodePreparer
 	baseConfig.podCreator = podCreator
@@ -202,7 +203,7 @@ func schedulePods(config *testConfig) int32 {
 	// Bake in time for the first pod scheduling event.
 	for {
 		time.Sleep(50 * time.Millisecond)
-		scheduled := config.schedulerConfigFactory.ScheduledPodLister.Indexer.List()
+		scheduled := config.schedulerSupportFunctions.GetScheduledPodListerIndexer().List()
 		// 30,000 pods -> wait till @ least 300 are scheduled to start measuring.
 		// TODO Find out why sometimes there may be scheduling blips in the beggining.
 		if len(scheduled) > config.numPods/100 {
@@ -217,7 +218,7 @@ func schedulePods(config *testConfig) int32 {
 		// This can potentially affect performance of scheduler, since List() is done under mutex.
 		// Listing 10000 pods is an expensive operation, so running it frequently may impact scheduler.
 		// TODO: Setup watch on apiserver and wait until all pods scheduled.
-		scheduled := config.schedulerConfigFactory.ScheduledPodLister.Indexer.List()
+		scheduled := config.schedulerSupportFunctions.GetScheduledPodListerIndexer().List()
 
 		// We will be completed when all pods are done being scheduled.
 		// return the worst-case-scenario interval that was seen during this time.
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/util.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/util.go
generated
vendored
|
@ -21,12 +21,12 @@ import (
|
|||
"net/http/httptest"
|
||||
|
||||
"github.com/golang/glog"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler"
|
||||
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
|
||||
|
@ -40,7 +40,7 @@ import (
|
|||
// remove resources after finished.
|
||||
// Notes on rate limiter:
|
||||
// - client rate limit is set to 5000.
|
||||
func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destroyFunc func()) {
|
||||
func mustSetupScheduler() (schedulerConfigFactory scheduler.Configurator, destroyFunc func()) {
|
||||
|
||||
h := &framework.MasterHolder{Initialized: make(chan struct{})}
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
|
|
7 vendor/k8s.io/kubernetes/test/integration/secrets/secrets_test.go generated vendored
@@ -23,10 +23,11 @@ package secrets
 import (
 	"testing"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -54,7 +55,7 @@ func TestSecrets(t *testing.T) {
 func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
 	// Make a secret object.
 	s := v1.Secret{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "secret",
 			Namespace: ns.Name,
 		},
@@ -70,7 +71,7 @@ func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
 
 	// Template for pods that use a secret.
 	pod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "XXX",
 			Namespace: ns.Name,
 		},
31 vendor/k8s.io/kubernetes/test/integration/serviceaccount/service_account_test.go generated vendored
@@ -38,13 +38,14 @@ import (
 	"k8s.io/apiserver/pkg/authentication/authenticator"
 	"k8s.io/apiserver/pkg/authentication/request/bearertoken"
 	"k8s.io/apiserver/pkg/authentication/request/union"
+	serviceaccountapiserver "k8s.io/apiserver/pkg/authentication/serviceaccount"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
@@ -68,7 +69,7 @@ func TestServiceAccountAutoCreate(t *testing.T) {
 	ns := "test-service-account-creation"
 
 	// Create namespace
-	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
+	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}})
 	if err != nil {
 		t.Fatalf("could not create namespace: %v", err)
 	}
@@ -103,13 +104,13 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
 	name := "my-service-account"
 
 	// Create namespace
-	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
+	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}})
 	if err != nil {
 		t.Fatalf("could not create namespace: %v", err)
 	}
 
 	// Create service account
-	serviceAccount, err := c.Core().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: name}})
+	serviceAccount, err := c.Core().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name}})
 	if err != nil {
 		t.Fatalf("Service Account not created: %v", err)
 	}
@@ -171,7 +172,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) {
 	tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name)
 	err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
 		// Get all secrets in the namespace
-		secrets, err := c.Core().Secrets(ns).List(v1.ListOptions{})
+		secrets, err := c.Core().Secrets(ns).List(metav1.ListOptions{})
 		// Retrieval errors should fail
 		if err != nil {
 			return false, err
@@ -197,7 +198,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
 	ns := "auto-mount-ns"
 
 	// Create "my" namespace
-	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
+	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}})
 	if err != nil && !errors.IsAlreadyExists(err) {
 		t.Fatalf("could not create namespace: %v", err)
 	}
@@ -210,7 +211,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
 
 	// Pod to create
 	protoPod := v1.Pod{
-		ObjectMeta: v1.ObjectMeta{Name: "protopod"},
+		ObjectMeta: metav1.ObjectMeta{Name: "protopod"},
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{
 				{
@@ -277,19 +278,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
 	otherns := "other-ns"
 
 	// Create "my" namespace
-	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: myns}})
+	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}})
 	if err != nil && !errors.IsAlreadyExists(err) {
 		t.Fatalf("could not create namespace: %v", err)
 	}
 
 	// Create "other" namespace
-	_, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: otherns}})
+	_, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}})
 	if err != nil && !errors.IsAlreadyExists(err) {
 		t.Fatalf("could not create namespace: %v", err)
 	}
 
 	// Create "ro" user in myns
-	_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: readOnlyServiceAccountName}})
+	_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}})
 	if err != nil {
 		t.Fatalf("Service Account not created: %v", err)
 	}
@@ -309,7 +310,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
 	doServiceAccountAPIRequests(t, roClient, myns, false, false, false)
 
 	// Create "rw" user in myns
-	_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: readWriteServiceAccountName}})
+	_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}})
 	if err != nil {
 		t.Fatalf("Service Account not created: %v", err)
 	}
@@ -385,7 +386,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
 		}
 
 		// If the user is a service account...
-		if serviceAccountNamespace, serviceAccountName, err := serviceaccount.SplitUsername(username); err == nil {
+		if serviceAccountNamespace, serviceAccountName, err := serviceaccountapiserver.SplitUsername(username); err == nil {
 			// Limit them to their own namespace
 			if serviceAccountNamespace == ns {
 				switch serviceAccountName {
@@ -512,17 +513,17 @@ type testOperation func() error
 
 func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string, authenticated bool, canRead bool, canWrite bool) {
 	testSecret := &v1.Secret{
-		ObjectMeta: v1.ObjectMeta{Name: "testSecret"},
+		ObjectMeta: metav1.ObjectMeta{Name: "testSecret"},
 		Data:       map[string][]byte{"test": []byte("data")},
 	}
 
 	readOps := []testOperation{
 		func() error {
-			_, err := c.Core().Secrets(ns).List(v1.ListOptions{})
+			_, err := c.Core().Secrets(ns).List(metav1.ListOptions{})
 			return err
 		},
 		func() error {
-			_, err := c.Core().Pods(ns).List(v1.ListOptions{})
+			_, err := c.Core().Pods(ns).List(metav1.ListOptions{})
 			return err
 		},
 	}
6 vendor/k8s.io/kubernetes/test/integration/storageclasses/storage_classes_test.go generated vendored
@@ -24,13 +24,13 @@ import (
 	"testing"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
 	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
 	storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -56,7 +56,7 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Names
 		TypeMeta: metav1.TypeMeta{
 			Kind: "StorageClass",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "gold",
 		},
 		Provisioner: provisionerPluginName,
@@ -69,7 +69,7 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Names
 
 	// Template for pvcs that specify a storage class
 	pvc := &v1.PersistentVolumeClaim{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "XXX",
 			Namespace: ns.Name,
 			Annotations: map[string]string{
17 vendor/k8s.io/kubernetes/test/integration/thirdparty/thirdparty_test.go generated vendored
@@ -33,11 +33,10 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/diff"
 	"k8s.io/apimachinery/pkg/util/wait"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -54,7 +53,7 @@ func TestThirdPartyDiscovery(t *testing.T) {
 	once := sync.Once{}
 	deleteFoo := installThirdParty(t, client, clientConfig,
 		&extensions.ThirdPartyResource{
-			ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
+			ObjectMeta: metav1.ObjectMeta{Name: "foo.company.com"},
 			Versions:   []extensions.APIVersion{{Name: version}},
 		}, group, version, "foos",
 	)
@@ -117,8 +116,8 @@ func TestThirdPartyMultiple(t *testing.T) {
 var versionsToTest = []string{"v1"}
 
 type Foo struct {
-	metav1.TypeMeta `json:",inline"`
-	v1.ObjectMeta   `json:"metadata,omitempty" description:"standard object metadata"`
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"`
 
 	SomeField  string `json:"someField"`
 	OtherField int    `json:"otherField"`
@@ -184,7 +183,7 @@ func DoTestInstallMultipleAPIs(t *testing.T, client clientset.Interface, clientC
 
 	deleteFoo := installThirdParty(t, client, clientConfig,
 		&extensions.ThirdPartyResource{
-			ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
+			ObjectMeta: metav1.ObjectMeta{Name: "foo.company.com"},
 			Versions:   []extensions.APIVersion{{Name: version}},
 		}, group, version, "foos",
 	)
@@ -193,7 +192,7 @@ func DoTestInstallMultipleAPIs(t *testing.T, client clientset.Interface, clientC
 	// TODO make multiple resources in one version work
 	// deleteBar = installThirdParty(t, client, clientConfig,
 	// 	&extensions.ThirdPartyResource{
-	// 		ObjectMeta: v1.ObjectMeta{Name: "bar.company.com"},
+	// 		ObjectMeta: metav1.ObjectMeta{Name: "bar.company.com"},
 	// 		Versions:   []extensions.APIVersion{{Name: version}},
 	// 	}, group, version, "bars",
 	// )
@@ -211,7 +210,7 @@ func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client clientset.Interf
 
 	deleteFoo := installThirdParty(t, client, clientConfig,
 		&extensions.ThirdPartyResource{
-			ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
+			ObjectMeta: metav1.ObjectMeta{Name: "foo.company.com"},
 			Versions:   []extensions.APIVersion{{Name: version}},
 		}, group, version, "foos",
 	)
@@ -226,7 +225,7 @@ func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client clientset.Interf
 	}
 
 	expectedObj := Foo{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "test",
 			Namespace: "default",
 		},
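The Foo type above shows the same migration for user-defined (ThirdPartyResource) objects: both embedded metadata structs now come from apimachinery. A sketch of the post-migration embedding, with JSON tags as in the test:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Foo is a custom object: embedding metav1.TypeMeta and metav1.ObjectMeta
// gives it the standard kind/apiVersion and metadata fields on the wire.
type Foo struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	SomeField  string `json:"someField"`
	OtherField int    `json:"otherField"`
}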
7 vendor/k8s.io/kubernetes/test/integration/volume/attach_detach_test.go generated vendored
@@ -23,12 +23,13 @@ import (
 	"testing"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach"
@@ -40,7 +41,7 @@ import (
 
 func fakePodWithVol(namespace string) *v1.Pod {
 	fakePod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      "fakepod",
 		},
@@ -82,7 +83,7 @@ func TestPodDeletionWithDswp(t *testing.T) {
 	namespaceName := "test-pod-deletion"
 
 	node := &v1.Node{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "node-sandbox",
 			Annotations: map[string]string{
 				volumehelper.ControllerManagedAttachAnnotation: "true",
42 vendor/k8s.io/kubernetes/test/integration/volume/persistent_volumes_test.go generated vendored
@@ -29,13 +29,13 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/watch"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
 	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
 	storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 	persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
 	"k8s.io/kubernetes/pkg/volume"
@@ -116,7 +116,7 @@ func TestPersistentVolumeRecycler(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (PersistenceVolumes).
-	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
+	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
 
 	stopCh := make(chan struct{})
 	go ctrl.Run(stopCh)
@@ -170,7 +170,7 @@ func TestPersistentVolumeDeleter(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (PersistenceVolumes).
-	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
+	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
 
 	stopCh := make(chan struct{})
 	go ctrl.Run(stopCh)
@@ -229,7 +229,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (PersistenceVolumes).
-	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
+	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
 
 	stopCh := make(chan struct{})
 	go ctrl.Run(stopCh)
@@ -244,7 +244,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 		counter += 1
 		clone, _ := api.Scheme.DeepCopy(pvc)
 		newPvc, _ := clone.(*v1.PersistentVolumeClaim)
-		newPvc.ObjectMeta = v1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
+		newPvc.ObjectMeta = metav1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
 		claim, err := testClient.PersistentVolumeClaims(ns.Name).Create(newPvc)
 		if err != nil {
 			t.Fatalf("Error creating newPvc: %v", err)
@@ -300,7 +300,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (PersistenceVolumes).
-	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
+	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
 
 	stopCh := make(chan struct{})
 	go controller.Run(stopCh)
@@ -380,7 +380,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (PersistenceVolumes).
-	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
+	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
 
 	stopCh := make(chan struct{})
 	go controller.Run(stopCh)
@@ -479,7 +479,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (PersistenceVolumes).
-	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
+	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
 
 	stopCh := make(chan struct{})
 	go controller.Run(stopCh)
@@ -568,7 +568,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (PersistenceVolumes).
-	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
+	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
 
 	controllerStopCh := make(chan struct{})
 	go binder.Run(controllerStopCh)
@@ -637,7 +637,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 		// Modify PVC
 		i := rand.Intn(objCount)
 		name := "pvc-" + strconv.Itoa(i)
-		pvc, err := testClient.PersistentVolumeClaims(v1.NamespaceDefault).Get(name, metav1.GetOptions{})
+		pvc, err := testClient.PersistentVolumeClaims(metav1.NamespaceDefault).Get(name, metav1.GetOptions{})
 		if err != nil {
 			// Silently ignore error, the PVC may have be already
 			// deleted or not exists yet.
@@ -649,7 +649,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 		} else {
 			pvc.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int())
 		}
-		_, err = testClient.PersistentVolumeClaims(v1.NamespaceDefault).Update(pvc)
+		_, err = testClient.PersistentVolumeClaims(metav1.NamespaceDefault).Update(pvc)
 		if err != nil {
 			// Silently ignore error, the PVC may have been updated by
 			// the controller.
@@ -856,14 +856,14 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (PersistenceVolumes and StorageClasses).
-	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
-	defer testClient.Storage().StorageClasses().DeleteCollection(nil, v1.ListOptions{})
+	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
+	defer testClient.Storage().StorageClasses().DeleteCollection(nil, metav1.ListOptions{})
 
 	storageClass := storage.StorageClass{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "StorageClass",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "gold",
 		},
 		Provisioner: provisionerPluginName,
@@ -901,7 +901,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
 	glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound")
 
 	// check that we have enough bound PVs
-	pvList, err := testClient.PersistentVolumes().List(v1.ListOptions{})
+	pvList, err := testClient.PersistentVolumes().List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to list volumes: %s", err)
 	}
@@ -924,7 +924,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
 	// Wait for the PVs to get deleted by listing remaining volumes
 	// (delete events were unreliable)
 	for {
-		volumes, err := testClient.PersistentVolumes().List(v1.ListOptions{})
+		volumes, err := testClient.PersistentVolumes().List(metav1.ListOptions{})
 		if err != nil {
 			t.Fatalf("Failed to list volumes: %v", err)
 		}
@@ -953,7 +953,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (PersistenceVolumes).
-	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
+	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
 
 	stopCh := make(chan struct{})
 	go controller.Run(stopCh)
@@ -1128,11 +1128,11 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio
 		EnableDynamicProvisioning: true,
 	})
 
-	watchPV, err := testClient.PersistentVolumes().Watch(v1.ListOptions{})
+	watchPV, err := testClient.PersistentVolumes().Watch(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to watch PersistentVolumes: %v", err)
 	}
-	watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(v1.ListOptions{})
+	watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err)
 	}
@@ -1142,7 +1142,7 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio
 
 func createPV(name, path, cap string, mode []v1.PersistentVolumeAccessMode, reclaim v1.PersistentVolumeReclaimPolicy) *v1.PersistentVolume {
 	return &v1.PersistentVolume{
-		ObjectMeta: v1.ObjectMeta{Name: name},
+		ObjectMeta: metav1.ObjectMeta{Name: name},
 		Spec: v1.PersistentVolumeSpec{
 			PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: path}},
 			Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)},
@@ -1154,7 +1154,7 @@ func createPV(name, path, cap string, mode []v1.PersistentVolumeAccessMode, reclaim v1.PersistentVolumeReclaimPolicy) *v1.PersistentVolume {
 
 func createPVC(name, namespace, cap string, mode []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
 	return &v1.PersistentVolumeClaim{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Namespace: namespace,
 		},
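Many hunks in this file are the same cleanup idiom: tests that create non-namespaced objects defer a DeleteCollection with empty, match-everything options, which is why they cannot run in parallel. A sketch of that idiom after the metav1 move; nil delete options mean defaults:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// cleanupPVs deletes every PersistentVolume in the cluster; an empty
// metav1.ListOptions selects all objects, nil DeleteOptions use defaults.
func cleanupPVs(c clientset.Interface) error {
	return c.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{})
}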