Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
This commit is contained in:
parent
d6ab91be27
commit
8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions
47
vendor/k8s.io/kubernetes/test/integration/BUILD
generated
vendored
Normal file
47
vendor/k8s.io/kubernetes/test/integration/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"utils.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/api/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//test/integration/discoverysummarizer:all-srcs",
|
||||
"//test/integration/examples:all-srcs",
|
||||
"//test/integration/federation:all-srcs",
|
||||
"//test/integration/framework:all-srcs",
|
||||
"//test/integration/metrics:all-srcs",
|
||||
"//test/integration/objectmeta:all-srcs",
|
||||
"//test/integration/openshift:all-srcs",
|
||||
"//test/integration/scheduler_perf:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
347
vendor/k8s.io/kubernetes/test/integration/auth/accessreview_test.go
generated
vendored
Normal file
347
vendor/k8s.io/kubernetes/test/integration/auth/accessreview_test.go
generated
vendored
Normal file
|
@ -0,0 +1,347 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
authorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/plugin/pkg/admission/admit"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
// Inject into master an authorizer that uses user info.
|
||||
// TODO(etune): remove this test once a more comprehensive built-in authorizer is implemented.
|
||||
type sarAuthorizer struct{}
|
||||
|
||||
func (sarAuthorizer) Authorize(a authorizer.Attributes) (bool, string, error) {
|
||||
if a.GetUser().GetName() == "dave" {
|
||||
return false, "no", errors.New("I'm sorry, Dave")
|
||||
}
|
||||
|
||||
return true, "you're not dave", nil
|
||||
}
|
||||
|
||||
func alwaysAlice(req *http.Request) (user.Info, bool, error) {
|
||||
return &user.DefaultInfo{
|
||||
Name: "alice",
|
||||
}, true, nil
|
||||
}
|
||||
|
||||
func TestSubjectAccessReview(t *testing.T) {
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice)
|
||||
masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
|
||||
masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
|
||||
_, s := framework.RunAMaster(masterConfig)
|
||||
defer s.Close()
|
||||
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
sar *authorizationapi.SubjectAccessReview
|
||||
expectedError string
|
||||
expectedStatus authorizationapi.SubjectAccessReviewStatus
|
||||
}{
|
||||
{
|
||||
name: "simple allow",
|
||||
sar: &authorizationapi.SubjectAccessReview{
|
||||
Spec: authorizationapi.SubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationapi.ResourceAttributes{
|
||||
Verb: "list",
|
||||
Group: api.GroupName,
|
||||
Version: "v1",
|
||||
Resource: "pods",
|
||||
},
|
||||
User: "alice",
|
||||
},
|
||||
},
|
||||
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
|
||||
Allowed: true,
|
||||
Reason: "you're not dave",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple deny",
|
||||
sar: &authorizationapi.SubjectAccessReview{
|
||||
Spec: authorizationapi.SubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationapi.ResourceAttributes{
|
||||
Verb: "list",
|
||||
Group: api.GroupName,
|
||||
Version: "v1",
|
||||
Resource: "pods",
|
||||
},
|
||||
User: "dave",
|
||||
},
|
||||
},
|
||||
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
|
||||
Allowed: false,
|
||||
Reason: "no",
|
||||
EvaluationError: "I'm sorry, Dave",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple error",
|
||||
sar: &authorizationapi.SubjectAccessReview{
|
||||
Spec: authorizationapi.SubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationapi.ResourceAttributes{
|
||||
Verb: "list",
|
||||
Group: api.GroupName,
|
||||
Version: "v1",
|
||||
Resource: "pods",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: "at least one of user or group must be specified",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
response, err := clientset.Authorization().SubjectAccessReviews().Create(test.sar)
|
||||
switch {
|
||||
case err == nil && len(test.expectedError) == 0:
|
||||
|
||||
case err != nil && strings.Contains(err.Error(), test.expectedError):
|
||||
continue
|
||||
|
||||
case err != nil && len(test.expectedError) != 0:
|
||||
t.Errorf("%s: unexpected error: %v", test.name, err)
|
||||
continue
|
||||
default:
|
||||
t.Errorf("%s: expected %v, got %v", test.name, test.expectedError, err)
|
||||
continue
|
||||
}
|
||||
if response.Status != test.expectedStatus {
|
||||
t.Errorf("%s: expected %v, got %v", test.name, test.expectedStatus, response.Status)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelfSubjectAccessReview(t *testing.T) {
|
||||
username := "alice"
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) {
|
||||
return &user.DefaultInfo{Name: username}, true, nil
|
||||
})
|
||||
masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
|
||||
masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
|
||||
_, s := framework.RunAMaster(masterConfig)
|
||||
defer s.Close()
|
||||
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
username string
|
||||
sar *authorizationapi.SelfSubjectAccessReview
|
||||
expectedError string
|
||||
expectedStatus authorizationapi.SubjectAccessReviewStatus
|
||||
}{
|
||||
{
|
||||
name: "simple allow",
|
||||
username: "alice",
|
||||
sar: &authorizationapi.SelfSubjectAccessReview{
|
||||
Spec: authorizationapi.SelfSubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationapi.ResourceAttributes{
|
||||
Verb: "list",
|
||||
Group: api.GroupName,
|
||||
Version: "v1",
|
||||
Resource: "pods",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
|
||||
Allowed: true,
|
||||
Reason: "you're not dave",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple deny",
|
||||
username: "dave",
|
||||
sar: &authorizationapi.SelfSubjectAccessReview{
|
||||
Spec: authorizationapi.SelfSubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationapi.ResourceAttributes{
|
||||
Verb: "list",
|
||||
Group: api.GroupName,
|
||||
Version: "v1",
|
||||
Resource: "pods",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
|
||||
Allowed: false,
|
||||
Reason: "no",
|
||||
EvaluationError: "I'm sorry, Dave",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
username = test.username
|
||||
|
||||
response, err := clientset.Authorization().SelfSubjectAccessReviews().Create(test.sar)
|
||||
switch {
|
||||
case err == nil && len(test.expectedError) == 0:
|
||||
|
||||
case err != nil && strings.Contains(err.Error(), test.expectedError):
|
||||
continue
|
||||
|
||||
case err != nil && len(test.expectedError) != 0:
|
||||
t.Errorf("%s: unexpected error: %v", test.name, err)
|
||||
continue
|
||||
default:
|
||||
t.Errorf("%s: expected %v, got %v", test.name, test.expectedError, err)
|
||||
continue
|
||||
}
|
||||
if response.Status != test.expectedStatus {
|
||||
t.Errorf("%s: expected %v, got %v", test.name, test.expectedStatus, response.Status)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocalSubjectAccessReview(t *testing.T) {
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
masterConfig.GenericConfig.Authenticator = authenticator.RequestFunc(alwaysAlice)
|
||||
masterConfig.GenericConfig.Authorizer = sarAuthorizer{}
|
||||
masterConfig.GenericConfig.AdmissionControl = admit.NewAlwaysAdmit()
|
||||
_, s := framework.RunAMaster(masterConfig)
|
||||
defer s.Close()
|
||||
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
sar *authorizationapi.LocalSubjectAccessReview
|
||||
expectedError string
|
||||
expectedStatus authorizationapi.SubjectAccessReviewStatus
|
||||
}{
|
||||
{
|
||||
name: "simple allow",
|
||||
namespace: "foo",
|
||||
sar: &authorizationapi.LocalSubjectAccessReview{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: "foo"},
|
||||
Spec: authorizationapi.SubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationapi.ResourceAttributes{
|
||||
Verb: "list",
|
||||
Group: api.GroupName,
|
||||
Version: "v1",
|
||||
Resource: "pods",
|
||||
Namespace: "foo",
|
||||
},
|
||||
User: "alice",
|
||||
},
|
||||
},
|
||||
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
|
||||
Allowed: true,
|
||||
Reason: "you're not dave",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple deny",
|
||||
namespace: "foo",
|
||||
sar: &authorizationapi.LocalSubjectAccessReview{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: "foo"},
|
||||
Spec: authorizationapi.SubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationapi.ResourceAttributes{
|
||||
Verb: "list",
|
||||
Group: api.GroupName,
|
||||
Version: "v1",
|
||||
Resource: "pods",
|
||||
Namespace: "foo",
|
||||
},
|
||||
User: "dave",
|
||||
},
|
||||
},
|
||||
expectedStatus: authorizationapi.SubjectAccessReviewStatus{
|
||||
Allowed: false,
|
||||
Reason: "no",
|
||||
EvaluationError: "I'm sorry, Dave",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "conflicting namespace",
|
||||
namespace: "foo",
|
||||
sar: &authorizationapi.LocalSubjectAccessReview{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: "foo"},
|
||||
Spec: authorizationapi.SubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationapi.ResourceAttributes{
|
||||
Verb: "list",
|
||||
Group: api.GroupName,
|
||||
Version: "v1",
|
||||
Resource: "pods",
|
||||
Namespace: "bar",
|
||||
},
|
||||
User: "dave",
|
||||
},
|
||||
},
|
||||
expectedError: "must match metadata.namespace",
|
||||
},
|
||||
{
|
||||
name: "missing namespace",
|
||||
namespace: "foo",
|
||||
sar: &authorizationapi.LocalSubjectAccessReview{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: "foo"},
|
||||
Spec: authorizationapi.SubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationapi.ResourceAttributes{
|
||||
Verb: "list",
|
||||
Group: api.GroupName,
|
||||
Version: "v1",
|
||||
Resource: "pods",
|
||||
},
|
||||
User: "dave",
|
||||
},
|
||||
},
|
||||
expectedError: "must match metadata.namespace",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
response, err := clientset.Authorization().LocalSubjectAccessReviews(test.namespace).Create(test.sar)
|
||||
switch {
|
||||
case err == nil && len(test.expectedError) == 0:
|
||||
|
||||
case err != nil && strings.Contains(err.Error(), test.expectedError):
|
||||
continue
|
||||
|
||||
case err != nil && len(test.expectedError) != 0:
|
||||
t.Errorf("%s: unexpected error: %v", test.name, err)
|
||||
continue
|
||||
default:
|
||||
t.Errorf("%s: expected %v, got %v", test.name, test.expectedError, err)
|
||||
continue
|
||||
}
|
||||
if response.Status != test.expectedStatus {
|
||||
t.Errorf("%s: expected %#v, got %#v", test.name, test.expectedStatus, response.Status)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
1287
vendor/k8s.io/kubernetes/test/integration/auth/auth_test.go
generated
vendored
Normal file
1287
vendor/k8s.io/kubernetes/test/integration/auth/auth_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
545
vendor/k8s.io/kubernetes/test/integration/auth/rbac_test.go
generated
vendored
Normal file
545
vendor/k8s.io/kubernetes/test/integration/auth/rbac_test.go
generated
vendored
Normal file
|
@ -0,0 +1,545 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
"k8s.io/apiserver/pkg/authentication/request/bearertoken"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/client/transport"
|
||||
"k8s.io/kubernetes/pkg/master"
|
||||
"k8s.io/kubernetes/pkg/registry/generic"
|
||||
"k8s.io/kubernetes/pkg/registry/rbac/clusterrole"
|
||||
clusterrolestore "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage"
|
||||
"k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding"
|
||||
clusterrolebindingstore "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage"
|
||||
"k8s.io/kubernetes/pkg/registry/rbac/role"
|
||||
rolestore "k8s.io/kubernetes/pkg/registry/rbac/role/storage"
|
||||
"k8s.io/kubernetes/pkg/registry/rbac/rolebinding"
|
||||
rolebindingstore "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage"
|
||||
"k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/anytoken"
|
||||
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func newFakeAuthenticator() authenticator.Request {
|
||||
return bearertoken.New(anytoken.AnyTokenAuthenticator{})
|
||||
}
|
||||
|
||||
func clientForUser(user string) *http.Client {
|
||||
return &http.Client{
|
||||
Transport: transport.NewBearerAuthRoundTripper(
|
||||
user,
|
||||
transport.DebugWrappers(http.DefaultTransport),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
func clientsetForUser(user string, config *restclient.Config) clientset.Interface {
|
||||
configCopy := *config
|
||||
configCopy.BearerToken = user
|
||||
return clientset.NewForConfigOrDie(&configCopy)
|
||||
}
|
||||
|
||||
type testRESTOptionsGetter struct {
|
||||
config *master.Config
|
||||
}
|
||||
|
||||
func (getter *testRESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) {
|
||||
storageConfig, err := getter.config.StorageFactory.NewConfig(resource)
|
||||
if err != nil {
|
||||
return generic.RESTOptions{}, fmt.Errorf("failed to get storage: %v", err)
|
||||
}
|
||||
return generic.RESTOptions{StorageConfig: storageConfig, Decorator: generic.UndecoratedStorage, ResourcePrefix: resource.Resource}, nil
|
||||
}
|
||||
|
||||
func newRBACAuthorizer(config *master.Config) authorizer.Authorizer {
|
||||
optsGetter := &testRESTOptionsGetter{config}
|
||||
roleRegistry := role.AuthorizerAdapter{Registry: role.NewRegistry(rolestore.NewREST(optsGetter))}
|
||||
roleBindingRegistry := rolebinding.AuthorizerAdapter{Registry: rolebinding.NewRegistry(rolebindingstore.NewREST(optsGetter))}
|
||||
clusterRoleRegistry := clusterrole.AuthorizerAdapter{Registry: clusterrole.NewRegistry(clusterrolestore.NewREST(optsGetter))}
|
||||
clusterRoleBindingRegistry := clusterrolebinding.AuthorizerAdapter{Registry: clusterrolebinding.NewRegistry(clusterrolebindingstore.NewREST(optsGetter))}
|
||||
return rbac.New(roleRegistry, roleBindingRegistry, clusterRoleRegistry, clusterRoleBindingRegistry)
|
||||
}
|
||||
|
||||
// bootstrapRoles are a set of RBAC roles which will be populated before the test.
|
||||
type bootstrapRoles struct {
|
||||
roles []rbacapi.Role
|
||||
roleBindings []rbacapi.RoleBinding
|
||||
clusterRoles []rbacapi.ClusterRole
|
||||
clusterRoleBindings []rbacapi.ClusterRoleBinding
|
||||
}
|
||||
|
||||
// bootstrap uses the provided client to create the bootstrap roles and role bindings.
|
||||
//
|
||||
// client should be authenticated as the RBAC super user.
|
||||
func (b bootstrapRoles) bootstrap(client clientset.Interface) error {
|
||||
for _, r := range b.clusterRoles {
|
||||
_, err := client.Rbac().ClusterRoles().Create(&r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to make request: %v", err)
|
||||
}
|
||||
}
|
||||
for _, r := range b.roles {
|
||||
_, err := client.Rbac().Roles(r.Namespace).Create(&r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to make request: %v", err)
|
||||
}
|
||||
}
|
||||
for _, r := range b.clusterRoleBindings {
|
||||
_, err := client.Rbac().ClusterRoleBindings().Create(&r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to make request: %v", err)
|
||||
}
|
||||
}
|
||||
for _, r := range b.roleBindings {
|
||||
_, err := client.Rbac().RoleBindings(r.Namespace).Create(&r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to make request: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// request is a test case which can.
|
||||
type request struct {
|
||||
// The username attempting to send the request.
|
||||
user string
|
||||
|
||||
// Resource metadata
|
||||
verb string
|
||||
apiGroup string
|
||||
resource string
|
||||
namespace string
|
||||
name string
|
||||
|
||||
// The actual resource.
|
||||
body string
|
||||
|
||||
// The expected return status of this request.
|
||||
expectedStatus int
|
||||
}
|
||||
|
||||
func (r request) String() string {
|
||||
return fmt.Sprintf("%s %s %s", r.user, r.verb, r.resource)
|
||||
}
|
||||
|
||||
type statusCode int
|
||||
|
||||
func (s statusCode) String() string {
|
||||
return fmt.Sprintf("%d %s", int(s), http.StatusText(int(s)))
|
||||
}
|
||||
|
||||
// Declare a set of raw objects to use.
|
||||
var (
|
||||
writeJobsRoleBinding = `
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1alpha1",
|
||||
"kind": "RoleBinding",
|
||||
"metadata": {
|
||||
"name": "pi"%s
|
||||
},
|
||||
"roleRef": {
|
||||
"apiGroup": "rbac.authorization.k8s.io",
|
||||
"kind": "ClusterRole",
|
||||
"name": "write-jobs"
|
||||
},
|
||||
"subjects": [{
|
||||
"apiVersion": "rbac/v1alpha1",
|
||||
"kind": "User",
|
||||
"name": "admin"
|
||||
}]
|
||||
}`
|
||||
|
||||
aJob = `
|
||||
{
|
||||
"apiVersion": "batch/v1",
|
||||
"kind": "Job",
|
||||
"metadata": {
|
||||
"name": "pi"%s
|
||||
},
|
||||
"spec": {
|
||||
"template": {
|
||||
"metadata": {
|
||||
"name": "a",
|
||||
"labels": {
|
||||
"name": "pijob"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"name": "pi",
|
||||
"image": "perl",
|
||||
"command": [
|
||||
"perl",
|
||||
"-Mbignum=bpi",
|
||||
"-wle",
|
||||
"print bpi(2000)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"restartPolicy": "Never"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
podNamespace = `
|
||||
{
|
||||
"apiVersion": "` + api.Registry.GroupOrDie(api.GroupName).GroupVersion.String() + `",
|
||||
"kind": "Namespace",
|
||||
"metadata": {
|
||||
"name": "pod-namespace"%s
|
||||
}
|
||||
}
|
||||
`
|
||||
jobNamespace = `
|
||||
{
|
||||
"apiVersion": "` + api.Registry.GroupOrDie(api.GroupName).GroupVersion.String() + `",
|
||||
"kind": "Namespace",
|
||||
"metadata": {
|
||||
"name": "job-namespace"%s
|
||||
}
|
||||
}
|
||||
`
|
||||
forbiddenNamespace = `
|
||||
{
|
||||
"apiVersion": "` + api.Registry.GroupOrDie(api.GroupName).GroupVersion.String() + `",
|
||||
"kind": "Namespace",
|
||||
"metadata": {
|
||||
"name": "forbidden-namespace"%s
|
||||
}
|
||||
}
|
||||
`
|
||||
)
|
||||
|
||||
// Declare some PolicyRules beforehand.
|
||||
var (
|
||||
ruleAllowAll = rbacapi.NewRule("*").Groups("*").Resources("*").RuleOrDie()
|
||||
ruleReadPods = rbacapi.NewRule("list", "get", "watch").Groups("").Resources("pods").RuleOrDie()
|
||||
ruleWriteJobs = rbacapi.NewRule("*").Groups("batch").Resources("*").RuleOrDie()
|
||||
)
|
||||
|
||||
func TestRBAC(t *testing.T) {
|
||||
superUser := "admin/system:masters"
|
||||
|
||||
tests := []struct {
|
||||
bootstrapRoles bootstrapRoles
|
||||
|
||||
requests []request
|
||||
}{
|
||||
{
|
||||
bootstrapRoles: bootstrapRoles{
|
||||
clusterRoles: []rbacapi.ClusterRole{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "allow-all"},
|
||||
Rules: []rbacapi.PolicyRule{ruleAllowAll},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "read-pods"},
|
||||
Rules: []rbacapi.PolicyRule{ruleReadPods},
|
||||
},
|
||||
},
|
||||
clusterRoleBindings: []rbacapi.ClusterRoleBinding{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "read-pods"},
|
||||
Subjects: []rbacapi.Subject{
|
||||
{Kind: "User", Name: "pod-reader"},
|
||||
},
|
||||
RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "read-pods"},
|
||||
},
|
||||
},
|
||||
},
|
||||
requests: []request{
|
||||
// Create the namespace used later in the test
|
||||
{superUser, "POST", "", "namespaces", "", "", podNamespace, http.StatusCreated},
|
||||
|
||||
{superUser, "GET", "", "pods", "", "", "", http.StatusOK},
|
||||
{superUser, "GET", "", "pods", "pod-namespace", "a", "", http.StatusNotFound},
|
||||
{superUser, "POST", "", "pods", "pod-namespace", "", aPod, http.StatusCreated},
|
||||
{superUser, "GET", "", "pods", "pod-namespace", "a", "", http.StatusOK},
|
||||
|
||||
{"bob", "GET", "", "pods", "", "", "", http.StatusForbidden},
|
||||
{"bob", "GET", "", "pods", "pod-namespace", "a", "", http.StatusForbidden},
|
||||
|
||||
{"pod-reader", "GET", "", "pods", "", "", "", http.StatusOK},
|
||||
{"pod-reader", "POST", "", "pods", "pod-namespace", "", aPod, http.StatusForbidden},
|
||||
},
|
||||
},
|
||||
{
|
||||
bootstrapRoles: bootstrapRoles{
|
||||
clusterRoles: []rbacapi.ClusterRole{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "write-jobs"},
|
||||
Rules: []rbacapi.PolicyRule{ruleWriteJobs},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "create-rolebindings"},
|
||||
Rules: []rbacapi.PolicyRule{
|
||||
rbacapi.NewRule("create").Groups("rbac.authorization.k8s.io").Resources("rolebindings").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "bind-any-clusterrole"},
|
||||
Rules: []rbacapi.PolicyRule{
|
||||
rbacapi.NewRule("bind").Groups("rbac.authorization.k8s.io").Resources("clusterroles").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
},
|
||||
clusterRoleBindings: []rbacapi.ClusterRoleBinding{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "write-jobs"},
|
||||
Subjects: []rbacapi.Subject{{Kind: "User", Name: "job-writer"}},
|
||||
RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "write-jobs"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "create-rolebindings"},
|
||||
Subjects: []rbacapi.Subject{
|
||||
{Kind: "User", Name: "job-writer"},
|
||||
{Kind: "User", Name: "nonescalating-rolebinding-writer"},
|
||||
{Kind: "User", Name: "any-rolebinding-writer"},
|
||||
},
|
||||
RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "create-rolebindings"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "bind-any-clusterrole"},
|
||||
Subjects: []rbacapi.Subject{{Kind: "User", Name: "any-rolebinding-writer"}},
|
||||
RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "bind-any-clusterrole"},
|
||||
},
|
||||
},
|
||||
roleBindings: []rbacapi.RoleBinding{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "write-jobs", Namespace: "job-namespace"},
|
||||
Subjects: []rbacapi.Subject{{Kind: "User", Name: "job-writer-namespace"}},
|
||||
RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "write-jobs"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "create-rolebindings", Namespace: "job-namespace"},
|
||||
Subjects: []rbacapi.Subject{{Kind: "User", Name: "job-writer-namespace"}},
|
||||
RoleRef: rbacapi.RoleRef{Kind: "ClusterRole", Name: "create-rolebindings"},
|
||||
},
|
||||
},
|
||||
},
|
||||
requests: []request{
|
||||
// Create the namespace used later in the test
|
||||
{superUser, "POST", "", "namespaces", "", "", jobNamespace, http.StatusCreated},
|
||||
{superUser, "POST", "", "namespaces", "", "", forbiddenNamespace, http.StatusCreated},
|
||||
|
||||
{"user-with-no-permissions", "POST", "batch", "jobs", "job-namespace", "", aJob, http.StatusForbidden},
|
||||
{"user-with-no-permissions", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusForbidden},
|
||||
|
||||
// job-writer-namespace cannot write to the "forbidden-namespace"
|
||||
{"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "", "", http.StatusForbidden},
|
||||
{"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusForbidden},
|
||||
{"job-writer-namespace", "POST", "batch", "jobs", "forbidden-namespace", "", aJob, http.StatusForbidden},
|
||||
{"job-writer-namespace", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusForbidden},
|
||||
|
||||
// job-writer can write to any namespace
|
||||
{"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "", "", http.StatusOK},
|
||||
{"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusNotFound},
|
||||
{"job-writer", "POST", "batch", "jobs", "forbidden-namespace", "", aJob, http.StatusCreated},
|
||||
{"job-writer", "GET", "batch", "jobs", "forbidden-namespace", "pi", "", http.StatusOK},
|
||||
|
||||
{"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "", "", http.StatusOK},
|
||||
{"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusNotFound},
|
||||
{"job-writer-namespace", "POST", "batch", "jobs", "job-namespace", "", aJob, http.StatusCreated},
|
||||
{"job-writer-namespace", "GET", "batch", "jobs", "job-namespace", "pi", "", http.StatusOK},
|
||||
|
||||
// cannot bind role anywhere
|
||||
{"user-with-no-permissions", "POST", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "", writeJobsRoleBinding, http.StatusForbidden},
|
||||
// can only bind role in namespace where they have covering permissions
|
||||
{"job-writer-namespace", "POST", "rbac.authorization.k8s.io", "rolebindings", "forbidden-namespace", "", writeJobsRoleBinding, http.StatusForbidden},
|
||||
{"job-writer-namespace", "POST", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "", writeJobsRoleBinding, http.StatusCreated},
|
||||
{superUser, "DELETE", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "pi", "", http.StatusOK},
|
||||
// can bind role in any namespace where they have covering permissions
|
||||
{"job-writer", "POST", "rbac.authorization.k8s.io", "rolebindings", "forbidden-namespace", "", writeJobsRoleBinding, http.StatusCreated},
|
||||
{superUser, "DELETE", "rbac.authorization.k8s.io", "rolebindings", "forbidden-namespace", "pi", "", http.StatusOK},
|
||||
// cannot bind role because they don't have covering permissions
|
||||
{"nonescalating-rolebinding-writer", "POST", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "", writeJobsRoleBinding, http.StatusForbidden},
|
||||
// can bind role because they have explicit bind permission
|
||||
{"any-rolebinding-writer", "POST", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "", writeJobsRoleBinding, http.StatusCreated},
|
||||
{superUser, "DELETE", "rbac.authorization.k8s.io", "rolebindings", "job-namespace", "pi", "", http.StatusOK},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range tests {
|
||||
// Create an API Server.
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(masterConfig)
|
||||
masterConfig.GenericConfig.Authenticator = newFakeAuthenticator()
|
||||
_, s := framework.RunAMaster(masterConfig)
|
||||
defer s.Close()
|
||||
|
||||
clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
|
||||
|
||||
// Bootstrap the API Server with the test case's initial roles.
|
||||
if err := tc.bootstrapRoles.bootstrap(clientsetForUser(superUser, clientConfig)); err != nil {
|
||||
t.Errorf("case %d: failed to apply initial roles: %v", i, err)
|
||||
continue
|
||||
}
|
||||
previousResourceVersion := make(map[string]float64)
|
||||
|
||||
for j, r := range tc.requests {
|
||||
testGroup, ok := testapi.Groups[r.apiGroup]
|
||||
if !ok {
|
||||
t.Errorf("case %d %d: unknown api group %q, %s", i, j, r.apiGroup, r)
|
||||
continue
|
||||
}
|
||||
path := testGroup.ResourcePath(r.resource, r.namespace, r.name)
|
||||
|
||||
var body io.Reader
|
||||
if r.body != "" {
|
||||
sub := ""
|
||||
if r.verb == "PUT" {
|
||||
// For update operations, insert previous resource version
|
||||
if resVersion := previousResourceVersion[getPreviousResourceVersionKey(path, "")]; resVersion != 0 {
|
||||
sub += fmt.Sprintf(",\"resourceVersion\": \"%v\"", resVersion)
|
||||
}
|
||||
}
|
||||
// For any creation requests, add the namespace to the object meta.
|
||||
if r.verb == "POST" || r.verb == "PUT" {
|
||||
if r.namespace != "" {
|
||||
sub += fmt.Sprintf(",\"namespace\": %q", r.namespace)
|
||||
}
|
||||
}
|
||||
body = strings.NewReader(fmt.Sprintf(r.body, sub))
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(r.verb, s.URL+path, body)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
func() {
|
||||
reqDump, err := httputil.DumpRequest(req, true)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to dump request: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := clientForUser(r.user).Do(req)
|
||||
if err != nil {
|
||||
t.Errorf("case %d, req %d: failed to make request: %v", i, j, err)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respDump, err := httputil.DumpResponse(resp, true)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to dump response: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode != r.expectedStatus {
|
||||
// When debugging is on, dump the entire request and response. Very helpful for
|
||||
// debugging malformed test cases.
|
||||
//
|
||||
// To turn on debugging, use the '-args' flag.
|
||||
//
|
||||
// go test -v -tags integration -run RBAC -args -v 10
|
||||
//
|
||||
glog.V(8).Infof("case %d, req %d: %s\n%s\n", i, j, reqDump, respDump)
|
||||
t.Errorf("case %d, req %d: %s expected %q got %q", i, j, r, statusCode(r.expectedStatus), statusCode(resp.StatusCode))
|
||||
}
|
||||
|
||||
b, _ := ioutil.ReadAll(resp.Body)
|
||||
|
||||
if r.verb == "POST" && (resp.StatusCode/100) == 2 {
|
||||
// For successful create operations, extract resourceVersion
|
||||
id, currentResourceVersion, err := parseResourceVersion(b)
|
||||
if err == nil {
|
||||
key := getPreviousResourceVersionKey(path, id)
|
||||
previousResourceVersion[key] = currentResourceVersion
|
||||
} else {
|
||||
t.Logf("error in trying to extract resource version: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBootstrapping(t *testing.T) {
|
||||
superUser := "admin/system:masters"
|
||||
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(masterConfig)
|
||||
masterConfig.GenericConfig.Authenticator = newFakeAuthenticator()
|
||||
_, s := framework.RunAMaster(masterConfig)
|
||||
defer s.Close()
|
||||
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
|
||||
watcher, err := clientset.Rbac().ClusterRoles().Watch(api.ListOptions{ResourceVersion: "0"})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
_, err = watch.Until(30*time.Second, watcher, func(event watch.Event) (bool, error) {
|
||||
if event.Type != watch.Added {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
clusterRoles, err := clientset.Rbac().ClusterRoles().List(api.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(clusterRoles.Items) == 0 {
|
||||
t.Fatalf("missing cluster roles")
|
||||
}
|
||||
|
||||
for _, clusterRole := range clusterRoles.Items {
|
||||
if clusterRole.Name == "cluster-admin" {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
t.Errorf("missing cluster-admin: %v", clusterRoles)
|
||||
|
||||
healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthooks/rbac/bootstrap-roles").DoRaw()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
t.Errorf("expected %v, got %v", "asdf", string(healthBytes))
|
||||
}
|
25
vendor/k8s.io/kubernetes/test/integration/benchmark-controller.json
generated
vendored
Normal file
25
vendor/k8s.io/kubernetes/test/integration/benchmark-controller.json
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
"kind": "ReplicationController",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "test-controller",
|
||||
"namespace": "test",
|
||||
"labels": {"name": "test-controller"}
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 0,
|
||||
"selector": {"name": "test-pod"},
|
||||
"template": {
|
||||
"metadata": {
|
||||
"namespace": "test",
|
||||
"labels": {"name": "test-pod"}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [{
|
||||
"name": "test-container",
|
||||
"image": "gcr.io/google_containers/pause-amd64:3.0"
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
803
vendor/k8s.io/kubernetes/test/integration/client/client_test.go
generated
vendored
Normal file
803
vendor/k8s.io/kubernetes/test/integration/client/client_test.go
generated
vendored
Normal file
|
@ -0,0 +1,803 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
rt "runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
e2e "k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func TestClient(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
ns := framework.CreateTestingNamespace("client", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
info, err := client.Discovery().ServerVersion()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if e, a := version.Get(), *info; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("expected %#v, got %#v", e, a)
|
||||
}
|
||||
|
||||
pods, err := client.Core().Pods(ns.Name).List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(pods.Items) != 0 {
|
||||
t.Errorf("expected no pods, got %#v", pods)
|
||||
}
|
||||
|
||||
// get a validation error
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
GenerateName: "test",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := client.Core().Pods(ns.Name).Create(pod)
|
||||
if err == nil {
|
||||
t.Fatalf("unexpected non-error: %v", got)
|
||||
}
|
||||
|
||||
// get a created pod
|
||||
pod.Spec.Containers[0].Image = "an-image"
|
||||
got, err = client.Core().Pods(ns.Name).Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got.Name == "" {
|
||||
t.Errorf("unexpected empty pod Name %v", got)
|
||||
}
|
||||
|
||||
// pod is shown, but not scheduled
|
||||
pods, err = client.Core().Pods(ns.Name).List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(pods.Items) != 1 {
|
||||
t.Errorf("expected one pod, got %#v", pods)
|
||||
}
|
||||
actual := pods.Items[0]
|
||||
if actual.Name != got.Name {
|
||||
t.Errorf("expected pod %#v, got %#v", got, actual)
|
||||
}
|
||||
if actual.Spec.NodeName != "" {
|
||||
t.Errorf("expected pod to be unscheduled, got %#v", actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAtomicPut(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
ns := framework.CreateTestingNamespace("atomic-put", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
rcBody := v1.ReplicationController{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: c.Core().RESTClient().APIVersion().String(),
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "atomicrc",
|
||||
Namespace: ns.Name,
|
||||
Labels: map[string]string{
|
||||
"name": "atomicrc",
|
||||
},
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: func(i int32) *int32 { return &i }(0),
|
||||
Selector: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "name", Image: "image"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
rcs := c.Core().ReplicationControllers(ns.Name)
|
||||
rc, err := rcs.Create(&rcBody)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating atomicRC: %v", err)
|
||||
}
|
||||
testLabels := labels.Set{
|
||||
"foo": "bar",
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
// a: z, b: y, etc...
|
||||
testLabels[string([]byte{byte('a' + i)})] = string([]byte{byte('z' - i)})
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(testLabels))
|
||||
for label, value := range testLabels {
|
||||
go func(l, v string) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
tmpRC, err := rcs.Get(rc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Error getting atomicRC: %v", err)
|
||||
continue
|
||||
}
|
||||
if tmpRC.Spec.Selector == nil {
|
||||
tmpRC.Spec.Selector = map[string]string{l: v}
|
||||
tmpRC.Spec.Template.Labels = map[string]string{l: v}
|
||||
} else {
|
||||
tmpRC.Spec.Selector[l] = v
|
||||
tmpRC.Spec.Template.Labels[l] = v
|
||||
}
|
||||
tmpRC, err = rcs.Update(tmpRC)
|
||||
if err != nil {
|
||||
if apierrors.IsConflict(err) {
|
||||
// This is what we expect.
|
||||
continue
|
||||
}
|
||||
t.Errorf("Unexpected error putting atomicRC: %v", err)
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
}(label, value)
|
||||
}
|
||||
wg.Wait()
|
||||
rc, err = rcs.Get(rc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed getting atomicRC after writers are complete: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(testLabels, labels.Set(rc.Spec.Selector)) {
|
||||
t.Errorf("Selector PUTs were not atomic: wanted %v, got %v", testLabels, rc.Spec.Selector)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPatch(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
ns := framework.CreateTestingNamespace("patch", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
name := "patchpod"
|
||||
resource := "pods"
|
||||
podBody := v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: c.Core().RESTClient().APIVersion().String(),
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "name", Image: "image"},
|
||||
},
|
||||
},
|
||||
}
|
||||
pods := c.Core().Pods(ns.Name)
|
||||
pod, err := pods.Create(&podBody)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating patchpods: %v", err)
|
||||
}
|
||||
|
||||
patchBodies := map[schema.GroupVersion]map[api.PatchType]struct {
|
||||
AddLabelBody []byte
|
||||
RemoveLabelBody []byte
|
||||
RemoveAllLabelsBody []byte
|
||||
}{
|
||||
v1.SchemeGroupVersion: {
|
||||
api.JSONPatchType: {
|
||||
[]byte(`[{"op":"add","path":"/metadata/labels","value":{"foo":"bar","baz":"qux"}}]`),
|
||||
[]byte(`[{"op":"remove","path":"/metadata/labels/foo"}]`),
|
||||
[]byte(`[{"op":"remove","path":"/metadata/labels"}]`),
|
||||
},
|
||||
api.MergePatchType: {
|
||||
[]byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
|
||||
[]byte(`{"metadata":{"labels":{"foo":null}}}`),
|
||||
[]byte(`{"metadata":{"labels":null}}`),
|
||||
},
|
||||
api.StrategicMergePatchType: {
|
||||
[]byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
|
||||
[]byte(`{"metadata":{"labels":{"foo":null}}}`),
|
||||
[]byte(`{"metadata":{"labels":{"$patch":"replace"}}}`),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pb := patchBodies[c.Core().RESTClient().APIVersion()]
|
||||
|
||||
execPatch := func(pt api.PatchType, body []byte) error {
|
||||
return c.Core().RESTClient().Patch(pt).
|
||||
Resource(resource).
|
||||
Namespace(ns.Name).
|
||||
Name(name).
|
||||
Body(body).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
for k, v := range pb {
|
||||
// add label
|
||||
err := execPatch(k, v.AddLabelBody)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
|
||||
}
|
||||
pod, err = pods.Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed getting patchpod: %v", err)
|
||||
}
|
||||
if len(pod.Labels) != 2 || pod.Labels["foo"] != "bar" || pod.Labels["baz"] != "qux" {
|
||||
t.Errorf("Failed updating patchpod with patch type %s: labels are: %v", k, pod.Labels)
|
||||
}
|
||||
|
||||
// remove one label
|
||||
err = execPatch(k, v.RemoveLabelBody)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
|
||||
}
|
||||
pod, err = pods.Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed getting patchpod: %v", err)
|
||||
}
|
||||
if len(pod.Labels) != 1 || pod.Labels["baz"] != "qux" {
|
||||
t.Errorf("Failed updating patchpod with patch type %s: labels are: %v", k, pod.Labels)
|
||||
}
|
||||
|
||||
// remove all labels
|
||||
err = execPatch(k, v.RemoveAllLabelsBody)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err)
|
||||
}
|
||||
pod, err = pods.Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed getting patchpod: %v", err)
|
||||
}
|
||||
if pod.Labels != nil {
|
||||
t.Errorf("Failed remove all labels from patchpod with patch type %s: %v", k, pod.Labels)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPatchWithCreateOnUpdate(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
ns := framework.CreateTestingNamespace("patch-with-create", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
endpointTemplate := &v1.Endpoints{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "patchendpoint",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 80, Protocol: v1.ProtocolTCP}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
patchEndpoint := func(json []byte) (runtime.Object, error) {
|
||||
return c.Core().RESTClient().Patch(api.MergePatchType).Resource("endpoints").Namespace(ns.Name).Name("patchendpoint").Body(json).Do().Get()
|
||||
}
|
||||
|
||||
// Make sure patch doesn't get to CreateOnUpdate
|
||||
{
|
||||
endpointJSON, err := runtime.Encode(api.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating endpoint JSON: %v", err)
|
||||
}
|
||||
if obj, err := patchEndpoint(endpointJSON); !apierrors.IsNotFound(err) {
|
||||
t.Errorf("Expected notfound creating from patch, got error=%v and object: %#v", err, obj)
|
||||
}
|
||||
}
|
||||
|
||||
// Create the endpoint (endpoints set AllowCreateOnUpdate=true) to get a UID and resource version
|
||||
createdEndpoint, err := c.Core().Endpoints(ns.Name).Update(endpointTemplate)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating endpoint: %v", err)
|
||||
}
|
||||
|
||||
// Make sure identity patch is accepted
|
||||
{
|
||||
endpointJSON, err := runtime.Encode(api.Codecs.LegacyCodec(v1.SchemeGroupVersion), createdEndpoint)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating endpoint JSON: %v", err)
|
||||
}
|
||||
if _, err := patchEndpoint(endpointJSON); err != nil {
|
||||
t.Errorf("Failed patching endpoint: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure patch complains about a mismatched resourceVersion
|
||||
{
|
||||
endpointTemplate.Name = ""
|
||||
endpointTemplate.UID = ""
|
||||
endpointTemplate.ResourceVersion = "1"
|
||||
endpointJSON, err := runtime.Encode(api.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating endpoint JSON: %v", err)
|
||||
}
|
||||
if _, err := patchEndpoint(endpointJSON); !apierrors.IsConflict(err) {
|
||||
t.Errorf("Expected error, got %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure patch complains about mutating the UID
|
||||
{
|
||||
endpointTemplate.Name = ""
|
||||
endpointTemplate.UID = "abc"
|
||||
endpointTemplate.ResourceVersion = ""
|
||||
endpointJSON, err := runtime.Encode(api.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating endpoint JSON: %v", err)
|
||||
}
|
||||
if _, err := patchEndpoint(endpointJSON); !apierrors.IsInvalid(err) {
|
||||
t.Errorf("Expected error, got %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure patch complains about a mismatched name
|
||||
{
|
||||
endpointTemplate.Name = "changedname"
|
||||
endpointTemplate.UID = ""
|
||||
endpointTemplate.ResourceVersion = ""
|
||||
endpointJSON, err := runtime.Encode(api.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating endpoint JSON: %v", err)
|
||||
}
|
||||
if _, err := patchEndpoint(endpointJSON); !apierrors.IsBadRequest(err) {
|
||||
t.Errorf("Expected error, got %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure patch containing originally submitted JSON is accepted
|
||||
{
|
||||
endpointTemplate.Name = ""
|
||||
endpointTemplate.UID = ""
|
||||
endpointTemplate.ResourceVersion = ""
|
||||
endpointJSON, err := runtime.Encode(api.Codecs.LegacyCodec(v1.SchemeGroupVersion), endpointTemplate)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating endpoint JSON: %v", err)
|
||||
}
|
||||
if _, err := patchEndpoint(endpointJSON); err != nil {
|
||||
t.Errorf("Failed patching endpoint: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAPIVersions(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
clientVersion := c.Core().RESTClient().APIVersion().String()
|
||||
g, err := c.Discovery().ServerGroups()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get api versions: %v", err)
|
||||
}
|
||||
versions := metav1.ExtractGroupVersions(g)
|
||||
|
||||
// Verify that the server supports the API version used by the client.
|
||||
for _, version := range versions {
|
||||
if version == clientVersion {
|
||||
return
|
||||
}
|
||||
}
|
||||
t.Errorf("Server does not support APIVersion used by client. Server supported APIVersions: '%v', client APIVersion: '%v'", versions, clientVersion)
|
||||
}
|
||||
|
||||
func TestSingleWatch(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("single-watch", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
mkEvent := func(i int) *v1.Event {
|
||||
name := fmt.Sprintf("event-%v", i)
|
||||
return &v1.Event{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: ns.Name,
|
||||
Name: name,
|
||||
},
|
||||
InvolvedObject: v1.ObjectReference{
|
||||
Namespace: ns.Name,
|
||||
Name: name,
|
||||
},
|
||||
Reason: fmt.Sprintf("event %v", i),
|
||||
}
|
||||
}
|
||||
|
||||
rv1 := ""
|
||||
for i := 0; i < 10; i++ {
|
||||
event := mkEvent(i)
|
||||
got, err := client.Core().Events(ns.Name).Create(event)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating event %#q: %v", event, err)
|
||||
}
|
||||
if rv1 == "" {
|
||||
rv1 = got.ResourceVersion
|
||||
if rv1 == "" {
|
||||
t.Fatal("did not get a resource version.")
|
||||
}
|
||||
}
|
||||
t.Logf("Created event %#v", got.ObjectMeta)
|
||||
}
|
||||
|
||||
w, err := client.Core().RESTClient().Get().
|
||||
Prefix("watch").
|
||||
Namespace(ns.Name).
|
||||
Resource("events").
|
||||
Name("event-9").
|
||||
Param("resourceVersion", rv1).
|
||||
Watch()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed watch: %v", err)
|
||||
}
|
||||
defer w.Stop()
|
||||
|
||||
select {
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
t.Fatalf("watch took longer than %s", wait.ForeverTestTimeout.String())
|
||||
case got, ok := <-w.ResultChan():
|
||||
if !ok {
|
||||
t.Fatal("Watch channel closed unexpectedly.")
|
||||
}
|
||||
|
||||
// We expect to see an ADD of event-9 and only event-9. (This
|
||||
// catches a bug where all the events would have been sent down
|
||||
// the channel.)
|
||||
if e, a := watch.Added, got.Type; e != a {
|
||||
t.Errorf("Wanted %v, got %v", e, a)
|
||||
}
|
||||
switch o := got.Object.(type) {
|
||||
case *v1.Event:
|
||||
if e, a := "event-9", o.Name; e != a {
|
||||
t.Errorf("Wanted %v, got %v", e, a)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("Unexpected watch event containing object %#q", got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiWatch(t *testing.T) {
|
||||
// Disable this test as long as it demonstrates a problem.
|
||||
// TODO: Reenable this test when we get #6059 resolved.
|
||||
return
|
||||
|
||||
const watcherCount = 50
|
||||
rt.GOMAXPROCS(watcherCount)
|
||||
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("multi-watch", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
dummyEvent := func(i int) *v1.Event {
|
||||
name := fmt.Sprintf("unrelated-%v", i)
|
||||
return &v1.Event{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
InvolvedObject: v1.ObjectReference{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Reason: fmt.Sprintf("unrelated change %v", i),
|
||||
}
|
||||
}
|
||||
|
||||
type timePair struct {
|
||||
t time.Time
|
||||
name string
|
||||
}
|
||||
|
||||
receivedTimes := make(chan timePair, watcherCount*2)
|
||||
watchesStarted := sync.WaitGroup{}
|
||||
|
||||
// make a bunch of pods and watch them
|
||||
for i := 0; i < watcherCount; i++ {
|
||||
watchesStarted.Add(1)
|
||||
name := fmt.Sprintf("multi-watch-%v", i)
|
||||
got, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: labels.Set{"watchlabel": name},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
Image: e2e.GetPauseImageName(client),
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't make %v: %v", name, err)
|
||||
}
|
||||
go func(name, rv string) {
|
||||
options := v1.ListOptions{
|
||||
LabelSelector: labels.Set{"watchlabel": name}.AsSelector().String(),
|
||||
ResourceVersion: rv,
|
||||
}
|
||||
w, err := client.Core().Pods(ns.Name).Watch(options)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("watch error for %v: %v", name, err))
|
||||
}
|
||||
defer w.Stop()
|
||||
watchesStarted.Done()
|
||||
e, ok := <-w.ResultChan() // should get the update (that we'll do below)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("%v ended early?", name))
|
||||
}
|
||||
if e.Type != watch.Modified {
|
||||
panic(fmt.Sprintf("Got unexpected watch notification:\n%v: %+v %+v", name, e, e.Object))
|
||||
}
|
||||
receivedTimes <- timePair{time.Now(), name}
|
||||
}(name, got.ObjectMeta.ResourceVersion)
|
||||
}
|
||||
log.Printf("%v: %v pods made and watchers started", time.Now(), watcherCount)
|
||||
|
||||
// wait for watches to start before we start spamming the system with
|
||||
// objects below, otherwise we'll hit the watch window restriction.
|
||||
watchesStarted.Wait()
|
||||
|
||||
const (
|
||||
useEventsAsUnrelatedType = false
|
||||
usePodsAsUnrelatedType = true
|
||||
)
|
||||
|
||||
// make a bunch of unrelated changes in parallel
|
||||
if useEventsAsUnrelatedType {
|
||||
const unrelatedCount = 3000
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
changeToMake := make(chan int, unrelatedCount*2)
|
||||
changeMade := make(chan int, unrelatedCount*2)
|
||||
go func() {
|
||||
for i := 0; i < unrelatedCount; i++ {
|
||||
changeToMake <- i
|
||||
}
|
||||
close(changeToMake)
|
||||
}()
|
||||
for i := 0; i < 50; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for {
|
||||
i, ok := <-changeToMake
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if _, err := client.Core().Events(ns.Name).Create(dummyEvent(i)); err != nil {
|
||||
panic(fmt.Sprintf("couldn't make an event: %v", err))
|
||||
}
|
||||
changeMade <- i
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
for i := 0; i < 2000; i++ {
|
||||
<-changeMade
|
||||
if (i+1)%50 == 0 {
|
||||
log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
if usePodsAsUnrelatedType {
|
||||
const unrelatedCount = 3000
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
changeToMake := make(chan int, unrelatedCount*2)
|
||||
changeMade := make(chan int, unrelatedCount*2)
|
||||
go func() {
|
||||
for i := 0; i < unrelatedCount; i++ {
|
||||
changeToMake <- i
|
||||
}
|
||||
close(changeToMake)
|
||||
}()
|
||||
for i := 0; i < 50; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for {
|
||||
i, ok := <-changeToMake
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
name := fmt.Sprintf("unrelated-%v", i)
|
||||
_, err := client.Core().Pods(ns.Name).Create(&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "nothing",
|
||||
Image: e2e.GetPauseImageName(client),
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("couldn't make unrelated pod: %v", err))
|
||||
}
|
||||
changeMade <- i
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
for i := 0; i < 2000; i++ {
|
||||
<-changeMade
|
||||
if (i+1)%50 == 0 {
|
||||
log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now we still have changes being made in parallel, but at least 1000 have been made.
|
||||
// Make some updates to send down the watches.
|
||||
sentTimes := make(chan timePair, watcherCount*2)
|
||||
for i := 0; i < watcherCount; i++ {
|
||||
go func(i int) {
|
||||
name := fmt.Sprintf("multi-watch-%v", i)
|
||||
pod, err := client.Core().Pods(ns.Name).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
|
||||
}
|
||||
pod.Spec.Containers[0].Image = e2e.GetPauseImageName(client)
|
||||
sentTimes <- timePair{time.Now(), name}
|
||||
if _, err := client.Core().Pods(ns.Name).Update(pod); err != nil {
|
||||
panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
sent := map[string]time.Time{}
|
||||
for i := 0; i < watcherCount; i++ {
|
||||
tp := <-sentTimes
|
||||
sent[tp.name] = tp.t
|
||||
}
|
||||
log.Printf("all changes made")
|
||||
dur := map[string]time.Duration{}
|
||||
for i := 0; i < watcherCount; i++ {
|
||||
tp := <-receivedTimes
|
||||
delta := tp.t.Sub(sent[tp.name])
|
||||
dur[tp.name] = delta
|
||||
log.Printf("%v: %v", tp.name, delta)
|
||||
}
|
||||
log.Printf("all watches ended")
|
||||
t.Errorf("durations: %v", dur)
|
||||
}
|
||||
|
||||
func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace string) {
|
||||
podBody := v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "selflinktest",
|
||||
Namespace: namespace,
|
||||
Labels: map[string]string{
|
||||
"name": "selflinktest",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "name", Image: "image"},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod, err := c.Core().Pods(namespace).Create(&podBody)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed creating selflinktest pod: %v", err)
|
||||
}
|
||||
if err = c.Core().RESTClient().Get().RequestURI(pod.SelfLink).Do().Into(pod); err != nil {
|
||||
t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err)
|
||||
}
|
||||
|
||||
podList, err := c.Core().Pods(namespace).List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Failed listing pods: %v", err)
|
||||
}
|
||||
|
||||
if err = c.Core().RESTClient().Get().RequestURI(podList.SelfLink).Do().Into(podList); err != nil {
|
||||
t.Errorf("Failed listing pods with supplied self link '%v': %v", podList.SelfLink, err)
|
||||
}
|
||||
|
||||
found := false
|
||||
for i := range podList.Items {
|
||||
item := &podList.Items[i]
|
||||
if item.Name != "selflinktest" {
|
||||
continue
|
||||
}
|
||||
found = true
|
||||
err = c.Core().RESTClient().Get().RequestURI(item.SelfLink).Do().Into(pod)
|
||||
if err != nil {
|
||||
t.Errorf("Failed listing pod with supplied self link '%v': %v", item.SelfLink, err)
|
||||
}
|
||||
break
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("never found selflinktest pod in namespace %s", namespace)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelfLinkOnNamespace(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("selflink", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
runSelfLinkTestOnNamespace(t, c, ns.Name)
|
||||
}
|
159
vendor/k8s.io/kubernetes/test/integration/client/dynamic_client_test.go
generated
vendored
Normal file
|
@ -0,0 +1,159 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/client/typed/dynamic"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func TestDynamicClient(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("dynamic-client", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
gv := &api.Registry.GroupOrDie(v1.GroupName).GroupVersion
|
||||
config := &restclient.Config{
|
||||
Host: s.URL,
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: gv},
|
||||
}
|
||||
|
||||
client := clientset.NewForConfigOrDie(config)
|
||||
dynamicClient, err := dynamic.NewClient(config)
|
||||
_ = dynamicClient
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error creating dynamic client: %v", err)
|
||||
}
|
||||
|
||||
// Find the Pod resource
|
||||
resources, err := client.Discovery().ServerResourcesForGroupVersion(gv.String())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error listing resources: %v", err)
|
||||
}
|
||||
|
||||
var resource metav1.APIResource
|
||||
for _, r := range resources.APIResources {
|
||||
if r.Kind == "Pod" {
|
||||
resource = r
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(resource.Name) == 0 {
|
||||
t.Fatalf("could not find the pod resource in group/version %q", gv.String())
|
||||
}
|
||||
|
||||
// Create a Pod with the normal client
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
GenerateName: "test",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test",
|
||||
Image: "test-image",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
actual, err := client.Core().Pods(ns.Name).Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when creating pod: %v", err)
|
||||
}
|
||||
|
||||
// check dynamic list
|
||||
obj, err := dynamicClient.Resource(&resource, ns.Name).List(&v1.ListOptions{})
|
||||
unstructuredList, ok := obj.(*unstructured.UnstructuredList)
|
||||
if !ok {
|
||||
t.Fatalf("expected *unstructured.UnstructuredList, got %#v", obj)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when listing pods: %v", err)
|
||||
}
|
||||
|
||||
if len(unstructuredList.Items) != 1 {
|
||||
t.Fatalf("expected one pod, got %d", len(unstructuredList.Items))
|
||||
}
|
||||
|
||||
got, err := unstructuredToPod(unstructuredList.Items[0])
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(actual, got) {
|
||||
t.Fatalf("unexpected pod in list. wanted %#v, got %#v", actual, got)
|
||||
}
|
||||
|
||||
// check dynamic get
|
||||
unstruct, err := dynamicClient.Resource(&resource, ns.Name).Get(actual.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when getting pod %q: %v", actual.Name, err)
|
||||
}
|
||||
|
||||
got, err = unstructuredToPod(unstruct)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error converting Unstructured to v1.Pod: %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(actual, got) {
|
||||
t.Fatalf("unexpected pod in list. wanted %#v, got %#v", actual, got)
|
||||
}
|
||||
|
||||
// delete the pod dynamically
|
||||
err = dynamicClient.Resource(&resource, ns.Name).Delete(actual.Name, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when deleting pod: %v", err)
|
||||
}
|
||||
|
||||
list, err := client.Core().Pods(ns.Name).List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when listing pods: %v", err)
|
||||
}
|
||||
|
||||
if len(list.Items) != 0 {
|
||||
t.Fatalf("expected zero pods, got %d", len(list.Items))
|
||||
}
|
||||
}
|
||||
|
||||
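// unstructuredToPod converts an Unstructured object to a typed v1.Pod by round-tripping
// through JSON; Kind and APIVersion are cleared so the result compares equal to pods
// returned by the typed client.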
func unstructuredToPod(obj *unstructured.Unstructured) (*v1.Pod, error) {
|
||||
json, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pod := new(v1.Pod)
|
||||
err = runtime.DecodeInto(testapi.Default.Codec(), json, pod)
|
||||
pod.Kind = ""
|
||||
pod.APIVersion = ""
|
||||
return pod, err
|
||||
}
|
125
vendor/k8s.io/kubernetes/test/integration/configmap/configmap_test.go
generated
vendored
Normal file
|
@ -0,0 +1,125 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package configmap
|
||||
|
||||
// This file tests use of the configMap API resource.
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/test/integration"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
// TestConfigMap tests apiserver-side behavior of creation of ConfigMaps and pods that consume them.
|
||||
func TestConfigMap(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
ns := framework.CreateTestingNamespace("config-map", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
DoTestConfigMap(t, client, ns)
|
||||
}
|
||||
|
||||
func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
|
||||
cfg := v1.ConfigMap{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "configmap",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
"data-2": "value-2",
|
||||
"data-3": "value-3",
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := client.Core().ConfigMaps(cfg.Namespace).Create(&cfg); err != nil {
|
||||
t.Errorf("unable to create test configMap: %v", err)
|
||||
}
|
||||
defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name)
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "XXX",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "CONFIG_DATA_1",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: "configmap",
|
||||
},
|
||||
Key: "data-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "CONFIG_DATA_2",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: "configmap",
|
||||
},
|
||||
Key: "data-2",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "CONFIG_DATA_3",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: "configmap",
|
||||
},
|
||||
Key: "data-3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pod.ObjectMeta.Name = "uses-configmap"
|
||||
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
|
||||
t.Errorf("Failed to create pod: %v", err)
|
||||
}
|
||||
defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
|
||||
}
|
||||
|
||||
func deleteConfigMapOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
|
||||
if err := c.Core().ConfigMaps(ns).Delete(name, nil); err != nil {
|
||||
t.Errorf("unable to delete ConfigMap %v: %v", name, err)
|
||||
}
|
||||
}
|
31
vendor/k8s.io/kubernetes/test/integration/discoverysummarizer/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["discoverysummarizer_test.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/kube-aggregator/pkg/legacy:go_default_library",
|
||||
"//examples/apiserver:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
116
vendor/k8s.io/kubernetes/test/integration/discoverysummarizer/discoverysummarizer_test.go
generated
vendored
Normal file
|
@ -0,0 +1,116 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package discoverysummarizer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/cmd/kube-aggregator/pkg/legacy"
|
||||
"k8s.io/kubernetes/examples/apiserver"
|
||||
)
|
||||
|
||||
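// waitForServerUp polls serverURL every 5 seconds for up to a minute and returns nil once a GET succeeds.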
func waitForServerUp(serverURL string) error {
|
||||
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
|
||||
_, err := http.Get(serverURL)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("waiting for server timed out")
|
||||
}
|
||||
|
||||
func testResponse(t *testing.T, serverURL, path string, expectedStatusCode int) {
|
||||
response, err := http.Get(serverURL + path)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error in GET %s: %v", path, err)
|
||||
}
|
||||
if response.StatusCode != expectedStatusCode {
|
||||
t.Errorf("unexpected status code: %v, expected: %v", response.StatusCode, expectedStatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func runDiscoverySummarizer(t *testing.T) string {
|
||||
configFilePath := "../../../cmd/kube-aggregator/config.json"
|
||||
port := "9090"
|
||||
serverURL := "http://localhost:" + port
|
||||
s, err := legacy.NewDiscoverySummarizer(configFilePath)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v\n", err)
|
||||
}
|
||||
go func() {
|
||||
if err := s.Run(port); err != nil {
|
||||
t.Fatalf("error in bringing up the server: %v", err)
|
||||
}
|
||||
}()
|
||||
if err := waitForServerUp(serverURL); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
return serverURL
|
||||
}
|
||||
|
||||
func runAPIServer(t *testing.T, stopCh <-chan struct{}) string {
|
||||
serverRunOptions := apiserver.NewServerRunOptions()
|
||||
// Change the ports, because otherwise it will fail if examples/apiserver/apiserver_test and this are run in parallel.
|
||||
serverRunOptions.SecureServing.ServingOptions.BindPort = 6443 + 3
|
||||
serverRunOptions.InsecureServing.BindPort = 8080 + 3
|
||||
go func() {
|
||||
if err := serverRunOptions.Run(stopCh); err != nil {
|
||||
t.Fatalf("Error in bringing up the example apiserver: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
serverURL := fmt.Sprintf("http://localhost:%d", serverRunOptions.InsecureServing.BindPort)
|
||||
if err := waitForServerUp(serverURL); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
return serverURL
|
||||
}
|
||||
|
||||
// Runs a discovery summarizer server and tests that all endpoints work as expected.
|
||||
func TestRunDiscoverySummarizer(t *testing.T) {
|
||||
discoveryURL := runDiscoverySummarizer(t)
|
||||
|
||||
// Test /api path.
|
||||
// No backend server is registered at that URL yet, so we expect a 502 (Bad Gateway).
|
||||
testResponse(t, discoveryURL, "/api", http.StatusBadGateway)
|
||||
|
||||
// Test /apis path.
|
||||
// No backend server is registered at that URL yet, so we expect a 502 (Bad Gateway).
|
||||
testResponse(t, discoveryURL, "/apis", http.StatusBadGateway)
|
||||
|
||||
// Test a random path, which should give a 404.
|
||||
testResponse(t, discoveryURL, "/randomPath", http.StatusNotFound)
|
||||
|
||||
// Run the APIServer now to test the good case.
|
||||
stopCh := make(chan struct{})
|
||||
runAPIServer(t, stopCh)
|
||||
defer close(stopCh)
|
||||
|
||||
// Test /api path.
|
||||
// The example apiserver is running now, so we expect a 200.
|
||||
testResponse(t, discoveryURL, "/api", http.StatusOK)
|
||||
|
||||
// Test /apis path.
|
||||
// The example apiserver is running now, so we expect a 200.
|
||||
testResponse(t, discoveryURL, "/apis", http.StatusOK)
|
||||
|
||||
// Test a random path, which should give a 404.
|
||||
testResponse(t, discoveryURL, "/randomPath", http.StatusNotFound)
|
||||
}
|
20
vendor/k8s.io/kubernetes/test/integration/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package integration provides integration tests for Kubernetes. Use the integration
|
||||
// build tag during `go test` to start the tests. Some tests require a running etcd
|
||||
// or Docker installation on the system which you can skip with no-docker and no-etcd.
|
||||
package integration // import "k8s.io/kubernetes/test/integration"
|
277
vendor/k8s.io/kubernetes/test/integration/evictions/evictions_test.go
generated
vendored
Normal file
|
@ -0,0 +1,277 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package evictions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http/httptest"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/policy/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller/disruption"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
numOfEvictions = 10
|
||||
)
|
||||
|
||||
// TestConcurrentEvictionRequests is to make sure pod disruption budgets (PDB) controller is able to
|
||||
// handle concurrent eviction requests. Original issue:#37605
|
||||
func TestConcurrentEvictionRequests(t *testing.T) {
|
||||
podNameFormat := "test-pod-%d"
|
||||
|
||||
s, rm, podInformer, clientSet := rmSetup(t)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("concurrent-eviction-requests", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
go podInformer.Run(stopCh)
|
||||
go rm.Run(stopCh)
|
||||
defer close(stopCh)
|
||||
|
||||
config := restclient.Config{Host: s.URL}
|
||||
clientSet, err := clientset.NewForConfig(&config)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create clientset: %v", err)
|
||||
}
|
||||
|
||||
var gracePeriodSeconds int64 = 30
|
||||
deleteOption := &v1.DeleteOptions{
|
||||
GracePeriodSeconds: &gracePeriodSeconds,
|
||||
}
|
||||
|
||||
// Generate numOfEvictions pods to evict
|
||||
for i := 0; i < numOfEvictions; i++ {
|
||||
podName := fmt.Sprintf(podNameFormat, i)
|
||||
pod := newPod(podName)
|
||||
|
||||
if _, err := clientSet.Core().Pods(ns.Name).Create(pod); err != nil {
|
||||
t.Errorf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
addPodConditionReady(pod)
|
||||
if _, err := clientSet.Core().Pods(ns.Name).UpdateStatus(pod); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
waitToObservePods(t, podInformer, numOfEvictions)
|
||||
|
||||
pdb := newPDB()
|
||||
if _, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil {
|
||||
t.Errorf("Failed to create PodDisruptionBudget: %v", err)
|
||||
}
|
||||
|
||||
waitPDBStable(t, clientSet, numOfEvictions, ns.Name, pdb.Name)
|
||||
|
||||
var numberPodsEvicted uint32 = 0
|
||||
errCh := make(chan error, 3*numOfEvictions)
|
||||
var wg sync.WaitGroup
|
||||
// spawn numOfEvictions goroutines to concurrently evict the pods
|
||||
for i := 0; i < numOfEvictions; i++ {
|
||||
wg.Add(1)
|
||||
go func(id int, errCh chan error) {
|
||||
defer wg.Done()
|
||||
podName := fmt.Sprintf(podNameFormat, id)
|
||||
eviction := newEviction(ns.Name, podName, deleteOption)
|
||||
|
||||
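// Retry the eviction for up to a minute: 429 (TooManyRequests) means the budget disallows it
// right now, while a 409 (Conflict) indicates mishandled concurrent PDB updates and fails the test.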
err := wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) {
|
||||
e := clientSet.Policy().Evictions(ns.Name).Evict(eviction)
|
||||
switch {
|
||||
case errors.IsTooManyRequests(e):
|
||||
return false, nil
|
||||
case errors.IsConflict(e):
|
||||
return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e)
|
||||
case e == nil:
|
||||
return true, nil
|
||||
default:
|
||||
return false, e
|
||||
}
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
// should not return here otherwise we would leak the pod
|
||||
}
|
||||
|
||||
_, err = clientSet.Core().Pods(ns.Name).Get(podName, metav1.GetOptions{})
|
||||
switch {
|
||||
case errors.IsNotFound(err):
|
||||
atomic.AddUint32(&numberPodsEvicted, 1)
|
||||
// pod was evicted and deleted so return from goroutine immediately
|
||||
return
|
||||
case err == nil:
|
||||
// this shouldn't happen if the pod was evicted successfully
|
||||
errCh <- fmt.Errorf("Pod %q is expected to be evicted", podName)
|
||||
default:
|
||||
errCh <- err
|
||||
}
|
||||
|
||||
// delete pod which still exists due to error
|
||||
e := clientSet.Core().Pods(ns.Name).Delete(podName, deleteOption)
|
||||
if e != nil {
|
||||
errCh <- e
|
||||
}
|
||||
|
||||
}(i, errCh)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
close(errCh)
|
||||
var errList []error
|
||||
if err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil {
|
||||
errList = append(errList, fmt.Errorf("Failed to delete PodDisruptionBudget: %v", err))
|
||||
}
|
||||
for err := range errCh {
|
||||
errList = append(errList, err)
|
||||
}
|
||||
if len(errList) > 0 {
|
||||
t.Fatal(utilerrors.NewAggregate(errList))
|
||||
}
|
||||
|
||||
if atomic.LoadUint32(&numberPodsEvicted) != numOfEvictions {
|
||||
t.Fatalf("fewer number of successful evictions than expected :", numberPodsEvicted)
|
||||
}
|
||||
}
|
||||
|
||||
func newPod(podName string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"app": "test-evictions"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
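// addPodConditionReady marks the pod as Running and Ready so it is counted as healthy by the PDB.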
func addPodConditionReady(pod *v1.Pod) {
|
||||
pod.Status = v1.PodStatus{
|
||||
Phase: v1.PodRunning,
|
||||
Conditions: []v1.PodCondition{
|
||||
{
|
||||
Type: v1.PodReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
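// newPDB returns a PodDisruptionBudget that selects the test pods and sets MinAvailable to 0.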
func newPDB() *v1beta1.PodDisruptionBudget {
|
||||
return &v1beta1.PodDisruptionBudget{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "test-pdb",
|
||||
},
|
||||
Spec: v1beta1.PodDisruptionBudgetSpec{
|
||||
MinAvailable: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 0,
|
||||
},
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"app": "test-evictions"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
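// newEviction builds a policy/v1beta1 Eviction request for the named pod with the given delete options.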
func newEviction(ns, evictionName string, deleteOption *v1.DeleteOptions) *v1beta1.Eviction {
|
||||
return &v1beta1.Eviction{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "Policy/v1beta1",
|
||||
Kind: "Eviction",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: evictionName,
|
||||
Namespace: ns,
|
||||
},
|
||||
DeleteOptions: deleteOption,
|
||||
}
|
||||
}
|
||||
|
||||
func rmSetup(t *testing.T) (*httptest.Server, *disruption.DisruptionController, cache.SharedIndexInformer, clientset.Interface) {
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
_, s := framework.RunAMaster(masterConfig)
|
||||
|
||||
config := restclient.Config{Host: s.URL}
|
||||
clientSet, err := clientset.NewForConfig(&config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in create clientset: %v", err)
|
||||
}
|
||||
resyncPeriod := 12 * time.Hour
|
||||
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pdb-informers")), nil, resyncPeriod)
|
||||
|
||||
rm := disruption.NewDisruptionController(
|
||||
informers.Pods().Informer(),
|
||||
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "disruption-controller")),
|
||||
)
|
||||
return s, rm, informers.Pods().Informer(), clientSet
|
||||
}
|
||||
|
||||
// wait for the podInformer to observe the pods. Call this function before
|
||||
// running the RS controller to prevent the rc manager from creating new pods
|
||||
// rather than adopting the existing ones.
|
||||
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
|
||||
if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
|
||||
objects := podInformer.GetIndexer().List()
|
||||
if len(objects) == podNum {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
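// waitPDBStable polls until the PDB status reports podNum currently healthy pods.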
func waitPDBStable(t *testing.T, clientSet clientset.Interface, podNum int32, ns, pdbName string) {
|
||||
if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
|
||||
pdb, err := clientSet.Policy().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if pdb.Status.CurrentHealthy != podNum {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
34
vendor/k8s.io/kubernetes/test/integration/examples/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,34 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["apiserver_test.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/libs/go2idl/client-gen/test_apis/testgroup/v1:go_default_library",
|
||||
"//examples/apiserver:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/stretchr/testify/assert",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
180
vendor/k8s.io/kubernetes/test/integration/examples/apiserver_test.go
generated
vendored
Normal file
|
@ -0,0 +1,180 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apiserver
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/examples/apiserver"
|
||||
)
|
||||
|
||||
var groupVersion = v1.SchemeGroupVersion
|
||||
|
||||
var groupVersionForDiscovery = metav1.GroupVersionForDiscovery{
|
||||
GroupVersion: groupVersion.String(),
|
||||
Version: groupVersion.Version,
|
||||
}
|
||||
|
||||
func TestRunServer(t *testing.T) {
|
||||
serverIP := fmt.Sprintf("http://localhost:%d", apiserver.InsecurePort)
|
||||
stopCh := make(chan struct{})
|
||||
go func() {
|
||||
if err := apiserver.NewServerRunOptions().Run(stopCh); err != nil {
|
||||
t.Fatalf("Error in bringing up the server: %v", err)
|
||||
}
|
||||
}()
|
||||
defer close(stopCh)
|
||||
if err := waitForApiserverUp(serverIP); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
testSwaggerSpec(t, serverIP)
|
||||
testAPIGroupList(t, serverIP)
|
||||
testAPIGroup(t, serverIP)
|
||||
testAPIResourceList(t, serverIP)
|
||||
}
|
||||
|
||||
func TestRunSecureServer(t *testing.T) {
|
||||
serverIP := fmt.Sprintf("https://localhost:%d", apiserver.SecurePort)
|
||||
stopCh := make(chan struct{})
|
||||
go func() {
|
||||
options := apiserver.NewServerRunOptions()
|
||||
options.InsecureServing.BindPort = 0
|
||||
options.SecureServing.ServingOptions.BindPort = apiserver.SecurePort
|
||||
if err := options.Run(stopCh); err != nil {
|
||||
t.Fatalf("Error in bringing up the server: %v", err)
|
||||
}
|
||||
}()
|
||||
defer close(stopCh)
|
||||
if err := waitForApiserverUp(serverIP); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
testSwaggerSpec(t, serverIP)
|
||||
testAPIGroupList(t, serverIP)
|
||||
testAPIGroup(t, serverIP)
|
||||
testAPIResourceList(t, serverIP)
|
||||
}
|
||||
|
||||
func waitForApiserverUp(serverIP string) error {
|
||||
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
|
||||
glog.Errorf("Waiting for : %#v", serverIP)
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client := &http.Client{Transport: tr}
|
||||
_, err := client.Get(serverIP)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("waiting for apiserver timed out")
|
||||
}
|
||||
|
||||
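// readResponse GETs serverURL with TLS verification disabled and returns the body, or an error for any non-200 status.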
func readResponse(serverURL string) ([]byte, error) {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client := &http.Client{Transport: tr}
|
||||
response, err := client.Get(serverURL)
|
||||
if err != nil {
|
||||
glog.Errorf("http get err code : %#v", err)
|
||||
return nil, fmt.Errorf("Error in fetching %s: %v", serverURL, err)
|
||||
}
|
||||
defer response.Body.Close()
|
||||
glog.Errorf("http get response code : %#v", response.StatusCode)
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("unexpected status: %d for URL: %s, expected status: %d", response.StatusCode, serverURL, http.StatusOK)
|
||||
}
|
||||
contents, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error reading response from %s: %v", serverURL, err)
|
||||
}
|
||||
return contents, nil
|
||||
}
|
||||
|
||||
func testSwaggerSpec(t *testing.T, serverIP string) {
|
||||
serverURL := serverIP + "/swaggerapi"
|
||||
_, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func testAPIGroupList(t *testing.T, serverIP string) {
|
||||
serverURL := serverIP + "/apis"
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiGroupList metav1.APIGroupList
|
||||
err = json.Unmarshal(contents, &apiGroupList)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
assert.Equal(t, 1, len(apiGroupList.Groups))
|
||||
assert.Equal(t, apiGroupList.Groups[0].Name, groupVersion.Group)
|
||||
assert.Equal(t, 1, len(apiGroupList.Groups[0].Versions))
|
||||
assert.Equal(t, apiGroupList.Groups[0].Versions[0], groupVersionForDiscovery)
|
||||
assert.Equal(t, apiGroupList.Groups[0].PreferredVersion, groupVersionForDiscovery)
|
||||
}
|
||||
|
||||
func testAPIGroup(t *testing.T, serverIP string) {
|
||||
serverURL := serverIP + "/apis/testgroup.k8s.io"
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiGroup metav1.APIGroup
|
||||
err = json.Unmarshal(contents, &apiGroup)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
assert.Equal(t, apiGroup.APIVersion, groupVersion.Version)
|
||||
assert.Equal(t, apiGroup.Name, groupVersion.Group)
|
||||
assert.Equal(t, 1, len(apiGroup.Versions))
|
||||
assert.Equal(t, apiGroup.Versions[0].GroupVersion, groupVersion.String())
|
||||
assert.Equal(t, apiGroup.Versions[0].Version, groupVersion.Version)
|
||||
assert.Equal(t, apiGroup.Versions[0], apiGroup.PreferredVersion)
|
||||
}
|
||||
|
||||
func testAPIResourceList(t *testing.T, serverIP string) {
|
||||
serverURL := serverIP + "/apis/testgroup.k8s.io/v1"
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiResourceList metav1.APIResourceList
|
||||
err = json.Unmarshal(contents, &apiResourceList)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
assert.Equal(t, apiResourceList.APIVersion, groupVersion.Version)
|
||||
assert.Equal(t, apiResourceList.GroupVersion, groupVersion.String())
|
||||
assert.Equal(t, 1, len(apiResourceList.APIResources))
|
||||
assert.Equal(t, apiResourceList.APIResources[0].Name, "testtypes")
|
||||
assert.True(t, apiResourceList.APIResources[0].Namespaced)
|
||||
}
|
38
vendor/k8s.io/kubernetes/test/integration/federation/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["server_test.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//federation/apis/federation/v1beta1:go_default_library",
|
||||
"//federation/cmd/federation-apiserver/app:go_default_library",
|
||||
"//federation/cmd/federation-apiserver/app/options:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/apis/batch/v1:go_default_library",
|
||||
"//pkg/apis/extensions/v1beta1:go_default_library",
|
||||
"//vendor:github.com/stretchr/testify/assert",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
375
vendor/k8s.io/kubernetes/test/integration/federation/server_test.go
generated
vendored
Normal file
|
@ -0,0 +1,375 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package app
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
fed_v1b1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
|
||||
"k8s.io/kubernetes/federation/cmd/federation-apiserver/app"
|
||||
"k8s.io/kubernetes/federation/cmd/federation-apiserver/app/options"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
batch_v1 "k8s.io/kubernetes/pkg/apis/batch/v1"
|
||||
ext_v1b1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
)
|
||||
|
||||
var securePort = 6443 + 2
|
||||
var insecurePort = 8080 + 2
|
||||
var serverIP = fmt.Sprintf("http://localhost:%v", insecurePort)
|
||||
var groupVersions = []schema.GroupVersion{
|
||||
fed_v1b1.SchemeGroupVersion,
|
||||
ext_v1b1.SchemeGroupVersion,
|
||||
batch_v1.SchemeGroupVersion,
|
||||
}
|
||||
|
||||
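// TestRun starts the federation apiserver against a local etcd and exercises its swagger, version, and discovery endpoints.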
func TestRun(t *testing.T) {
|
||||
certDir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temporary certificate directory: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(certDir)
|
||||
|
||||
s := options.NewServerRunOptions()
|
||||
s.SecureServing.ServingOptions.BindPort = securePort
|
||||
s.InsecureServing.BindPort = insecurePort
|
||||
s.Etcd.StorageConfig.ServerList = []string{"http://localhost:2379"}
|
||||
s.SecureServing.ServerCert.CertDirectory = certDir
|
||||
|
||||
go func() {
|
||||
if err := app.Run(s); err != nil {
|
||||
t.Fatalf("Error in bringing up the server: %v", err)
|
||||
}
|
||||
}()
|
||||
if err := waitForApiserverUp(); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
testSwaggerSpec(t)
|
||||
testSupport(t)
|
||||
testAPIGroupList(t)
|
||||
testAPIGroup(t)
|
||||
testAPIResourceList(t)
|
||||
}
|
||||
|
||||
func waitForApiserverUp() error {
|
||||
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
|
||||
_, err := http.Get(serverIP)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("waiting for apiserver timed out")
|
||||
}
|
||||
|
||||
func readResponse(serverURL string) ([]byte, error) {
|
||||
response, err := http.Get(serverURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error in fetching %s: %v", serverURL, err)
|
||||
}
|
||||
defer response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("unexpected status: %d for URL: %s, expected status: %d", response.StatusCode, serverURL, http.StatusOK)
|
||||
}
|
||||
contents, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error reading response from %s: %v", serverURL, err)
|
||||
}
|
||||
return contents, nil
|
||||
}
|
||||
|
||||
func testSwaggerSpec(t *testing.T) {
|
||||
serverURL := serverIP + "/swaggerapi"
|
||||
_, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func testSupport(t *testing.T) {
|
||||
serverURL := serverIP + "/version"
|
||||
_, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func findGroup(groups []metav1.APIGroup, groupName string) *metav1.APIGroup {
|
||||
for _, group := range groups {
|
||||
if group.Name == groupName {
|
||||
return &group
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAPIGroupList(t *testing.T) {
|
||||
groupVersionForDiscoveryMap := make(map[string]metav1.GroupVersionForDiscovery)
|
||||
for _, groupVersion := range groupVersions {
|
||||
groupVersionForDiscoveryMap[groupVersion.Group] = metav1.GroupVersionForDiscovery{
|
||||
GroupVersion: groupVersion.String(),
|
||||
Version: groupVersion.Version,
|
||||
}
|
||||
}
|
||||
|
||||
serverURL := serverIP + "/apis"
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiGroupList metav1.APIGroupList
|
||||
err = json.Unmarshal(contents, &apiGroupList)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
|
||||
for _, groupVersion := range groupVersions {
|
||||
found := findGroup(apiGroupList.Groups, groupVersion.Group)
|
||||
assert.NotNil(t, found)
|
||||
assert.Equal(t, groupVersion.Group, found.Name)
|
||||
assert.Equal(t, 1, len(found.Versions))
|
||||
groupVersionForDiscovery := groupVersionForDiscoveryMap[groupVersion.Group]
|
||||
assert.Equal(t, groupVersionForDiscovery, found.Versions[0])
|
||||
assert.Equal(t, groupVersionForDiscovery, found.PreferredVersion)
|
||||
}
|
||||
}
|
||||
|
||||
func testAPIGroup(t *testing.T) {
|
||||
for _, groupVersion := range groupVersions {
|
||||
serverURL := serverIP + "/apis/" + groupVersion.Group
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiGroup metav1.APIGroup
|
||||
err = json.Unmarshal(contents, &apiGroup)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
// empty APIVersion for extensions group
|
||||
if groupVersion.Group == "extensions" {
|
||||
assert.Equal(t, "", apiGroup.APIVersion)
|
||||
} else {
|
||||
assert.Equal(t, "v1", apiGroup.APIVersion)
|
||||
}
|
||||
assert.Equal(t, apiGroup.Name, groupVersion.Group)
|
||||
assert.Equal(t, 1, len(apiGroup.Versions))
|
||||
assert.Equal(t, groupVersion.String(), apiGroup.Versions[0].GroupVersion)
|
||||
assert.Equal(t, groupVersion.Version, apiGroup.Versions[0].Version)
|
||||
assert.Equal(t, apiGroup.PreferredVersion, apiGroup.Versions[0])
|
||||
}
|
||||
|
||||
testCoreAPIGroup(t)
|
||||
}
|
||||
|
||||
func testCoreAPIGroup(t *testing.T) {
|
||||
serverURL := serverIP + "/api"
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiVersions metav1.APIVersions
|
||||
err = json.Unmarshal(contents, &apiVersions)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
assert.Equal(t, 1, len(apiVersions.Versions))
|
||||
assert.Equal(t, "v1", apiVersions.Versions[0])
|
||||
assert.NotEmpty(t, apiVersions.ServerAddressByClientCIDRs)
|
||||
}
|
||||
|
||||
func findResource(resources []metav1.APIResource, resourceName string) *metav1.APIResource {
|
||||
for _, resource := range resources {
|
||||
if resource.Name == resourceName {
|
||||
return &resource
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAPIResourceList(t *testing.T) {
|
||||
testFederationResourceList(t)
|
||||
testCoreResourceList(t)
|
||||
testExtensionsResourceList(t)
|
||||
testBatchResourceList(t)
|
||||
}
|
||||
|
||||
func testFederationResourceList(t *testing.T) {
|
||||
serverURL := serverIP + "/apis/" + fed_v1b1.SchemeGroupVersion.String()
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiResourceList metav1.APIResourceList
|
||||
err = json.Unmarshal(contents, &apiResourceList)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
assert.Equal(t, "v1", apiResourceList.APIVersion)
|
||||
assert.Equal(t, fed_v1b1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
|
||||
// Assert that there are exactly 2 resources.
|
||||
assert.Equal(t, 2, len(apiResourceList.APIResources))
|
||||
|
||||
found := findResource(apiResourceList.APIResources, "clusters")
|
||||
assert.NotNil(t, found)
|
||||
assert.False(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "clusters/status")
|
||||
assert.NotNil(t, found)
|
||||
assert.False(t, found.Namespaced)
|
||||
}
|
||||
|
||||
func testCoreResourceList(t *testing.T) {
|
||||
serverURL := serverIP + "/api/" + v1.SchemeGroupVersion.String()
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiResourceList metav1.APIResourceList
|
||||
err = json.Unmarshal(contents, &apiResourceList)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
assert.Equal(t, "", apiResourceList.APIVersion)
|
||||
assert.Equal(t, v1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
|
||||
// Assert that there are exactly 8 resources.
|
||||
assert.Equal(t, 8, len(apiResourceList.APIResources))
|
||||
|
||||
// Verify services.
|
||||
found := findResource(apiResourceList.APIResources, "services")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "services/status")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
|
||||
// Verify namespaces.
|
||||
found = findResource(apiResourceList.APIResources, "namespaces")
|
||||
assert.NotNil(t, found)
|
||||
assert.False(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "namespaces/status")
|
||||
assert.NotNil(t, found)
|
||||
assert.False(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "namespaces/finalize")
|
||||
assert.NotNil(t, found)
|
||||
assert.False(t, found.Namespaced)
|
||||
|
||||
// Verify events.
|
||||
found = findResource(apiResourceList.APIResources, "events")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
|
||||
// Verify secrets.
|
||||
found = findResource(apiResourceList.APIResources, "secrets")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
|
||||
// Verify config maps.
|
||||
found = findResource(apiResourceList.APIResources, "configmaps")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
}
|
||||
|
||||
func testExtensionsResourceList(t *testing.T) {
|
||||
serverURL := serverIP + "/apis/" + ext_v1b1.SchemeGroupVersion.String()
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiResourceList metav1.APIResourceList
|
||||
err = json.Unmarshal(contents, &apiResourceList)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
// empty APIVersion for extensions group
|
||||
assert.Equal(t, "", apiResourceList.APIVersion)
|
||||
assert.Equal(t, ext_v1b1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
|
||||
// Assert that there are exactly 11 resources.
|
||||
assert.Equal(t, 11, len(apiResourceList.APIResources))
|
||||
|
||||
// Verify replicasets.
|
||||
found := findResource(apiResourceList.APIResources, "replicasets")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "replicasets/status")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "replicasets/scale")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
|
||||
// Verify ingress.
|
||||
found = findResource(apiResourceList.APIResources, "ingresses")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "ingresses/status")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
|
||||
// Verify daemonsets.
|
||||
found = findResource(apiResourceList.APIResources, "daemonsets")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "daemonsets/status")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
|
||||
// Verify deployments.
|
||||
found = findResource(apiResourceList.APIResources, "deployments")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "deployments/status")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "deployments/scale")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "deployments/rollback")
|
||||
}
|
||||
|
||||
func testBatchResourceList(t *testing.T) {
|
||||
serverURL := serverIP + "/apis/" + batch_v1.SchemeGroupVersion.String()
|
||||
contents, err := readResponse(serverURL)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
var apiResourceList metav1.APIResourceList
|
||||
err = json.Unmarshal(contents, &apiResourceList)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err)
|
||||
}
|
||||
// APIVersion is "v1" for the batch group
|
||||
assert.Equal(t, "v1", apiResourceList.APIVersion)
|
||||
assert.Equal(t, batch_v1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
|
||||
// Assert that there are exactly 2 resources.
|
||||
assert.Equal(t, 2, len(apiResourceList.APIResources))
|
||||
|
||||
// Verify jobs
|
||||
found := findResource(apiResourceList.APIResources, "jobs")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
found = findResource(apiResourceList.APIResources, "jobs/status")
|
||||
assert.NotNil(t, found)
|
||||
assert.True(t, found.Namespaced)
|
||||
}
|
79
vendor/k8s.io/kubernetes/test/integration/framework/BUILD
generated
vendored
Normal file
@@ -0,0 +1,79 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "master_utils.go",
        "perf_utils.go",
        "serializer.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/apps/v1beta1:go_default_library",
        "//pkg/apis/autoscaling/v1:go_default_library",
        "//pkg/apis/batch:go_default_library",
        "//pkg/apis/certificates/v1alpha1:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//pkg/apis/policy/v1alpha1:go_default_library",
        "//pkg/apis/rbac/v1alpha1:go_default_library",
        "//pkg/apis/storage/v1beta1:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/record:go_default_library",
        "//pkg/client/restclient:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/replication:go_default_library",
        "//pkg/generated/openapi:go_default_library",
        "//pkg/genericapiserver:go_default_library",
        "//pkg/genericapiserver/authenticator:go_default_library",
        "//pkg/genericapiserver/authorizer:go_default_library",
        "//pkg/kubectl:go_default_library",
        "//pkg/kubelet/client:go_default_library",
        "//pkg/master:go_default_library",
        "//pkg/storage/storagebackend:go_default_library",
        "//pkg/util/env:go_default_library",
        "//pkg/version:go_default_library",
        "//plugin/pkg/admission/admit:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor:github.com/go-openapi/spec",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/pborman/uuid",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
        "//vendor:k8s.io/apimachinery/pkg/runtime/serializer/versioning",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
        "//vendor:k8s.io/apimachinery/pkg/watch",
        "//vendor:k8s.io/apiserver/pkg/authentication/authenticator",
        "//vendor:k8s.io/apiserver/pkg/authentication/request/union",
        "//vendor:k8s.io/apiserver/pkg/authentication/user",
        "//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
        "//vendor:k8s.io/apiserver/pkg/authorization/union",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
504
vendor/k8s.io/kubernetes/test/integration/framework/master_utils.go
generated
vendored
Normal file
@@ -0,0 +1,504 @@
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
goruntime "runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
authauthenticator "k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
authauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
authorizerunion "k8s.io/apiserver/pkg/authorization/union"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
|
||||
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/batch"
|
||||
certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
policy "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
|
||||
rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
|
||||
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
|
||||
"k8s.io/kubernetes/pkg/generated/openapi"
|
||||
"k8s.io/kubernetes/pkg/genericapiserver"
|
||||
"k8s.io/kubernetes/pkg/genericapiserver/authenticator"
|
||||
"k8s.io/kubernetes/pkg/genericapiserver/authorizer"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
|
||||
"k8s.io/kubernetes/pkg/master"
|
||||
"k8s.io/kubernetes/pkg/storage/storagebackend"
|
||||
"k8s.io/kubernetes/pkg/util/env"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
"k8s.io/kubernetes/plugin/pkg/admission/admit"
|
||||
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/pborman/uuid"
|
||||
)
|
||||
|
||||
const (
|
||||
// Timeout used in benchmarks, to eg: scale an rc
|
||||
DefaultTimeout = 30 * time.Minute
|
||||
|
||||
// Rc manifest used to create pods for benchmarks.
|
||||
// TODO: Convert this to a full path?
|
||||
TestRCManifest = "benchmark-controller.json"
|
||||
)
|
||||
|
||||
// MasterComponents is a control struct for all master components started via NewMasterComponents.
|
||||
// TODO: Include all master components (scheduler, nodecontroller).
|
||||
// TODO: Reconcile with integration.go, currently the master used there doesn't understand
|
||||
// how to restart cleanly, which is required for each iteration of a benchmark. The integration
|
||||
// tests also don't make it easy to isolate and turn off components at will.
|
||||
type MasterComponents struct {
|
||||
// Raw http server in front of the master
|
||||
ApiServer *httptest.Server
|
||||
// Kubernetes master, contains an embedded etcd storage
|
||||
KubeMaster *master.Master
|
||||
// Restclient used to talk to the kubernetes master
|
||||
ClientSet clientset.Interface
|
||||
// Replication controller manager
|
||||
ControllerManager *replicationcontroller.ReplicationManager
|
||||
// Channel for stop signals to rc manager
|
||||
rcStopCh chan struct{}
|
||||
// Used to stop master components individually, and via MasterComponents.Stop
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// Config is a struct of configuration directives for NewMasterComponents.
|
||||
type Config struct {
|
||||
// If nil, a default is used, partially filled configs will not get populated.
|
||||
MasterConfig *master.Config
|
||||
StartReplicationManager bool
|
||||
// Client throttling qps
|
||||
QPS float32
|
||||
// Client burst qps, also burst replicas allowed in rc manager
|
||||
Burst int
|
||||
// TODO: Add configs for endpoints controller, scheduler etc
|
||||
}
|
||||
|
||||
// NewMasterComponents creates, initializes and starts master components based on the given config.
|
||||
func NewMasterComponents(c *Config) *MasterComponents {
|
||||
m, s := startMasterOrDie(c.MasterConfig, nil, nil)
|
||||
// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
|
||||
glog.Infof("Master %+v", s.URL)
|
||||
// TODO: caesarxuchao: remove this client when the refactoring of client library is done.
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst})
|
||||
rcStopCh := make(chan struct{})
|
||||
controllerManager := replicationcontroller.NewReplicationManagerFromClient(clientset, controller.NoResyncPeriodFunc, c.Burst, 4096)
|
||||
|
||||
// TODO: Support events once we can cleanly shutdown an event recorder.
|
||||
controllerManager.SetEventRecorder(&record.FakeRecorder{})
|
||||
if c.StartReplicationManager {
|
||||
go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
|
||||
}
|
||||
return &MasterComponents{
|
||||
ApiServer: s,
|
||||
KubeMaster: m,
|
||||
ClientSet: clientset,
|
||||
ControllerManager: controllerManager,
|
||||
rcStopCh: rcStopCh,
|
||||
}
|
||||
}
|
||||
|
||||
// alwaysAllow always allows an action
|
||||
type alwaysAllow struct{}
|
||||
|
||||
func (alwaysAllow) Authorize(requestAttributes authauthorizer.Attributes) (bool, string, error) {
|
||||
return true, "always allow", nil
|
||||
}
|
||||
|
||||
// alwaysEmpty simulates "no authentication" for old tests
|
||||
func alwaysEmpty(req *http.Request) (user.Info, bool, error) {
|
||||
return &user.DefaultInfo{
|
||||
Name: "",
|
||||
}, true, nil
|
||||
}
|
||||
|
||||
// MasterReceiver can be used to provide the master to a custom incoming server function
|
||||
type MasterReceiver interface {
|
||||
SetMaster(m *master.Master)
|
||||
}
|
||||
|
||||
// MasterHolder implements MasterReceiver.
|
||||
type MasterHolder struct {
|
||||
Initialized chan struct{}
|
||||
M *master.Master
|
||||
}
|
||||
|
||||
func (h *MasterHolder) SetMaster(m *master.Master) {
|
||||
h.M = m
|
||||
close(h.Initialized)
|
||||
}
|
||||
|
||||
// startMasterOrDie starts a kubernetes master and an httpserver to handle api requests
|
||||
func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server) {
|
||||
var m *master.Master
|
||||
var s *httptest.Server
|
||||
|
||||
if incomingServer != nil {
|
||||
s = incomingServer
|
||||
} else {
|
||||
s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
m.GenericAPIServer.Handler.ServeHTTP(w, req)
|
||||
}))
|
||||
}
|
||||
|
||||
if masterConfig == nil {
|
||||
masterConfig = NewMasterConfig()
|
||||
masterConfig.GenericConfig.EnableProfiling = true
|
||||
masterConfig.GenericConfig.EnableMetrics = true
|
||||
masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.OpenAPIDefinitions)
|
||||
masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{
|
||||
InfoProps: spec.InfoProps{
|
||||
Title: "Kubernetes",
|
||||
Version: "unversioned",
|
||||
},
|
||||
}
|
||||
masterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{
|
||||
ResponseProps: spec.ResponseProps{
|
||||
Description: "Default Response.",
|
||||
},
|
||||
}
|
||||
masterConfig.GenericConfig.OpenAPIConfig.Definitions = openapi.OpenAPIDefinitions
|
||||
masterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()
|
||||
}
|
||||
|
||||
// set the loopback client config
|
||||
if masterConfig.GenericConfig.LoopbackClientConfig == nil {
|
||||
masterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
|
||||
}
|
||||
masterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL
|
||||
|
||||
privilegedLoopbackToken := uuid.NewRandom().String()
|
||||
// wrap any available authorizer
|
||||
tokens := make(map[string]*user.DefaultInfo)
|
||||
tokens[privilegedLoopbackToken] = &user.DefaultInfo{
|
||||
Name: user.APIServerUser,
|
||||
UID: uuid.NewRandom().String(),
|
||||
Groups: []string{user.SystemPrivilegedGroup},
|
||||
}
|
||||
|
||||
tokenAuthenticator := authenticator.NewAuthenticatorFromTokens(tokens)
|
||||
if masterConfig.GenericConfig.Authenticator == nil {
|
||||
masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))
|
||||
} else {
|
||||
masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authenticator)
|
||||
}
|
||||
|
||||
if masterConfig.GenericConfig.Authorizer != nil {
|
||||
tokenAuthorizer := authorizer.NewPrivilegedGroups(user.SystemPrivilegedGroup)
|
||||
masterConfig.GenericConfig.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorizer)
|
||||
} else {
|
||||
masterConfig.GenericConfig.Authorizer = alwaysAllow{}
|
||||
}
|
||||
|
||||
masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken
|
||||
|
||||
m, err := masterConfig.Complete().New()
|
||||
if err != nil {
|
||||
glog.Fatalf("error in bringing up the master: %v", err)
|
||||
}
|
||||
if masterReceiver != nil {
|
||||
masterReceiver.SetMaster(m)
|
||||
}
|
||||
|
||||
// TODO have this start method actually use the normal start sequence for the API server
|
||||
// this method never actually calls the `Run` method for the API server
|
||||
// fire the post hooks ourselves
|
||||
m.GenericAPIServer.PrepareRun()
|
||||
m.GenericAPIServer.RunPostStartHooks()
|
||||
|
||||
cfg := *masterConfig.GenericConfig.LoopbackClientConfig
|
||||
cfg.ContentConfig.GroupVersion = &schema.GroupVersion{}
|
||||
privilegedClient, err := restclient.RESTClientFor(&cfg)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
|
||||
result := privilegedClient.Get().AbsPath("/healthz").Do()
|
||||
status := 0
|
||||
result.StatusCode(&status)
|
||||
if status == 200 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
|
||||
// wait for services to be ready
|
||||
if masterConfig.EnableCoreControllers {
|
||||
// TODO Once /healthz is updated for posthooks, we'll wait for good health
|
||||
coreClient := coreclient.NewForConfigOrDie(&cfg)
|
||||
svcWatch, err := coreClient.Services(v1.NamespaceDefault).Watch(v1.ListOptions{})
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
_, err = watch.Until(30*time.Second, svcWatch, func(event watch.Event) (bool, error) {
|
||||
if event.Type != watch.Added {
|
||||
return false, nil
|
||||
}
|
||||
if event.Object.(*v1.Service).Name == "kubernetes" {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return m, s
|
||||
}
|
||||
|
||||
func parseCIDROrDie(cidr string) *net.IPNet {
|
||||
_, parsed, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
glog.Fatalf("error while parsing CIDR: %s", cidr)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// return the EtcdURL
|
||||
func GetEtcdURLFromEnv() string {
|
||||
url := env.GetEnvAsStringOrFallback("KUBE_INTEGRATION_ETCD_URL", "http://127.0.0.1:2379")
|
||||
glog.V(4).Infof("Using KUBE_INTEGRATION_ETCD_URL=%q", url)
|
||||
return url
|
||||
}
|
||||
|
||||
// Returns a basic master config.
|
||||
func NewMasterConfig() *master.Config {
|
||||
config := storagebackend.Config{
|
||||
ServerList: []string{GetEtcdURLFromEnv()},
|
||||
// This causes the integration tests to exercise the etcd
|
||||
// prefix code, so please don't change without ensuring
|
||||
// sufficient coverage in other ways.
|
||||
Prefix: uuid.New(),
|
||||
}
|
||||
|
||||
info, _ := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
|
||||
ns := NewSingleContentTypeSerializer(api.Scheme, info)
|
||||
|
||||
storageFactory := genericapiserver.NewDefaultStorageFactory(config, runtime.ContentTypeJSON, ns, genericapiserver.NewDefaultResourceEncodingConfig(), master.DefaultAPIResourceConfigSource())
|
||||
storageFactory.SetSerializer(
|
||||
schema.GroupResource{Group: v1.GroupName, Resource: genericapiserver.AllResources},
|
||||
"",
|
||||
ns)
|
||||
storageFactory.SetSerializer(
|
||||
schema.GroupResource{Group: autoscaling.GroupName, Resource: genericapiserver.AllResources},
|
||||
"",
|
||||
ns)
|
||||
storageFactory.SetSerializer(
|
||||
schema.GroupResource{Group: batch.GroupName, Resource: genericapiserver.AllResources},
|
||||
"",
|
||||
ns)
|
||||
storageFactory.SetSerializer(
|
||||
schema.GroupResource{Group: apps.GroupName, Resource: genericapiserver.AllResources},
|
||||
"",
|
||||
ns)
|
||||
storageFactory.SetSerializer(
|
||||
schema.GroupResource{Group: extensions.GroupName, Resource: genericapiserver.AllResources},
|
||||
"",
|
||||
ns)
|
||||
storageFactory.SetSerializer(
|
||||
schema.GroupResource{Group: policy.GroupName, Resource: genericapiserver.AllResources},
|
||||
"",
|
||||
ns)
|
||||
storageFactory.SetSerializer(
|
||||
schema.GroupResource{Group: rbac.GroupName, Resource: genericapiserver.AllResources},
|
||||
"",
|
||||
ns)
|
||||
storageFactory.SetSerializer(
|
||||
schema.GroupResource{Group: certificates.GroupName, Resource: genericapiserver.AllResources},
|
||||
"",
|
||||
ns)
|
||||
storageFactory.SetSerializer(
|
||||
schema.GroupResource{Group: storage.GroupName, Resource: genericapiserver.AllResources},
|
||||
"",
|
||||
ns)
|
||||
|
||||
genericConfig := genericapiserver.NewConfig()
|
||||
kubeVersion := version.Get()
|
||||
genericConfig.Version = &kubeVersion
|
||||
genericConfig.Authorizer = authorizer.NewAlwaysAllowAuthorizer()
|
||||
genericConfig.AdmissionControl = admit.NewAlwaysAdmit()
|
||||
genericConfig.EnableMetrics = true
|
||||
|
||||
return &master.Config{
|
||||
GenericConfig: genericConfig,
|
||||
APIResourceConfigSource: master.DefaultAPIResourceConfigSource(),
|
||||
StorageFactory: storageFactory,
|
||||
EnableCoreControllers: true,
|
||||
EnableWatchCache: true,
|
||||
KubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250},
|
||||
APIServerServicePort: 443,
|
||||
MasterCount: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the master config appropriate for most integration tests.
|
||||
func NewIntegrationTestMasterConfig() *master.Config {
|
||||
masterConfig := NewMasterConfig()
|
||||
masterConfig.EnableCoreControllers = true
|
||||
masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
|
||||
masterConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()
|
||||
return masterConfig
|
||||
}
|
||||
|
||||
func (m *MasterComponents) stopRCManager() {
|
||||
close(m.rcStopCh)
|
||||
}
|
||||
|
||||
func (m *MasterComponents) Stop(apiServer, rcManager bool) {
|
||||
glog.Infof("Stopping master components")
|
||||
if rcManager {
|
||||
// Ordering matters because the apiServer will only shutdown when pending
|
||||
// requests are done
|
||||
m.once.Do(m.stopRCManager)
|
||||
}
|
||||
if apiServer {
|
||||
m.ApiServer.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *testing.T) *v1.Namespace {
|
||||
// TODO: Create a namespace with a given basename.
|
||||
// Currently we neither create the namespace nor delete all its contents at the end.
|
||||
// But as long as tests are not using the same namespaces, this should work fine.
|
||||
return &v1.Namespace{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
// TODO: Once we start creating namespaces, switch to GenerateName.
|
||||
Name: baseName,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) {
|
||||
// TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
|
||||
}
|
||||
|
||||
// RCFromManifest reads a .json file and returns the rc in it.
|
||||
func RCFromManifest(fileName string) *v1.ReplicationController {
|
||||
data, err := ioutil.ReadFile(fileName)
|
||||
if err != nil {
|
||||
glog.Fatalf("Unexpected error reading rc manifest %v", err)
|
||||
}
|
||||
var controller v1.ReplicationController
|
||||
if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil {
|
||||
glog.Fatalf("Unexpected error reading rc manifest %v", err)
|
||||
}
|
||||
return &controller
|
||||
}
|
||||
|
||||
// StopRC stops the rc via kubectl's stop library
|
||||
func StopRC(rc *v1.ReplicationController, clientset internalclientset.Interface) error {
|
||||
reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
|
||||
if err != nil || reaper == nil {
|
||||
return err
|
||||
}
|
||||
err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ScaleRC scales the given rc to the given replicas.
|
||||
func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interface) (*api.ReplicationController, error) {
|
||||
scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
|
||||
waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
|
||||
err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
scaled, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return scaled, nil
|
||||
}
|
||||
|
||||
func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server) {
|
||||
if masterConfig == nil {
|
||||
masterConfig = NewMasterConfig()
|
||||
masterConfig.GenericConfig.EnableProfiling = true
|
||||
masterConfig.GenericConfig.EnableMetrics = true
|
||||
}
|
||||
return startMasterOrDie(masterConfig, nil, nil)
|
||||
}
|
||||
|
||||
func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server) {
|
||||
return startMasterOrDie(masterConfig, s, masterReceiver)
|
||||
}
|
||||
|
||||
// Task is a function passed to worker goroutines by RunParallel.
|
||||
// The function needs to implement its own thread safety.
|
||||
type Task func(id int) error
|
||||
|
||||
// RunParallel spawns a goroutine per task in the given queue
|
||||
func RunParallel(task Task, numTasks, numWorkers int) {
|
||||
start := time.Now()
|
||||
if numWorkers <= 0 {
|
||||
numWorkers = numTasks
|
||||
}
|
||||
defer func() {
|
||||
glog.Infof("RunParallel took %v for %d tasks and %d workers", time.Since(start), numTasks, numWorkers)
|
||||
}()
|
||||
var wg sync.WaitGroup
|
||||
semCh := make(chan struct{}, numWorkers)
|
||||
wg.Add(numTasks)
|
||||
for id := 0; id < numTasks; id++ {
|
||||
go func(id int) {
|
||||
semCh <- struct{}{}
|
||||
err := task(id)
|
||||
if err != nil {
|
||||
glog.Fatalf("Worker failed with %v", err)
|
||||
}
|
||||
<-semCh
|
||||
wg.Done()
|
||||
}(id)
|
||||
}
|
||||
wg.Wait()
|
||||
close(semCh)
|
||||
}
|
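RunParallel is the fan-out primitive that the benchmarks further below build on; as an illustrative usage note (not part of the diff, the task body and counts are made up), a call looks like:

// Run 100 independent tasks with at most 10 in flight; each Task receives its id.
// Assumes the framework package above and "fmt" are imported.
framework.RunParallel(func(id int) error {
	fmt.Printf("task %d finished\n", id)
	return nil
}, 100, 10)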
103
vendor/k8s.io/kubernetes/test/integration/framework/perf_utils.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
e2eframework "k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
retries = 5
|
||||
)
|
||||
|
||||
type IntegrationTestNodePreparer struct {
|
||||
client clientset.Interface
|
||||
countToStrategy []testutils.CountToStrategy
|
||||
nodeNamePrefix string
|
||||
}
|
||||
|
||||
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
|
||||
return &IntegrationTestNodePreparer{
|
||||
client: client,
|
||||
countToStrategy: countToStrategy,
|
||||
nodeNamePrefix: nodeNamePrefix,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
|
||||
numNodes := 0
|
||||
for _, v := range p.countToStrategy {
|
||||
numNodes += v.Count
|
||||
}
|
||||
|
||||
glog.Infof("Making %d nodes", numNodes)
|
||||
baseNode := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
GenerateName: p.nodeNamePrefix,
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
// TODO: investigate why this is needed.
|
||||
ExternalID: "foo",
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("32Gi"),
|
||||
},
|
||||
Phase: v1.NodeRunning,
|
||||
Conditions: []v1.NodeCondition{
|
||||
{Type: v1.NodeReady, Status: v1.ConditionTrue},
|
||||
},
|
||||
},
|
||||
}
|
||||
for i := 0; i < numNodes; i++ {
|
||||
if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
|
||||
glog.Fatalf("Error creating node: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
|
||||
index := 0
|
||||
sum := 0
|
||||
for _, v := range p.countToStrategy {
|
||||
sum += v.Count
|
||||
for ; index < sum; index++ {
|
||||
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
|
||||
glog.Errorf("Aborting node preparation: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
|
||||
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
|
||||
for i := range nodes.Items {
|
||||
if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &v1.DeleteOptions{}); err != nil {
|
||||
glog.Errorf("Error while deleting Node: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
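The node preparer above is driven by the scheduler_perf tests; purely as orientation (not part of the diff), a call site could look roughly like the sketch below. The Count and Strategy fields are taken from the code above, but clientSet and the TrivialNodePrepareStrategy name are assumptions and may not match the real testutils API.

// Rough sketch under the assumptions noted above: prepare ten identical "sample-node-" nodes.
preparer := NewIntegrationTestNodePreparer(
	clientSet,
	[]testutils.CountToStrategy{{Count: 10, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
	"sample-node-",
)
if err := preparer.PrepareNodes(); err != nil {
	glog.Fatalf("preparing nodes failed: %v", err)
}
defer preparer.CleanupNodes()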
53
vendor/k8s.io/kubernetes/test/integration/framework/serializer.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)

// NewSingleContentTypeSerializer wraps a serializer in a NegotiatedSerializer that handles one content type
func NewSingleContentTypeSerializer(scheme *runtime.Scheme, info runtime.SerializerInfo) runtime.StorageSerializer {
	return &wrappedSerializer{
		scheme: scheme,
		info:   info,
	}
}

type wrappedSerializer struct {
	scheme *runtime.Scheme
	info   runtime.SerializerInfo
}

var _ runtime.StorageSerializer = &wrappedSerializer{}

func (s *wrappedSerializer) SupportedMediaTypes() []runtime.SerializerInfo {
	return []runtime.SerializerInfo{s.info}
}

func (s *wrappedSerializer) UniversalDeserializer() runtime.Decoder {
	return s.info.Serializer
}

func (s *wrappedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
	return versioning.NewCodec(encoder, nil, s.scheme, s.scheme, s.scheme, s.scheme, s.scheme, gv, nil)
}

func (s *wrappedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
	return versioning.NewCodec(nil, decoder, s.scheme, s.scheme, s.scheme, s.scheme, s.scheme, nil, gv)
}
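As a usage note (not part of the diff), the only consumer of this helper in the files above is NewMasterConfig in master_utils.go; a minimal sketch of that call, reusing the same names, is:

// Pick the registered JSON serializer and wrap it so storage negotiates only JSON.
info, ok := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
if !ok {
	panic("JSON serializer not registered in api.Codecs")
}
ns := NewSingleContentTypeSerializer(api.Scheme, info)
// ns is then handed to genericapiserver.NewDefaultStorageFactory (see NewMasterConfig above).
_ = ns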
483
vendor/k8s.io/kubernetes/test/integration/garbagecollector/garbage_collector_test.go
generated
vendored
Normal file
@@ -0,0 +1,483 @@
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package garbagecollector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/client/typed/discovery"
|
||||
"k8s.io/kubernetes/pkg/client/typed/dynamic"
|
||||
"k8s.io/kubernetes/pkg/controller/garbagecollector"
|
||||
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
|
||||
"k8s.io/kubernetes/test/integration"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func getOrphanOptions() *v1.DeleteOptions {
|
||||
var trueVar = true
|
||||
return &v1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
}
|
||||
|
||||
func getNonOrphanOptions() *v1.DeleteOptions {
|
||||
var falseVar = false
|
||||
return &v1.DeleteOptions{OrphanDependents: &falseVar}
|
||||
}
|
||||
|
||||
const garbageCollectedPodName = "test.pod.1"
|
||||
const independentPodName = "test.pod.2"
|
||||
const oneValidOwnerPodName = "test.pod.3"
|
||||
const toBeDeletedRCName = "test.rc.1"
|
||||
const remainingRCName = "test.rc.2"
|
||||
|
||||
func newPod(podName, podNamespace string, ownerReferences []metav1.OwnerReference) *v1.Pod {
|
||||
for i := 0; i < len(ownerReferences); i++ {
|
||||
if len(ownerReferences[i].Kind) == 0 {
|
||||
ownerReferences[i].Kind = "ReplicationController"
|
||||
}
|
||||
ownerReferences[i].APIVersion = "v1"
|
||||
}
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: podNamespace,
|
||||
OwnerReferences: ownerReferences,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newOwnerRC(name, namespace string) *v1.ReplicationController {
|
||||
return &v1.ReplicationController{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicationController",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Selector: map[string]string{"name": "test"},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Labels: map[string]string{"name": "test"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func setup(t *testing.T) (*httptest.Server, *garbagecollector.GarbageCollector, clientset.Interface) {
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
masterConfig.EnableCoreControllers = false
|
||||
masterConfig.GenericConfig.EnableGarbageCollection = true
|
||||
_, s := framework.RunAMaster(masterConfig)
|
||||
|
||||
clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
|
||||
if err != nil {
|
||||
t.Fatalf("Error in create clientset: %v", err)
|
||||
}
|
||||
preferredResources, err := clientSet.Discovery().ServerPreferredResources()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get supported resources from server: %v", err)
|
||||
}
|
||||
deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, preferredResources)
|
||||
deletableGroupVersionResources, err := discovery.GroupVersionResources(deletableResources)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse supported resources from server: %v", err)
|
||||
}
|
||||
config := &restclient.Config{Host: s.URL}
|
||||
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
|
||||
metaOnlyClientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
|
||||
config.ContentConfig.NegotiatedSerializer = nil
|
||||
clientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
|
||||
gc, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, api.Registry.RESTMapper(), deletableGroupVersionResources)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create garbage collector")
|
||||
}
|
||||
return s, gc, clientSet
|
||||
}
|
||||
|
||||
// This test simulates the cascading deletion.
|
||||
func TestCascadingDeletion(t *testing.T) {
|
||||
glog.V(6).Infof("TestCascadingDeletion starts")
|
||||
defer glog.V(6).Infof("TestCascadingDeletion ends")
|
||||
s, gc, clientSet := setup(t)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("gc-cascading-deletion", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
|
||||
toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create replication controller: %v", err)
|
||||
}
|
||||
remainingRC, err := rcClient.Create(newOwnerRC(remainingRCName, ns.Name))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create replication controller: %v", err)
|
||||
}
|
||||
|
||||
rcs, err := rcClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list replication controllers: %v", err)
|
||||
}
|
||||
if len(rcs.Items) != 2 {
|
||||
t.Fatalf("Expect only 2 replication controller")
|
||||
}
|
||||
|
||||
// this pod should be cascadingly deleted.
|
||||
pod := newPod(garbageCollectedPodName, ns.Name, []metav1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
|
||||
_, err = podClient.Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Pod: %v", err)
|
||||
}
|
||||
|
||||
// this pod shouldn't be cascadingly deleted, because it has a valid reference.
|
||||
pod = newPod(oneValidOwnerPodName, ns.Name, []metav1.OwnerReference{
|
||||
{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName},
|
||||
{UID: remainingRC.ObjectMeta.UID, Name: remainingRCName},
|
||||
})
|
||||
_, err = podClient.Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Pod: %v", err)
|
||||
}
|
||||
|
||||
// this pod shouldn't be cascadingly deleted, because it doesn't have an owner.
|
||||
pod = newPod(independentPodName, ns.Name, []metav1.OwnerReference{})
|
||||
_, err = podClient.Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Pod: %v", err)
|
||||
}
|
||||
|
||||
// set up watch
|
||||
pods, err := podClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list pods: %v", err)
|
||||
}
|
||||
if len(pods.Items) != 3 {
|
||||
t.Fatalf("Expect only 3 pods")
|
||||
}
|
||||
stopCh := make(chan struct{})
|
||||
go gc.Run(5, stopCh)
|
||||
defer close(stopCh)
|
||||
// delete one of the replication controller
|
||||
if err := rcClient.Delete(toBeDeletedRCName, getNonOrphanOptions()); err != nil {
|
||||
t.Fatalf("failed to delete replication controller: %v", err)
|
||||
}
|
||||
// sometimes the deletion of the RC takes a long time to be observed by
|
||||
// the gc, so wait for the garbage collector to observe the deletion of
|
||||
// the toBeDeletedRC
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
return !gc.GraphHasUID([]types.UID{toBeDeletedRC.ObjectMeta.UID}), nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
|
||||
t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
|
||||
}
|
||||
// checks the garbage collect doesn't delete pods it shouldn't delete.
|
||||
if _, err := podClient.Get(independentPodName, metav1.GetOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := podClient.Get(oneValidOwnerPodName, metav1.GetOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// This test simulates the case where an object is created with an owner that
|
||||
// doesn't exist. It verifies the GC will delete such an object.
|
||||
func TestCreateWithNonExistentOwner(t *testing.T) {
|
||||
s, gc, clientSet := setup(t)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("gc-non-existing-owner", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
|
||||
pod := newPod(garbageCollectedPodName, ns.Name, []metav1.OwnerReference{{UID: "doesn't matter", Name: toBeDeletedRCName}})
|
||||
_, err := podClient.Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Pod: %v", err)
|
||||
}
|
||||
|
||||
// set up watch
|
||||
pods, err := podClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list pods: %v", err)
|
||||
}
|
||||
if len(pods.Items) != 1 {
|
||||
t.Fatalf("Expect only 1 pod")
|
||||
}
|
||||
stopCh := make(chan struct{})
|
||||
go gc.Run(5, stopCh)
|
||||
defer close(stopCh)
|
||||
// wait for the garbage collector to delete the pod
|
||||
if err := integration.WaitForPodToDisappear(podClient, garbageCollectedPodName, 5*time.Second, 30*time.Second); err != nil {
|
||||
t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
|
||||
}
|
||||
}
|
||||
|
||||
func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *v1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
|
||||
defer wg.Done()
|
||||
rcClient := clientSet.Core().ReplicationControllers(namespace)
|
||||
podClient := clientSet.Core().Pods(namespace)
|
||||
// create rc.
|
||||
rcName := "test.rc." + nameSuffix
|
||||
rc := newOwnerRC(rcName, namespace)
|
||||
rc.ObjectMeta.Finalizers = initialFinalizers
|
||||
rc, err := rcClient.Create(rc)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create replication controller: %v", err)
|
||||
}
|
||||
rcUIDs <- rc.ObjectMeta.UID
|
||||
// create pods.
|
||||
var podUIDs []types.UID
|
||||
for j := 0; j < 3; j++ {
|
||||
podName := "test.pod." + nameSuffix + "-" + strconv.Itoa(j)
|
||||
pod := newPod(podName, namespace, []metav1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}})
|
||||
_, err = podClient.Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Pod: %v", err)
|
||||
}
|
||||
podUIDs = append(podUIDs, pod.ObjectMeta.UID)
|
||||
}
|
||||
orphan := (options != nil && options.OrphanDependents != nil && *options.OrphanDependents) || (options == nil && len(initialFinalizers) != 0 && initialFinalizers[0] == api.FinalizerOrphan)
|
||||
// if we intend to orphan the pods, we need to wait for the gc to observe the
|
||||
// creation of the pods, otherwise if the deletion of RC is observed before
|
||||
// the creation of the pods, the pods will not be orphaned.
|
||||
if orphan {
|
||||
wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })
|
||||
}
|
||||
// delete the rc
|
||||
if err := rcClient.Delete(rc.ObjectMeta.Name, options); err != nil {
|
||||
t.Fatalf("failed to delete replication controller: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
|
||||
rcClient := clientSet.Core().ReplicationControllers(namespace)
|
||||
podClient := clientSet.Core().Pods(namespace)
|
||||
pods, err := podClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list pods: %v", err)
|
||||
}
|
||||
var ret = true
|
||||
if len(pods.Items) != podNum {
|
||||
ret = false
|
||||
t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
|
||||
}
|
||||
rcs, err := rcClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
|
||||
}
|
||||
if len(rcs.Items) != rcNum {
|
||||
ret = false
|
||||
t.Logf("expect %d RCs, got %d RCs", rcNum, len(rcs.Items))
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// The stress test is not very stressful, because we need to control the running
|
||||
// time of our pre-submit tests to increase submit-queue throughput. We'll add
|
||||
// e2e tests that put more stress.
|
||||
func TestStressingCascadingDeletion(t *testing.T) {
|
||||
t.Logf("starts garbage collector stress test")
|
||||
s, gc, clientSet := setup(t)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("gc-stressing-cascading-deletion", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
go gc.Run(5, stopCh)
|
||||
defer close(stopCh)
|
||||
|
||||
const collections = 10
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(collections * 4)
|
||||
rcUIDs := make(chan types.UID, collections*4)
|
||||
for i := 0; i < collections; i++ {
|
||||
// rc is created with empty finalizers, deleted with nil delete options, pods will remain.
|
||||
go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, nil, &wg, rcUIDs)
|
||||
// rc is created with the orphan finalizer, deleted with nil options, pods will remain.
|
||||
go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{api.FinalizerOrphan}, nil, &wg, rcUIDs)
|
||||
// rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted.
|
||||
go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{api.FinalizerOrphan}, getNonOrphanOptions(), &wg, rcUIDs)
|
||||
// rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain.
|
||||
go setupRCsPods(t, gc, clientSet, "collection4-"+strconv.Itoa(i), ns.Name, []string{}, getOrphanOptions(), &wg, rcUIDs)
|
||||
}
|
||||
wg.Wait()
|
||||
t.Logf("all pods are created, all replications controllers are created then deleted")
|
||||
// wait for the RCs and Pods to reach the expected numbers.
|
||||
if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) {
|
||||
podsInEachCollection := 3
|
||||
// see the comments on the calls to setupRCsPods for details
|
||||
remainingGroups := 3
|
||||
return verifyRemainingObjects(t, clientSet, ns.Name, 0, collections*podsInEachCollection*remainingGroups)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("number of remaining replication controllers and pods are as expected")
|
||||
|
||||
// verify the remaining pods all have "orphan" in their names.
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
pods, err := podClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, pod := range pods.Items {
|
||||
if !strings.Contains(pod.ObjectMeta.Name, "collection1-") && !strings.Contains(pod.ObjectMeta.Name, "collection2-") && !strings.Contains(pod.ObjectMeta.Name, "collection4-") {
|
||||
t.Errorf("got unexpected remaining pod: %#v", pod)
|
||||
}
|
||||
}
|
||||
|
||||
// verify there is no node representing replication controllers in the gc's graph
|
||||
uids := make([]types.UID, 0, collections)
|
||||
for i := 0; i < collections; i++ {
|
||||
uid := <-rcUIDs
|
||||
uids = append(uids, uid)
|
||||
}
|
||||
if gc.GraphHasUID(uids) {
|
||||
t.Errorf("Expect all nodes representing replication controllers are removed from the Propagator's graph")
|
||||
}
|
||||
metric := &dto.Metric{}
|
||||
garbagecollector.EventProcessingLatency.Write(metric)
|
||||
count := float64(metric.Summary.GetSampleCount())
|
||||
sum := metric.Summary.GetSampleSum()
|
||||
t.Logf("Average time spent in GC's eventQueue is %.1f microseconds", sum/count)
|
||||
garbagecollector.DirtyProcessingLatency.Write(metric)
|
||||
count = float64(metric.Summary.GetSampleCount())
|
||||
sum = metric.Summary.GetSampleSum()
|
||||
t.Logf("Average time spent in GC's dirtyQueue is %.1f microseconds", sum/count)
|
||||
garbagecollector.OrphanProcessingLatency.Write(metric)
|
||||
count = float64(metric.Summary.GetSampleCount())
|
||||
sum = metric.Summary.GetSampleSum()
|
||||
t.Logf("Average time spent in GC's orphanQueue is %.1f microseconds", sum/count)
|
||||
}
|
||||
|
||||
func TestOrphaning(t *testing.T) {
|
||||
s, gc, clientSet := setup(t)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("gc-orphaning", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
|
||||
// create the RC with the orphan finalizer set
|
||||
toBeDeletedRC := newOwnerRC(toBeDeletedRCName, ns.Name)
|
||||
toBeDeletedRC, err := rcClient.Create(toBeDeletedRC)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create replication controller: %v", err)
|
||||
}
|
||||
|
||||
// these pods should be orphaned.
|
||||
var podUIDs []types.UID
|
||||
podsNum := 3
|
||||
for i := 0; i < podsNum; i++ {
|
||||
podName := garbageCollectedPodName + strconv.Itoa(i)
|
||||
pod := newPod(podName, ns.Name, []metav1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
|
||||
_, err = podClient.Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Pod: %v", err)
|
||||
}
|
||||
podUIDs = append(podUIDs, pod.ObjectMeta.UID)
|
||||
}
|
||||
stopCh := make(chan struct{})
|
||||
go gc.Run(5, stopCh)
|
||||
defer close(stopCh)
|
||||
|
||||
// we need to wait for the gc to observe the creation of the pods, otherwise if
|
||||
// the deletion of RC is observed before the creation of the pods, the pods
|
||||
// will not be orphaned.
|
||||
wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { return gc.GraphHasUID(podUIDs), nil })
|
||||
|
||||
err = rcClient.Delete(toBeDeletedRCName, getOrphanOptions())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to gracefully delete the rc: %v", err)
|
||||
}
|
||||
// verify the toBeDeleteRC is deleted
|
||||
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
rcs, err := rcClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(rcs.Items) == 0 {
|
||||
t.Logf("Still has %d RCs", len(rcs.Items))
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// verify pods don't have the ownerPod as an owner anymore
|
||||
pods, err := podClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to list pods: %v", err)
|
||||
}
|
||||
if len(pods.Items) != podsNum {
|
||||
t.Errorf("Expect %d pod(s), but got %#v", podsNum, pods)
|
||||
}
|
||||
for _, pod := range pods.Items {
|
||||
if len(pod.ObjectMeta.OwnerReferences) != 0 {
|
||||
t.Errorf("pod %s still has non-empty OwnerReferences: %v", pod.ObjectMeta.Name, pod.ObjectMeta.OwnerReferences)
|
||||
}
|
||||
}
|
||||
}
|
78
vendor/k8s.io/kubernetes/test/integration/kubectl/kubectl_test.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubectl
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
|
||||
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
|
||||
"k8s.io/kubernetes/pkg/kubectl/cmd/util"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func TestKubectlValidation(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data string
|
||||
// Validation should not fail on missing type information.
|
||||
err bool
|
||||
}{
|
||||
{`{"apiVersion": "v1", "kind": "thisObjectShouldNotExistInAnyGroup"}`, true},
|
||||
{`{"apiVersion": "invalidVersion", "kind": "Pod"}`, false},
|
||||
{`{"apiVersion": "v1", "kind": "Pod"}`, false},
|
||||
|
||||
// The following test the experimental api.
|
||||
// TODO: Replace with something more robust. These may move.
|
||||
{`{"apiVersion": "extensions/v1beta1", "kind": "Ingress"}`, false},
|
||||
{`{"apiVersion": "extensions/v1beta1", "kind": "DaemonSet"}`, false},
|
||||
{`{"apiVersion": "vNotAVersion", "kind": "DaemonSet"}`, false},
|
||||
}
|
||||
components := framework.NewMasterComponents(&framework.Config{})
|
||||
defer components.Stop(true, true)
|
||||
ctx := clientcmdapi.NewContext()
|
||||
cfg := clientcmdapi.NewConfig()
|
||||
cluster := clientcmdapi.NewCluster()
|
||||
|
||||
cluster.Server = components.ApiServer.URL
|
||||
cluster.InsecureSkipTLSVerify = true
|
||||
cfg.Contexts = map[string]*clientcmdapi.Context{"test": ctx}
|
||||
cfg.CurrentContext = "test"
|
||||
overrides := clientcmd.ConfigOverrides{
|
||||
ClusterInfo: *cluster,
|
||||
}
|
||||
cmdConfig := clientcmd.NewNonInteractiveClientConfig(*cfg, "test", &overrides, nil)
|
||||
factory := util.NewFactory(cmdConfig)
|
||||
schema, err := factory.Validator(true, "")
|
||||
if err != nil {
|
||||
t.Errorf("failed to get validator: %v", err)
|
||||
return
|
||||
}
|
||||
for i, test := range testCases {
|
||||
err := schema.ValidateBytes([]byte(test.data))
|
||||
if err == nil {
|
||||
if test.err {
|
||||
t.Errorf("case %d: expected error", i)
|
||||
}
|
||||
} else {
|
||||
if !test.err {
|
||||
t.Errorf("case %d: unexpected error: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
201
vendor/k8s.io/kubernetes/test/integration/master/master_benchmark_test.go
generated
vendored
Normal file
@@ -0,0 +1,201 @@
// +build benchmark,!no-etcd,!integration
|
||||
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package master
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
// Command line flag globals, parsed in init and used by the benchmarks:
|
||||
// * pods && !tasks: Start -pods, scale number of parallel tasks with b.N
|
||||
// * !pods && tasks: Start -tasks, scale pods with b.N
|
||||
// * pods && tasks: Ignore b.N, benchmark behaves like a test constrained by -benchtime.
|
||||
// * !pods && !tasks: scale pods and workers with b.N.
|
||||
// -workers specifies the number of workers to shard tasks across.
|
||||
// Eg: go test -bench . -bench-pods 3000 -bench-tasks 100 -bench-workers 10:
|
||||
// Create 100 tasks each listing 3000 pods, and run them 10 at a time.
|
||||
var (
|
||||
Workers int
|
||||
Pods int
|
||||
Tasks int
|
||||
)
|
||||
|
||||
const Glog_fatalf = 3
|
||||
|
||||
func init() {
|
||||
q := flag.Int("bench-quiet", 3, "Quietness, don't glog severities <= given level during the benchmark.")
|
||||
pods := flag.Int("bench-pods", -1, "Number of pods for the benchmark. If unspecified, uses b.N.")
|
||||
workers := flag.Int("bench-workers", -1, "Number workers for the benchmark. If unspecified, uses tasks.")
|
||||
tasks := flag.Int("bench-tasks", -1, "Number of tasks for the benchmark. These tasks are sharded across the workers. If unspecified, uses b.N.")
|
||||
flag.Parse()
|
||||
|
||||
// Unfortunately this v level goes in the opposite direction as stderrthreshold.
|
||||
flag.Set("v", fmt.Sprintf("%d", *q))
|
||||
|
||||
// We need quiet logs to parse benchmark results, which includes Errorf.
|
||||
flag.Set("logtostderr", "false")
|
||||
flag.Set("stderrthreshold", fmt.Sprintf("%d", Glog_fatalf-*q))
|
||||
Pods = *pods
|
||||
Workers = *workers
|
||||
Tasks = *tasks
|
||||
}
|
||||
|
||||
// getPods returns the cmd line -pods or b.N if -pods wasn't specified.
|
||||
// Benchmarks can call getPods to get the number of pods they need to
|
||||
// create for a given benchmark.
|
||||
func getPods(bN int) int {
|
||||
if Pods < 0 {
|
||||
return bN
|
||||
}
|
||||
return Pods
|
||||
}
|
||||
|
||||
// getTasks returns the cmd line -tasks or b.N if -tasks wasn't specified.
|
||||
// Benchmarks would call getTasks to get the number of tasks required to
|
||||
// perform the benchmark in parallel.
|
||||
func getTasks(bN int) int {
|
||||
if Tasks < 0 {
|
||||
return bN
|
||||
}
|
||||
return Tasks
|
||||
}
|
||||
|
||||
// getIterations returns the number of iterations required by each benchmark for
|
||||
// go to produce reliable timing results.
|
||||
func getIterations(bN int) int {
|
||||
// Anything with constant pods is only linear if we iterate b.N times.
|
||||
if Pods > 0 {
|
||||
return bN
|
||||
}
|
||||
return 1
|
||||
}
|
||||
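A minimal sketch of how a benchmark in this file combines the helpers above (the flag rules in the comment at the top decide whether b.N or the -bench-* flags win); the benchmark name and the empty task body are placeholders, not part of this package:
func BenchmarkExampleList(b *testing.B) {
	// numPods and numTasks fall back to b.N unless -bench-pods / -bench-tasks are set;
	// getIterations keeps the run linear in b.N when -bench-pods pins the pod count.
	numPods, numTasks, iter := getPods(b.N), getTasks(b.N), getIterations(b.N)
	glog.Infof("pods=%d tasks=%d iterations=%d workers=%d", numPods, numTasks, iter, Workers)
	for i := 0; i < iter; i++ {
		framework.RunParallel(func(id int) error {
			return nil // each of the numTasks tasks would do its share of work here
		}, numTasks, Workers)
	}
}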
|
||||
// startPodsOnNodes creates numPods pods sharded across numNodes nodes.
|
||||
func startPodsOnNodes(ns string, numPods, numNodes int, restClient clientset.Interface) {
|
||||
podsPerNode := numPods / numNodes
|
||||
if podsPerNode < 1 {
|
||||
podsPerNode = 1
|
||||
}
|
||||
framework.RunParallel(func(id int) error {
|
||||
return framework.StartPods(podsPerNode, fmt.Sprintf("host.%d", id), restClient)
|
||||
}, numNodes, -1)
|
||||
}
|
||||
|
||||
// Benchmark pod listing by waiting on `Tasks` listers to list `Pods` pods via `Workers`.
|
||||
func BenchmarkPodList(b *testing.B) {
|
||||
b.StopTimer()
|
||||
m := framework.NewMasterComponents(&framework.Config{nil, true, false, 250.0, 500})
|
||||
defer m.Stop(true, true)
|
||||
|
||||
ns := framework.CreateTestingNamespace("benchmark-pod-list", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
numPods, numTasks, iter := getPods(b.N), getTasks(b.N), getIterations(b.N)
|
||||
podsPerNode := numPods / numTasks
|
||||
if podsPerNode < 1 {
|
||||
podsPerNode = 1
|
||||
}
|
||||
glog.Infof("Starting benchmark: b.N %d, pods %d, workers %d, podsPerNode %d",
|
||||
b.N, numPods, numTasks, podsPerNode)
|
||||
|
||||
startPodsOnNodes(ns.Name, numPods, numTasks, m.RestClient)
|
||||
// Stop the rc manager so it doesn't steal resources
|
||||
m.Stop(false, true)
|
||||
|
||||
b.StartTimer()
|
||||
for i := 0; i < iter; i++ {
|
||||
framework.RunParallel(func(id int) error {
|
||||
host := fmt.Sprintf("host.%d", id)
|
||||
now := time.Now()
|
||||
defer func() {
|
||||
glog.V(3).Infof("Worker %d: Node %v listing pods took %v", id, host, time.Since(now))
|
||||
}()
|
||||
if pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{
|
||||
LabelSelector: labels.Everything(),
|
||||
FieldSelector: fields.OneTermEqualSelector(api.PodHostField, host),
|
||||
}); err != nil {
|
||||
return err
|
||||
} else if len(pods.Items) < podsPerNode {
|
||||
glog.Fatalf("List retrieved %d pods, which is less than %d", len(pods.Items), podsPerNode)
|
||||
}
|
||||
return nil
|
||||
}, numTasks, Workers)
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
// Benchmark pod listing by waiting on `Tasks` listers to list `Pods` pods via `Workers`.
|
||||
func BenchmarkPodListEtcd(b *testing.B) {
|
||||
b.StopTimer()
|
||||
m := framework.NewMasterComponents(&framework.Config{nil, true, false, 250.0, 500})
|
||||
defer m.Stop(true, true)
|
||||
|
||||
ns := framework.CreateTestingNamespace("benchmark-pod-list-etcd", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
numPods, numTasks, iter := getPods(b.N), getTasks(b.N), getIterations(b.N)
|
||||
podsPerNode := numPods / numTasks
|
||||
if podsPerNode < 1 {
|
||||
podsPerNode = 1
|
||||
}
|
||||
|
||||
startPodsOnNodes(ns.Name, numPods, numTasks, m.RestClient)
|
||||
// Stop the rc manager so it doesn't steal resources
|
||||
m.Stop(false, true)
|
||||
|
||||
glog.Infof("Starting benchmark: b.N %d, pods %d, workers %d, podsPerNode %d",
|
||||
b.N, numPods, numTasks, podsPerNode)
|
||||
|
||||
b.StartTimer()
|
||||
for i := 0; i < iter; i++ {
|
||||
framework.RunParallel(func(id int) error {
|
||||
now := time.Now()
|
||||
defer func() {
|
||||
glog.V(3).Infof("Worker %d: listing pods took %v", id, time.Since(now))
|
||||
}()
|
||||
pods, err := m.ClientSet.Core().Pods(ns.Name).List(api.ListOptions{
|
||||
LabelSelector: labels.Everything(),
|
||||
FieldSelector: fields.Everything(),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(pods.Items) < numPods {
|
||||
glog.Fatalf("List retrieved %d pods, which is less than %d", len(pods.Items), numPods)
|
||||
}
|
||||
return nil
|
||||
}, numTasks, Workers)
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
os.Exit(m.Run())
|
||||
}
|
536
vendor/k8s.io/kubernetes/test/integration/master/master_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,536 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package master
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
clienttypedv1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/test/integration"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func testPrefix(t *testing.T, prefix string) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
resp, err := http.Get(s.URL + prefix)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting %s prefix: %v", prefix, err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoscalingPrefix(t *testing.T) {
|
||||
testPrefix(t, "/apis/autoscaling/")
|
||||
}
|
||||
|
||||
func TestBatchPrefix(t *testing.T) {
|
||||
testPrefix(t, "/apis/batch/")
|
||||
}
|
||||
|
||||
func TestAppsPrefix(t *testing.T) {
|
||||
testPrefix(t, "/apis/apps/")
|
||||
}
|
||||
|
||||
func TestExtensionsPrefix(t *testing.T) {
|
||||
testPrefix(t, "/apis/extensions/")
|
||||
}
|
||||
|
||||
func TestEmptyList(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
u := s.URL + "/api/v1/namespaces/default/pods"
|
||||
resp, err := http.Get(u)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting %s: %v", u, err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
data, _ := ioutil.ReadAll(resp.Body)
|
||||
decodedData := map[string]interface{}{}
|
||||
if err := json.Unmarshal(data, &decodedData); err != nil {
|
||||
t.Logf("body: %s", string(data))
|
||||
t.Fatalf("got error decoding data: %v", err)
|
||||
}
|
||||
if items, ok := decodedData["items"]; !ok {
|
||||
t.Logf("body: %s", string(data))
|
||||
t.Fatalf("missing items field in empty list (all lists should return an items field)")
|
||||
} else if items == nil {
|
||||
t.Logf("body: %s", string(data))
|
||||
t.Fatalf("nil items field from empty list (all lists should return non-nil empty items lists)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatchSucceedsWithoutArgs(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
resp, err := http.Get(s.URL + "/api/v1/namespaces?watch=1")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting experimental prefix: %v", err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
var hpaV1 string = `
|
||||
{
|
||||
"apiVersion": "autoscaling/v1",
|
||||
"kind": "HorizontalPodAutoscaler",
|
||||
"metadata": {
|
||||
"name": "test-hpa",
|
||||
"namespace": "default"
|
||||
},
|
||||
"spec": {
|
||||
"scaleTargetRef": {
|
||||
"kind": "ReplicationController",
|
||||
"name": "test-hpa",
|
||||
"namespace": "default"
|
||||
},
|
||||
"minReplicas": 1,
|
||||
"maxReplicas": 10,
|
||||
"targetCPUUtilizationPercentage": 50
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
func autoscalingPath(resource, namespace, name string) string {
|
||||
return testapi.Autoscaling.ResourcePath(resource, namespace, name)
|
||||
}
|
||||
|
||||
func batchPath(resource, namespace, name string) string {
|
||||
return testapi.Batch.ResourcePath(resource, namespace, name)
|
||||
}
|
||||
|
||||
func extensionsPath(resource, namespace, name string) string {
|
||||
return testapi.Extensions.ResourcePath(resource, namespace, name)
|
||||
}
|
||||
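For orientation, and assuming testapi's usual default group versions at this vintage (autoscaling/v1, extensions/v1beta1), the helpers above resolve to paths roughly like the following; the concrete versions depend on how testapi is configured, so treat the printed paths as illustrative only:
func examplePaths() {
	// Hypothetical output; the exact group versions come from testapi's defaults.
	fmt.Println(autoscalingPath("horizontalpodautoscalers", "default", ""))
	// e.g. /apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers
	fmt.Println(extensionsPath("horizontalpodautoscalers", "default", ""))
	// e.g. /apis/extensions/v1beta1/namespaces/default/horizontalpodautoscalers
}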
|
||||
func TestAutoscalingGroupBackwardCompatibility(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
transport := http.DefaultTransport
|
||||
|
||||
requests := []struct {
|
||||
verb string
|
||||
URL string
|
||||
body string
|
||||
expectedStatusCodes map[int]bool
|
||||
expectedVersion string
|
||||
}{
|
||||
{"POST", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), hpaV1, integration.Code201, ""},
|
||||
{"GET", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Autoscaling.GroupVersion().String()},
|
||||
{"GET", extensionsPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
|
||||
}
|
||||
|
||||
for _, r := range requests {
|
||||
bodyBytes := bytes.NewReader([]byte(r.body))
|
||||
req, err := http.NewRequest(r.verb, s.URL+r.URL, bodyBytes)
|
||||
if err != nil {
|
||||
t.Logf("case %v", r)
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
func() {
|
||||
resp, err := transport.RoundTrip(req)
|
||||
if err != nil {
|
||||
t.Logf("case %v", r)
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
b, _ := ioutil.ReadAll(resp.Body)
|
||||
body := string(b)
|
||||
if _, ok := r.expectedStatusCodes[resp.StatusCode]; !ok {
|
||||
t.Logf("case %v", r)
|
||||
t.Errorf("Expected status one of %v, but got %v", r.expectedStatusCodes, resp.StatusCode)
|
||||
t.Errorf("Body: %v", body)
|
||||
}
|
||||
if !strings.Contains(body, "\"apiVersion\":\""+r.expectedVersion) {
|
||||
t.Logf("case %v", r)
|
||||
t.Errorf("Expected version %v, got body %v", r.expectedVersion, body)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccept(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
resp, err := http.Get(s.URL + "/api/")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting api: %v", err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
|
||||
}
|
||||
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
if resp.Header.Get("Content-Type") != "application/json" {
|
||||
t.Errorf("unexpected content: %s", body)
|
||||
}
|
||||
if err := json.Unmarshal(body, &map[string]interface{}{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", s.URL+"/api/", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req.Header.Set("Accept", "application/yaml")
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
body, _ = ioutil.ReadAll(resp.Body)
|
||||
if resp.Header.Get("Content-Type") != "application/yaml" {
|
||||
t.Errorf("unexpected content: %s", body)
|
||||
}
|
||||
t.Logf("body: %s", body)
|
||||
if err := yaml.Unmarshal(body, &map[string]interface{}{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req, err = http.NewRequest("GET", s.URL+"/api/", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req.Header.Set("Accept", "application/json, application/yaml")
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
body, _ = ioutil.ReadAll(resp.Body)
|
||||
if resp.Header.Get("Content-Type") != "application/json" {
|
||||
t.Errorf("unexpected content: %s", body)
|
||||
}
|
||||
t.Logf("body: %s", body)
|
||||
if err := yaml.Unmarshal(body, &map[string]interface{}{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req, err = http.NewRequest("GET", s.URL+"/api/", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req.Header.Set("Accept", "application") // not a valid media type
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusNotAcceptable {
|
||||
t.Errorf("unexpected error from the server")
|
||||
}
|
||||
}
|
||||
|
||||
func countEndpoints(eps *api.Endpoints) int {
|
||||
count := 0
|
||||
for i := range eps.Subsets {
|
||||
count += len(eps.Subsets[i].Addresses) * len(eps.Subsets[i].Ports)
|
||||
}
|
||||
return count
|
||||
}
|
||||
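A quick worked example of the addresses-times-ports arithmetic above, with made-up values:
func exampleCountEndpoints() {
	eps := &api.Endpoints{
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}},
			Ports:     []api.EndpointPort{{Port: 443}, {Port: 6443}, {Port: 8443}},
		}},
	}
	fmt.Println(countEndpoints(eps)) // 2 addresses * 3 ports = 6
}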
|
||||
func TestMasterService(t *testing.T) {
|
||||
_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
|
||||
defer s.Close()
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
|
||||
err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
|
||||
svcList, err := client.Core().Services(api.NamespaceDefault).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
found := false
|
||||
for i := range svcList.Items {
|
||||
if svcList.Items[i].Name == "kubernetes" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
if countEndpoints(ep) == 0 {
|
||||
return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceAlloc(t *testing.T) {
|
||||
cfg := framework.NewIntegrationTestMasterConfig()
|
||||
_, cidr, err := net.ParseCIDR("192.168.0.0/29")
|
||||
if err != nil {
|
||||
t.Fatalf("bad cidr: %v", err)
|
||||
}
|
||||
cfg.ServiceIPRange = *cidr
|
||||
_, s := framework.RunAMaster(cfg)
|
||||
defer s.Close()
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
|
||||
svc := func(i int) *api.Service {
|
||||
return &api.Service{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: fmt.Sprintf("svc-%v", i),
|
||||
},
|
||||
Spec: api.ServiceSpec{
|
||||
Type: api.ServiceTypeClusterIP,
|
||||
Ports: []api.ServicePort{
|
||||
{Port: 80},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Wait until the default "kubernetes" service is created.
|
||||
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
|
||||
_, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
return !errors.IsNotFound(err), nil
|
||||
}); err != nil {
|
||||
t.Fatalf("creating kubernetes service timed out")
|
||||
}
|
||||
|
||||
// make 5 more services to take up all IPs
|
||||
for i := 0; i < 5; i++ {
|
||||
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(i)); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make another service. It will fail because we're out of cluster IPs
|
||||
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil {
|
||||
if !strings.Contains(err.Error(), "range is full") {
|
||||
t.Errorf("unexpected error text: %v", err)
|
||||
}
|
||||
} else {
|
||||
svcs, err := client.Core().Services(api.NamespaceAll).List(api.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected success, and error getting the services: %v", err)
|
||||
}
|
||||
allIPs := []string{}
|
||||
for _, s := range svcs.Items {
|
||||
allIPs = append(allIPs, s.Spec.ClusterIP)
|
||||
}
|
||||
t.Fatalf("unexpected creation success. The following IPs exist: %#v. It should only be possible to allocate 2 IP addresses in this cluster.\n\n%#v", allIPs, svcs)
|
||||
}
|
||||
|
||||
// Delete the first service.
|
||||
if err := client.Core().Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil {
|
||||
t.Fatalf("got unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// This time creating the second service should work.
|
||||
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil {
|
||||
t.Fatalf("got unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpdateNodeObjects represents a simple version of the behavior of node checkins at steady
|
||||
// state. This test allows for easy profiling of a realistic master scenario for baseline CPU
|
||||
// in very large clusters. It is disabled by default - to run it, start a kube-apiserver and set
|
||||
// the UPDATE_NODE_APISERVER environment variable to that server's host value.
|
||||
func TestUpdateNodeObjects(t *testing.T) {
|
||||
server := os.Getenv("UPDATE_NODE_APISERVER")
|
||||
if len(server) == 0 {
|
||||
t.Skip("UPDATE_NODE_APISERVER is not set")
|
||||
}
|
||||
c := clienttypedv1.NewForConfigOrDie(&restclient.Config{
|
||||
QPS: 10000,
|
||||
Host: server,
|
||||
ContentConfig: restclient.ContentConfig{
|
||||
AcceptContentTypes: "application/vnd.kubernetes.protobuf",
|
||||
ContentType: "application/vnd.kubernetes.protobuf",
|
||||
},
|
||||
})
|
||||
|
||||
nodes := 400
|
||||
listers := 5
|
||||
watchers := 50
|
||||
iterations := 10000
|
||||
|
||||
for i := 0; i < nodes*6; i++ {
|
||||
c.Nodes().Delete(fmt.Sprintf("node-%d", i), nil)
|
||||
_, err := c.Nodes().Create(&v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: fmt.Sprintf("node-%d", i),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for k := 0; k < listers; k++ {
|
||||
go func(lister int) {
|
||||
for i := 0; i < iterations; i++ {
|
||||
_, err := c.Nodes().List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
fmt.Printf("[list:%d] error after %d: %v\n", lister, i, err)
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Duration(lister)*10*time.Millisecond + 1500*time.Millisecond)
|
||||
}
|
||||
}(k)
|
||||
}
|
||||
|
||||
for k := 0; k < watchers; k++ {
|
||||
go func(lister int) {
|
||||
w, err := c.Nodes().Watch(v1.ListOptions{})
|
||||
if err != nil {
|
||||
fmt.Printf("[watch:%d] error: %v", k, err)
|
||||
return
|
||||
}
|
||||
i := 0
|
||||
for r := range w.ResultChan() {
|
||||
i++
|
||||
if _, ok := r.Object.(*v1.Node); !ok {
|
||||
fmt.Printf("[watch:%d] unexpected object after %d: %#v\n", lister, i, r)
|
||||
}
|
||||
if i%100 == 0 {
|
||||
fmt.Printf("[watch:%d] iteration %d ...\n", lister, i)
|
||||
}
|
||||
}
|
||||
fmt.Printf("[watch:%d] done\n", lister)
|
||||
}(k)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(nodes)
|
||||
|
||||
for j := 0; j < nodes; j++ {
|
||||
go func(node int) {
|
||||
var lastCount int
|
||||
for i := 0; i < iterations; i++ {
|
||||
if i%100 == 0 {
|
||||
fmt.Printf("[%d] iteration %d ...\n", node, i)
|
||||
}
|
||||
if i%20 == 0 {
|
||||
_, err := c.Nodes().List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
r, err := c.Nodes().List(v1.ListOptions{
|
||||
FieldSelector: fmt.Sprintf("metadata.name=node-%d", node),
|
||||
ResourceVersion: "0",
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
|
||||
break
|
||||
}
|
||||
if len(r.Items) != 1 {
|
||||
fmt.Printf("[%d] error after %d: unexpected list count\n", node, i)
|
||||
break
|
||||
}
|
||||
|
||||
n, err := c.Nodes().Get(fmt.Sprintf("node-%d", node), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
|
||||
break
|
||||
}
|
||||
if len(n.Status.Conditions) != lastCount {
|
||||
fmt.Printf("[%d] worker set %d, read %d conditions\n", node, lastCount, len(n.Status.Conditions))
|
||||
break
|
||||
}
|
||||
previousCount := lastCount
|
||||
switch {
|
||||
case i%4 == 0:
|
||||
lastCount = 1
|
||||
n.Status.Conditions = []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "foo",
|
||||
},
|
||||
}
|
||||
case i%4 == 1:
|
||||
lastCount = 2
|
||||
n.Status.Conditions = []v1.NodeCondition{
|
||||
{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: "foo",
|
||||
},
|
||||
{
|
||||
Type: v1.NodeDiskPressure,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "bar",
|
||||
},
|
||||
}
|
||||
case i%4 == 2:
|
||||
lastCount = 0
|
||||
n.Status.Conditions = nil
|
||||
}
|
||||
if _, err := c.Nodes().UpdateStatus(n); err != nil {
|
||||
if !errors.IsConflict(err) {
|
||||
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
|
||||
break
|
||||
}
|
||||
lastCount = previousCount
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
fmt.Printf("[%d] done\n", node)
|
||||
}(j)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
27
vendor/k8s.io/kubernetes/test/integration/metrics/BUILD
generated
vendored
Normal file
|
@@ -0,0 +1,27 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["doc.go"],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
17
vendor/k8s.io/kubernetes/test/integration/metrics/doc.go
generated
vendored
Normal file
|
@@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
124
vendor/k8s.io/kubernetes/test/integration/metrics/metrics_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,124 @@
|
|||
// +build integration,!no-etcd,linux
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/golang/protobuf/proto"
|
||||
prometheuspb "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
const scrapeRequestHeader = "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text"
|
||||
|
||||
func scrapeMetrics(s *httptest.Server) ([]*prometheuspb.MetricFamily, error) {
|
||||
req, err := http.NewRequest("GET", s.URL+"/metrics", nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to create http request: %v", err)
|
||||
}
|
||||
// Ask the prometheus exporter for its text protocol buffer format, since it's
|
||||
// much easier to parse than its plain-text format. Don't use the serialized
|
||||
// proto representation since it uses a non-standard varint delimiter between
|
||||
// metric families.
|
||||
req.Header.Add("Accept", scrapeRequestHeader)
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to contact metrics endpoint of master: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("Non-200 response trying to scrape metrics from master: %v", resp)
|
||||
}
|
||||
|
||||
// Each line in the response body should contain all the data for a single metric.
|
||||
var metrics []*prometheuspb.MetricFamily
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
for scanner.Scan() {
|
||||
var metric prometheuspb.MetricFamily
|
||||
if err := proto.UnmarshalText(scanner.Text(), &metric); err != nil {
|
||||
return nil, fmt.Errorf("Failed to unmarshal line of metrics response: %v", err)
|
||||
}
|
||||
glog.V(4).Infof("Got metric %q", metric.GetName())
|
||||
metrics = append(metrics, &metric)
|
||||
}
|
||||
return metrics, nil
|
||||
}
|
||||
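A self-contained sketch of the per-line parsing that scrapeMetrics relies on, fed a made-up metric line in the same compact text format the Accept header asks for; the metric name and value are placeholders:
func exampleParseMetricLine() (*prometheuspb.MetricFamily, error) {
	// Hypothetical single line as the exporter would emit it in compact text form.
	line := `name:"go_goroutines" type:GAUGE metric:<gauge:<value:42 > >`
	var mf prometheuspb.MetricFamily
	if err := proto.UnmarshalText(line, &mf); err != nil {
		return nil, err
	}
	return &mf, nil // mf.GetName() == "go_goroutines"
}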
|
||||
func checkForExpectedMetrics(t *testing.T, metrics []*prometheuspb.MetricFamily, expectedMetrics []string) {
|
||||
foundMetrics := make(map[string]bool)
|
||||
for _, metric := range metrics {
|
||||
foundMetrics[metric.GetName()] = true
|
||||
}
|
||||
for _, expected := range expectedMetrics {
|
||||
if _, found := foundMetrics[expected]; !found {
|
||||
t.Errorf("Master metrics did not include expected metric %q", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMasterProcessMetrics(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
metrics, err := scrapeMetrics(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkForExpectedMetrics(t, metrics, []string{
|
||||
"process_start_time_seconds",
|
||||
"process_cpu_seconds_total",
|
||||
"go_goroutines",
|
||||
"process_open_fds",
|
||||
"process_resident_memory_bytes",
|
||||
})
|
||||
}
|
||||
|
||||
func TestApiserverMetrics(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
// Make a request to the apiserver to ensure there's at least one data point
|
||||
// for the metrics we're expecting -- otherwise, they won't be exported.
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
if _, err := client.Core().Pods(v1.NamespaceDefault).List(v1.ListOptions{}); err != nil {
|
||||
t.Fatalf("unexpected error getting pods: %v", err)
|
||||
}
|
||||
|
||||
metrics, err := scrapeMetrics(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkForExpectedMetrics(t, metrics, []string{
|
||||
"apiserver_request_count",
|
||||
"apiserver_request_latencies",
|
||||
})
|
||||
}
|
35
vendor/k8s.io/kubernetes/test/integration/objectmeta/BUILD
generated
vendored
Normal file
|
@@ -0,0 +1,35 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["objectmeta_test.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/restclient:go_default_library",
|
||||
"//test/integration/framework:go_default_library",
|
||||
"//vendor:github.com/stretchr/testify/assert",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
52
vendor/k8s.io/kubernetes/test/integration/objectmeta/objectmeta_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,52 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package objectmeta
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func TestIgnoreClusterName(t *testing.T) {
|
||||
config := framework.NewMasterConfig()
|
||||
_, s := framework.RunAMaster(config)
|
||||
defer s.Close()
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "test-namespace",
|
||||
ClusterName: "cluster-name-to-ignore",
|
||||
},
|
||||
}
|
||||
nsNew, err := client.Core().Namespaces().Create(&ns)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, ns.Name, nsNew.Name)
|
||||
assert.Empty(t, nsNew.ClusterName)
|
||||
|
||||
nsNew, err = client.Core().Namespaces().Update(&ns)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, ns.Name, nsNew.Name)
|
||||
assert.Empty(t, nsNew.ClusterName)
|
||||
}
|
31
vendor/k8s.io/kubernetes/test/integration/openshift/BUILD
generated
vendored
Normal file
|
@@ -0,0 +1,31 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["openshift_test.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/genericapiserver:go_default_library",
|
||||
"//pkg/master:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
40
vendor/k8s.io/kubernetes/test/integration/openshift/openshift_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,40 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openshift
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/genericapiserver"
|
||||
"k8s.io/kubernetes/pkg/master"
|
||||
)
|
||||
|
||||
// This test references methods that OpenShift uses to customize the master on startup but that
|
||||
// are not referenced directly by the master itself.
|
||||
func TestMasterExportsSymbols(t *testing.T) {
|
||||
_ = &master.Config{
|
||||
GenericConfig: &genericapiserver.Config{
|
||||
EnableMetrics: true,
|
||||
},
|
||||
EnableCoreControllers: false,
|
||||
EnableUISupport: false,
|
||||
EnableLogsSupport: false,
|
||||
}
|
||||
_ = &master.Master{
|
||||
GenericAPIServer: &genericapiserver.GenericAPIServer{},
|
||||
}
|
||||
}
|
182
vendor/k8s.io/kubernetes/test/integration/pods/pods_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,182 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pods
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/test/integration"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("pod-activedeadline-update", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
var (
|
||||
iZero = int64(0)
|
||||
i30 = int64(30)
|
||||
i60 = int64(60)
|
||||
iNeg = int64(-1)
|
||||
)
|
||||
|
||||
prototypePod := func() *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "xxx",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
original *int64
|
||||
update *int64
|
||||
valid bool
|
||||
}{
|
||||
{
|
||||
name: "no change, nil",
|
||||
original: nil,
|
||||
update: nil,
|
||||
valid: true,
|
||||
},
|
||||
{
|
||||
name: "no change, set",
|
||||
original: &i30,
|
||||
update: &i30,
|
||||
valid: true,
|
||||
},
|
||||
{
|
||||
name: "change to positive from nil",
|
||||
original: nil,
|
||||
update: &i60,
|
||||
valid: true,
|
||||
},
|
||||
{
|
||||
name: "change to smaller positive",
|
||||
original: &i60,
|
||||
update: &i30,
|
||||
valid: true,
|
||||
},
|
||||
{
|
||||
name: "change to larger positive",
|
||||
original: &i30,
|
||||
update: &i60,
|
||||
valid: false,
|
||||
},
|
||||
{
|
||||
name: "change to negative from positive",
|
||||
original: &i30,
|
||||
update: &iNeg,
|
||||
valid: false,
|
||||
},
|
||||
{
|
||||
name: "change to negative from nil",
|
||||
original: nil,
|
||||
update: &iNeg,
|
||||
valid: false,
|
||||
},
|
||||
// zero is not allowed, must be a positive integer
|
||||
{
|
||||
name: "change to zero from positive",
|
||||
original: &i30,
|
||||
update: &iZero,
|
||||
valid: false,
|
||||
},
|
||||
{
|
||||
name: "change to nil from positive",
|
||||
original: &i30,
|
||||
update: nil,
|
||||
valid: false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range cases {
|
||||
pod := prototypePod()
|
||||
pod.Spec.ActiveDeadlineSeconds = tc.original
|
||||
pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i)
|
||||
|
||||
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
|
||||
t.Errorf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
pod.Spec.ActiveDeadlineSeconds = tc.update
|
||||
|
||||
_, err := client.Core().Pods(ns.Name).Update(pod)
|
||||
if tc.valid && err != nil {
|
||||
t.Errorf("%v: failed to update pod: %v", tc.name, err)
|
||||
} else if !tc.valid && err == nil {
|
||||
t.Errorf("%v: unexpected allowed update to pod", tc.name)
|
||||
}
|
||||
|
||||
integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodReadOnlyFilesystem(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
isReadOnly := true
|
||||
ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "xxx",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
ReadOnlyRootFilesystem: &isReadOnly,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
|
||||
t.Errorf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
|
||||
}
|
221
vendor/k8s.io/kubernetes/test/integration/quota/quota_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,221 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package quota
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
|
||||
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
|
||||
quotainstall "k8s.io/kubernetes/pkg/quota/install"
|
||||
"k8s.io/kubernetes/plugin/pkg/admission/resourcequota"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
// 1.2 code gets:
|
||||
// quota_test.go:95: Took 4.218619579s to scale up without quota
|
||||
// quota_test.go:199: unexpected error: timed out waiting for the condition, ended with 342 pods (1 minute)
|
||||
// 1.3+ code gets:
|
||||
// quota_test.go:100: Took 4.196205966s to scale up without quota
|
||||
// quota_test.go:115: Took 12.021640372s to scale up with quota
|
||||
func TestQuota(t *testing.T) {
|
||||
// Set up a master
|
||||
h := &framework.MasterHolder{Initialized: make(chan struct{})}
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
<-h.Initialized
|
||||
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
admissionCh := make(chan struct{})
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
admission, err := resourcequota.NewResourceQuota(quotainstall.NewRegistry(nil, nil), 5, admissionCh)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
admission.(kubeadmission.WantsInternalClientSet).SetInternalClientSet(internalClientset)
|
||||
defer close(admissionCh)
|
||||
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
masterConfig.GenericConfig.AdmissionControl = admission
|
||||
framework.RunAMasterUsingServer(masterConfig, s, h)
|
||||
|
||||
ns := framework.CreateTestingNamespace("quotaed", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
ns2 := framework.CreateTestingNamespace("non-quotaed", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns2, s, t)
|
||||
|
||||
controllerCh := make(chan struct{})
|
||||
defer close(controllerCh)
|
||||
|
||||
informers := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
|
||||
podInformer := informers.Pods().Informer()
|
||||
rcInformer := informers.ReplicationControllers().Informer()
|
||||
rm := replicationcontroller.NewReplicationManager(podInformer, rcInformer, clientset, replicationcontroller.BurstReplicas, 4096, false)
|
||||
rm.SetEventRecorder(&record.FakeRecorder{})
|
||||
informers.Start(controllerCh)
|
||||
go rm.Run(3, controllerCh)
|
||||
|
||||
resourceQuotaRegistry := quotainstall.NewRegistry(clientset, nil)
|
||||
groupKindsToReplenish := []schema.GroupKind{
|
||||
api.Kind("Pod"),
|
||||
}
|
||||
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
|
||||
KubeClient: clientset,
|
||||
ResyncPeriod: controller.NoResyncPeriodFunc,
|
||||
Registry: resourceQuotaRegistry,
|
||||
GroupKindsToReplenish: groupKindsToReplenish,
|
||||
ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
|
||||
ControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactoryFromClient(clientset),
|
||||
}
|
||||
go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(2, controllerCh)
|
||||
|
||||
startTime := time.Now()
|
||||
scale(t, ns2.Name, clientset)
|
||||
endTime := time.Now()
|
||||
t.Logf("Took %v to scale up without quota", endTime.Sub(startTime))
|
||||
|
||||
quota := &v1.ResourceQuota{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "quota",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: v1.ResourceQuotaSpec{
|
||||
Hard: v1.ResourceList{
|
||||
v1.ResourcePods: resource.MustParse("1000"),
|
||||
},
|
||||
},
|
||||
}
|
||||
waitForQuota(t, quota, clientset)
|
||||
|
||||
startTime = time.Now()
|
||||
scale(t, "quotaed", clientset)
|
||||
endTime = time.Now()
|
||||
t.Logf("Took %v to scale up with quota", endTime.Sub(startTime))
|
||||
}
|
||||
|
||||
func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
|
||||
w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: quota.Name}))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
_, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
|
||||
switch event.Type {
|
||||
case watch.Modified:
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
|
||||
switch cast := event.Object.(type) {
|
||||
case *v1.ResourceQuota:
|
||||
if len(cast.Status.Hard) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
|
||||
target := int32(100)
|
||||
rc := &v1.ReplicationController{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: &target,
|
||||
Selector: map[string]string{"foo": "bar"},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "container",
|
||||
Image: "busybox",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
w, err := clientset.Core().ReplicationControllers(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: rc.Name}))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
_, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
|
||||
switch event.Type {
|
||||
case watch.Modified:
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
|
||||
switch cast := event.Object.(type) {
|
||||
case *v1.ReplicationController:
|
||||
fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
|
||||
if cast.Status.Replicas == target {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
pods, _ := clientset.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
|
||||
t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
|
||||
}
|
||||
}
|
466
vendor/k8s.io/kubernetes/test/integration/replicaset/replicaset_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,466 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package replicaset
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
"k8s.io/kubernetes/pkg/controller/replicaset"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func testLabels() map[string]string {
|
||||
return map[string]string{"name": "test"}
|
||||
}
|
||||
|
||||
func newRS(name, namespace string, replicas int) *v1beta1.ReplicaSet {
|
||||
replicasCopy := int32(replicas)
|
||||
return &v1beta1.ReplicaSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicaSet",
|
||||
APIVersion: "extensions/v1beta1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1beta1.ReplicaSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: testLabels(),
|
||||
},
|
||||
Replicas: &replicasCopy,
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Labels: testLabels(),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newMatchingPod(podName, namespace string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: namespace,
|
||||
Labels: testLabels(),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodRunning,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// verifyRemainingObjects verifies that the numbers of the remaining replica
|
||||
// sets and pods are rsNum and podNum. It returns an error if the
|
||||
// communication with the API server fails.
|
||||
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) {
|
||||
rsClient := clientSet.Extensions().ReplicaSets(namespace)
|
||||
podClient := clientSet.Core().Pods(namespace)
|
||||
pods, err := podClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list pods: %v", err)
|
||||
}
|
||||
var ret = true
|
||||
if len(pods.Items) != podNum {
|
||||
ret = false
|
||||
t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
|
||||
}
|
||||
rss, err := rsClient.List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list replica sets: %v", err)
|
||||
}
|
||||
if len(rss.Items) != rsNum {
|
||||
ret = false
|
||||
t.Logf("expect %d RSs, got %d RSs", rsNum, len(rss.Items))
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
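Because verifyRemainingObjects returns (bool, error), it slots directly into wait.Poll; a minimal sketch, with the namespace and expected counts left as caller-supplied placeholders:
func waitForRemainingObjects(t *testing.T, clientSet clientset.Interface, ns string, rsNum, podNum int) {
	// Poll until the cluster settles at the expected replica set and pod counts.
	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
		return verifyRemainingObjects(t, clientSet, ns, rsNum, podNum)
	}); err != nil {
		t.Fatalf("remaining objects never reached rs=%d pod=%d: %v", rsNum, podNum, err)
	}
}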
|
||||
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, cache.SharedIndexInformer, cache.SharedIndexInformer, clientset.Interface) {
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
_, s := framework.RunAMaster(masterConfig)
|
||||
|
||||
config := restclient.Config{Host: s.URL}
|
||||
clientSet, err := clientset.NewForConfig(&config)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in create clientset: %v", err)
|
||||
}
|
||||
resyncPeriod := 12 * time.Hour
|
||||
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), nil, resyncPeriod)
|
||||
|
||||
rm := replicaset.NewReplicaSetController(
|
||||
informers.ReplicaSets(),
|
||||
informers.Pods(),
|
||||
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
|
||||
replicaset.BurstReplicas,
|
||||
4096,
|
||||
enableGarbageCollector,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create replicaset controller")
|
||||
}
|
||||
return s, rm, informers.ReplicaSets().Informer(), informers.Pods().Informer(), clientSet
|
||||
}
|
||||
|
||||
// wait for the podInformer to observe the pods. Call this function before
|
||||
// running the RS controller to prevent it from creating new pods
|
||||
// rather than adopting the existing ones.
|
||||
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
objects := podInformer.GetIndexer().List()
|
||||
if len(objects) == podNum {
|
||||
return true, nil
|
||||
} else {
|
||||
return false, nil
|
||||
}
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdoption(t *testing.T) {
|
||||
var trueVar = true
|
||||
testCases := []struct {
|
||||
name string
|
||||
existingOwnerReferences func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference
|
||||
expectedOwnerReferences func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference
|
||||
}{
|
||||
{
|
||||
"pod refers rs as an owner, not a controller",
|
||||
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"}}
|
||||
},
|
||||
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
|
||||
},
|
||||
},
|
||||
{
|
||||
"pod doesn't have owner references",
|
||||
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{}
|
||||
},
|
||||
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
|
||||
},
|
||||
},
|
||||
{
|
||||
"pod refers rs as a controller",
|
||||
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
|
||||
},
|
||||
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar}}
|
||||
},
|
||||
},
|
||||
{
|
||||
"pod refers other rs as the controller, refers the rs as an owner",
|
||||
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{
|
||||
{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar},
|
||||
{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"},
|
||||
}
|
||||
},
|
||||
func(rs *v1beta1.ReplicaSet) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{
|
||||
{UID: "1", Name: "anotherRS", APIVersion: "extensions/v1beta1", Kind: "ReplicaSet", Controller: &trueVar},
|
||||
{UID: rs.UID, Name: rs.Name, APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
|
||||
ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
const rsName = "rs"
|
||||
rs, err := rsClient.Create(newRS(rsName, ns.Name, 1))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create replica set: %v", err)
|
||||
}
|
||||
podName := fmt.Sprintf("pod%d", i)
|
||||
pod := newMatchingPod(podName, ns.Name)
|
||||
pod.OwnerReferences = tc.existingOwnerReferences(rs)
|
||||
_, err = podClient.Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Pod: %v", err)
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
go rsInformer.Run(stopCh)
|
||||
go podInformer.Run(stopCh)
|
||||
waitToObservePods(t, podInformer, 1)
|
||||
go rm.Run(5, stopCh)
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if e, a := tc.expectedOwnerReferences(rs), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
|
||||
return true, nil
|
||||
} else {
|
||||
t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
|
||||
return false, nil
|
||||
}
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
close(stopCh)
|
||||
}
|
||||
}
|
||||
|
||||
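
// controllerRef, a hypothetical helper sketched here for illustration, builds
// the ownerReference shape the adoption cases above expect once the
// ReplicaSet has adopted a pod: the same reference, with Controller set.
func controllerRef(rs *v1beta1.ReplicaSet) metav1.OwnerReference {
	isController := true
	return metav1.OwnerReference{
		UID:        rs.UID,
		Name:       rs.Name,
		APIVersion: "extensions/v1beta1",
		Kind:       "ReplicaSet",
		Controller: &isController,
	}
}
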
func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.ReplicaSet, pods []*v1.Pod, ns string) {
	rsClient := clientSet.Extensions().ReplicaSets(ns)
	podClient := clientSet.Core().Pods(ns)
	for _, rs := range rss {
		if _, err := rsClient.Create(rs); err != nil {
			t.Fatalf("Failed to create replica set %s: %v", rs.Name, err)
		}
	}
	for _, pod := range pods {
		if _, err := podClient.Create(pod); err != nil {
			t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
		}
	}
}

func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet, ns string) {
	rsClient := clientSet.Extensions().ReplicaSets(ns)
	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
		updatedRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return updatedRS.Status.Replicas == *rs.Spec.Replicas, nil
	}); err != nil {
		t.Fatal(err)
	}
}

func TestUpdateSelectorToAdopt(t *testing.T) {
	// We have pod1, pod2 and rs. rs.spec.replicas=1. At first rs.Selector
	// matches pod1 only; change the selector to match pod2 as well. Verify
	// there is only one pod left.
	s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
	ns := framework.CreateTestingNamespace("rs-update-selector-to-adopt", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	rs := newRS("rs", ns.Name, 1)
	// let rs's selector only match pod1
	rs.Spec.Selector.MatchLabels["uniqueKey"] = "1"
	rs.Spec.Template.Labels["uniqueKey"] = "1"
	pod1 := newMatchingPod("pod1", ns.Name)
	pod1.Labels["uniqueKey"] = "1"
	pod2 := newMatchingPod("pod2", ns.Name)
	pod2.Labels["uniqueKey"] = "2"
	createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)

	stopCh := make(chan struct{})
	go rsInformer.Run(stopCh)
	go podInformer.Run(stopCh)
	go rm.Run(5, stopCh)
	waitRSStable(t, clientSet, rs, ns.Name)

	// change the rs's selector to match both pods
	patch := `{"spec":{"selector":{"matchLabels": {"uniqueKey":null}}}}`
	rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
	rs, err := rsClient.Patch(rs.Name, api.StrategicMergePatchType, []byte(patch))
	if err != nil {
		t.Fatalf("Failed to patch replica set: %v", err)
	}
	t.Logf("patched rs = %#v", rs)
	// wait for the rs to select both pods and delete one of them
	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
		return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
	}); err != nil {
		t.Fatal(err)
	}
	close(stopCh)
}
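
// removeSelectorKeyPatch, a hypothetical helper sketched here for
// illustration, captures the strategic-merge-patch idiom used above: setting
// a map key to null removes it, which is how the test drops the extra
// selector requirement so the ReplicaSet matches both pods again.
func removeSelectorKeyPatch(key string) []byte {
	return []byte(fmt.Sprintf(`{"spec":{"selector":{"matchLabels":{"%s":null}}}}`, key))
}
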
func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
|
||||
// We have pod1, pod2 and rs. rs.spec.replicas=2. At first rs.Selector
|
||||
// matches pod1 and pod2; change the selector to match only pod1. Verify
|
||||
// that rs creates one more pod, so there are 3 pods. Also verify that
|
||||
// pod2's controllerRef is cleared.
|
||||
s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
|
||||
ns := framework.CreateTestingNamespace("rs-update-selector-to-remove-controllerref", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
rs := newRS("rs", ns.Name, 2)
|
||||
pod1 := newMatchingPod("pod1", ns.Name)
|
||||
pod1.Labels["uniqueKey"] = "1"
|
||||
pod2 := newMatchingPod("pod2", ns.Name)
|
||||
pod2.Labels["uniqueKey"] = "2"
|
||||
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
go rsInformer.Run(stopCh)
|
||||
go podInformer.Run(stopCh)
|
||||
waitToObservePods(t, podInformer, 2)
|
||||
go rm.Run(5, stopCh)
|
||||
waitRSStable(t, clientSet, rs, ns.Name)
|
||||
|
||||
// change the rs's selector to match only pod1
|
||||
patch := `{"spec":{"selector":{"matchLabels": {"uniqueKey":"1"}},"template":{"metadata":{"labels":{"uniqueKey":"1"}}}}}`
|
||||
rsClient := clientSet.Extensions().ReplicaSets(ns.Name)
|
||||
rs, err := rsClient.Patch(rs.Name, api.StrategicMergePatchType, []byte(patch))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to patch replica set: %v", err)
|
||||
}
|
||||
t.Logf("patched rs = %#v", rs)
|
||||
// wait for the rs to create one more pod
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get pod2: %v", err)
|
||||
}
|
||||
if len(pod2.OwnerReferences) != 0 {
|
||||
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
|
||||
}
|
||||
close(stopCh)
|
||||
}
|
||||
|
||||
func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
|
||||
// We have pod1, pod2 and rs. rs.spec.replicas=2. At first rs.Selector
|
||||
// matches pod1 and pod2; change pod2's labels to non-matching. Verify
|
||||
// that rs creates one more pod, so there are 3 pods. Also verify that
|
||||
// pod2's controllerRef is cleared.
|
||||
s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
|
||||
ns := framework.CreateTestingNamespace("rs-update-label-to-remove-controllerref", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
rs := newRS("rs", ns.Name, 2)
|
||||
pod1 := newMatchingPod("pod1", ns.Name)
|
||||
pod2 := newMatchingPod("pod2", ns.Name)
|
||||
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
go rsInformer.Run(stopCh)
|
||||
go podInformer.Run(stopCh)
|
||||
go rm.Run(5, stopCh)
|
||||
waitRSStable(t, clientSet, rs, ns.Name)
|
||||
|
||||
// remove the matching label from pod2 so it no longer matches the rs's selector
|
||||
patch := `{"metadata":{"labels":{"name":null}}}`
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to patch pod2: %v", err)
|
||||
}
|
||||
t.Logf("patched pod2 = %#v", pod2)
|
||||
// wait for the rs to create one more pod
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get pod2: %v", err)
|
||||
}
|
||||
if len(pod2.OwnerReferences) != 0 {
|
||||
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
|
||||
}
|
||||
close(stopCh)
|
||||
}
|
||||
|
||||
func TestUpdateLabelToBeAdopted(t *testing.T) {
|
||||
// We have pod1, pod2 and rs. rs.spec.replicas=1. At first rs.Selector
|
||||
// matches pod1 only; change pod2's labels to be matching. Verify the RS
|
||||
// controller adopts pod2 and deletes one of them, so there is only 1 pod
|
||||
// left.
|
||||
s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
|
||||
ns := framework.CreateTestingNamespace("rs-update-label-to-be-adopted", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
rs := newRS("rs", ns.Name, 1)
|
||||
// let rs's selector only match pod1
|
||||
rs.Spec.Selector.MatchLabels["uniqueKey"] = "1"
|
||||
rs.Spec.Template.Labels["uniqueKey"] = "1"
|
||||
pod1 := newMatchingPod("pod1", ns.Name)
|
||||
pod1.Labels["uniqueKey"] = "1"
|
||||
pod2 := newMatchingPod("pod2", ns.Name)
|
||||
pod2.Labels["uniqueKey"] = "2"
|
||||
createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
go rsInformer.Run(stopCh)
|
||||
go podInformer.Run(stopCh)
|
||||
go rm.Run(5, stopCh)
|
||||
waitRSStable(t, clientSet, rs, ns.Name)
|
||||
|
||||
// change pod2's labels so the rs's selector matches it
|
||||
patch := `{"metadata":{"labels":{"uniqueKey":"1"}}}`
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to patch pod2: %v", err)
|
||||
}
|
||||
t.Logf("patched pod2 = %#v", pod2)
|
||||
// wait for the rs to select both pods and delete one of them
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
close(stopCh)
|
||||
}
|
447
vendor/k8s.io/kubernetes/test/integration/replicationcontroller/replicationcontroller_test.go
generated
vendored
Normal file

@ -0,0 +1,447 @@

// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package replicationcontroller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
"k8s.io/kubernetes/pkg/controller/replication"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func testLabels() map[string]string {
|
||||
return map[string]string{"name": "test"}
|
||||
}
|
||||
|
||||
func newRC(name, namespace string, replicas int) *v1.ReplicationController {
|
||||
replicasCopy := int32(replicas)
|
||||
return &v1.ReplicationController{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicationController",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Selector: testLabels(),
|
||||
Replicas: &replicasCopy,
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Labels: testLabels(),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newMatchingPod(podName, namespace string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: namespace,
|
||||
Labels: testLabels(),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodRunning,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// verifyRemainingObjects verifies that the number of remaining replication
// controllers and pods is rcNum and podNum. It returns an error if the
// communication with the API server fails.
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
	rcClient := clientSet.Core().ReplicationControllers(namespace)
	podClient := clientSet.Core().Pods(namespace)
	pods, err := podClient.List(v1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list pods: %v", err)
	}
	var ret = true
	if len(pods.Items) != podNum {
		ret = false
		t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
	}
	rcs, err := rcClient.List(v1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list replication controllers: %v", err)
	}
	if len(rcs.Items) != rcNum {
		ret = false
		t.Logf("expect %d RCs, got %d RCs", rcNum, len(rcs.Items))
	}
	return ret, nil
}

func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	_, s := framework.RunAMaster(masterConfig)

	config := restclient.Config{Host: s.URL}
	clientSet, err := clientset.NewForConfig(&config)
	if err != nil {
		t.Fatalf("Error in create clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour

	informers := informers.NewSharedInformerFactory(clientSet, nil, resyncPeriod)
	podInformer := informers.Pods().Informer()
	rcInformer := informers.ReplicationControllers().Informer()
	rm := replication.NewReplicationManager(podInformer, rcInformer, clientSet, replication.BurstReplicas, 4096, enableGarbageCollector)
	informers.Start(stopCh)

	return s, rm, podInformer, clientSet
}
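
// Note that, unlike the ReplicaSet variant of rmSetup, this setup hands the
// stop channel to informers.Start, so a caller only creates the channel and
// closes it when the test ends; for example (an illustrative sketch of the
// call sites below):
//
//	stopCh := make(chan struct{})
//	s, rm, podInformer, clientSet := rmSetup(t, stopCh, true)
//	...
//	close(stopCh)
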
// wait for the podInformer to observe the pods. Call this function before
|
||||
// running the RC manager to prevent the rc manager from creating new pods
|
||||
// rather than adopting the existing ones.
|
||||
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
objects := podInformer.GetIndexer().List()
|
||||
if len(objects) == podNum {
|
||||
return true, nil
|
||||
} else {
|
||||
return false, nil
|
||||
}
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdoption(t *testing.T) {
|
||||
var trueVar = true
|
||||
testCases := []struct {
|
||||
name string
|
||||
existingOwnerReferences func(rc *v1.ReplicationController) []metav1.OwnerReference
|
||||
expectedOwnerReferences func(rc *v1.ReplicationController) []metav1.OwnerReference
|
||||
}{
|
||||
{
|
||||
"pod refers rc as an owner, not a controller",
|
||||
func(rc *v1.ReplicationController) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"}}
|
||||
},
|
||||
func(rc *v1.ReplicationController) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
|
||||
},
|
||||
},
|
||||
{
|
||||
"pod doesn't have owner references",
|
||||
func(rc *v1.ReplicationController) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{}
|
||||
},
|
||||
func(rc *v1.ReplicationController) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
|
||||
},
|
||||
},
|
||||
{
|
||||
"pod refers rc as a controller",
|
||||
func(rc *v1.ReplicationController) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
|
||||
},
|
||||
func(rc *v1.ReplicationController) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar}}
|
||||
},
|
||||
},
|
||||
{
|
||||
"pod refers other rc as the controller, refers the rc as an owner",
|
||||
func(rc *v1.ReplicationController) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{
|
||||
{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar},
|
||||
{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
|
||||
}
|
||||
},
|
||||
func(rc *v1.ReplicationController) []metav1.OwnerReference {
|
||||
return []metav1.OwnerReference{
|
||||
{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: &trueVar},
|
||||
{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
stopCh := make(chan struct{})
|
||||
s, rm, podInformer, clientSet := rmSetup(t, stopCh, true)
|
||||
ns := framework.CreateTestingNamespace(fmt.Sprintf("adoption-%d", i), s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
const rcName = "rc"
|
||||
rc, err := rcClient.Create(newRC(rcName, ns.Name, 1))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create replication controller: %v", err)
|
||||
}
|
||||
podName := fmt.Sprintf("pod%d", i)
|
||||
pod := newMatchingPod(podName, ns.Name)
|
||||
pod.OwnerReferences = tc.existingOwnerReferences(rc)
|
||||
_, err = podClient.Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Pod: %v", err)
|
||||
}
|
||||
|
||||
go podInformer.Run(stopCh)
|
||||
waitToObservePods(t, podInformer, 1)
|
||||
go rm.Run(5, stopCh)
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if e, a := tc.expectedOwnerReferences(rc), updatedPod.OwnerReferences; reflect.DeepEqual(e, a) {
|
||||
return true, nil
|
||||
} else {
|
||||
t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
|
||||
return false, nil
|
||||
}
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
close(stopCh)
|
||||
}
|
||||
}
|
||||
|
||||
func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.ReplicationController, pods []*v1.Pod, ns string) {
|
||||
rcClient := clientSet.Core().ReplicationControllers(ns)
|
||||
podClient := clientSet.Core().Pods(ns)
|
||||
for _, rc := range rcs {
|
||||
if _, err := rcClient.Create(rc); err != nil {
|
||||
t.Fatalf("Failed to create replication controller %s: %v", rc.Name, err)
|
||||
}
|
||||
}
|
||||
for _, pod := range pods {
|
||||
if _, err := podClient.Create(pod); err != nil {
|
||||
t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func waitRCStable(t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController, ns string) {
|
||||
rcClient := clientSet.Core().ReplicationControllers(ns)
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
updatedRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if updatedRC.Status.Replicas != *rc.Spec.Replicas {
|
||||
return false, nil
|
||||
} else {
|
||||
return true, nil
|
||||
}
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateSelectorToAdopt(t *testing.T) {
|
||||
// We have pod1, pod2 and rc. rc.spec.replicas=1. At first rc.Selector
|
||||
// matches pod1 only; change the selector to match pod2 as well. Verify
|
||||
// there is only one pod left.
|
||||
stopCh := make(chan struct{})
|
||||
s, rm, _, clientSet := rmSetup(t, stopCh, true)
|
||||
ns := framework.CreateTestingNamespace("update-selector-to-adopt", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
rc := newRC("rc", ns.Name, 1)
|
||||
// let rc's selector only match pod1
|
||||
rc.Spec.Selector["uniqueKey"] = "1"
|
||||
rc.Spec.Template.Labels["uniqueKey"] = "1"
|
||||
pod1 := newMatchingPod("pod1", ns.Name)
|
||||
pod1.Labels["uniqueKey"] = "1"
|
||||
pod2 := newMatchingPod("pod2", ns.Name)
|
||||
pod2.Labels["uniqueKey"] = "2"
|
||||
createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
|
||||
|
||||
go rm.Run(5, stopCh)
|
||||
waitRCStable(t, clientSet, rc, ns.Name)
|
||||
|
||||
// change the rc's selector to match both pods
|
||||
patch := `{"spec":{"selector":{"uniqueKey":null}}}`
|
||||
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
|
||||
rc, err := rcClient.Patch(rc.Name, api.StrategicMergePatchType, []byte(patch))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to patch replication controller: %v", err)
|
||||
}
|
||||
t.Logf("patched rc = %#v", rc)
|
||||
// wait for the rc to select both pods and delete one of them
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
close(stopCh)
|
||||
}
|
||||
|
||||
func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
|
||||
// We have pod1, pod2 and rc. rc.spec.replicas=2. At first rc.Selector
|
||||
// matches pod1 and pod2; change the selector to match only pod1. Verify
|
||||
// that rc creates one more pod, so there are 3 pods. Also verify that
|
||||
// pod2's controllerRef is cleared.
|
||||
stopCh := make(chan struct{})
|
||||
s, rm, podInformer, clientSet := rmSetup(t, stopCh, true)
|
||||
ns := framework.CreateTestingNamespace("update-selector-to-remove-controllerref", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
rc := newRC("rc", ns.Name, 2)
|
||||
pod1 := newMatchingPod("pod1", ns.Name)
|
||||
pod1.Labels["uniqueKey"] = "1"
|
||||
pod2 := newMatchingPod("pod2", ns.Name)
|
||||
pod2.Labels["uniqueKey"] = "2"
|
||||
createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
|
||||
|
||||
waitToObservePods(t, podInformer, 2)
|
||||
go rm.Run(5, stopCh)
|
||||
waitRCStable(t, clientSet, rc, ns.Name)
|
||||
|
||||
// change the rc's selector to match only pod1
|
||||
patch := `{"spec":{"selector":{"uniqueKey":"1"},"template":{"metadata":{"labels":{"uniqueKey":"1"}}}}}`
|
||||
rcClient := clientSet.Core().ReplicationControllers(ns.Name)
|
||||
rc, err := rcClient.Patch(rc.Name, api.StrategicMergePatchType, []byte(patch))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to patch replication controller: %v", err)
|
||||
}
|
||||
t.Logf("patched rc = %#v", rc)
|
||||
// wait for the rc to create one more pod
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get pod2: %v", err)
|
||||
}
|
||||
if len(pod2.OwnerReferences) != 0 {
|
||||
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
|
||||
}
|
||||
close(stopCh)
|
||||
}
|
||||
|
||||
func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
|
||||
// We have pod1, pod2 and rc. rc.spec.replicas=2. At first rc.Selector
|
||||
// matches pod1 and pod2; change pod2's labels to non-matching. Verify
|
||||
// that rc creates one more pod, so there are 3 pods. Also verify that
|
||||
// pod2's controllerRef is cleared.
|
||||
stopCh := make(chan struct{})
|
||||
s, rm, _, clientSet := rmSetup(t, stopCh, true)
|
||||
ns := framework.CreateTestingNamespace("update-label-to-remove-controllerref", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
rc := newRC("rc", ns.Name, 2)
|
||||
pod1 := newMatchingPod("pod1", ns.Name)
|
||||
pod2 := newMatchingPod("pod2", ns.Name)
|
||||
createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
|
||||
|
||||
go rm.Run(5, stopCh)
|
||||
waitRCStable(t, clientSet, rc, ns.Name)
|
||||
|
||||
// remove the matching label from pod2 so it no longer matches the rc's selector
|
||||
patch := `{"metadata":{"labels":{"name":null}}}`
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to patch pod2: %v", err)
|
||||
}
|
||||
t.Logf("patched pod2 = %#v", pod2)
|
||||
// wait for the rc to create one more pod
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 3)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get pod2: %v", err)
|
||||
}
|
||||
if len(pod2.OwnerReferences) != 0 {
|
||||
t.Fatalf("ownerReferences of pod2 is not cleared, got %#v", pod2.OwnerReferences)
|
||||
}
|
||||
close(stopCh)
|
||||
}
|
||||
|
||||
func TestUpdateLabelToBeAdopted(t *testing.T) {
|
||||
// We have pod1, pod2 and rc. rc.spec.replicas=1. At first rc.Selector
|
||||
// matches pod1 only; change pod2's labels to be matching. Verify the RC
|
||||
// controller adopts pod2 and deletes one of them, so there is only 1 pod
|
||||
// left.
|
||||
stopCh := make(chan struct{})
|
||||
s, rm, _, clientSet := rmSetup(t, stopCh, true)
|
||||
ns := framework.CreateTestingNamespace("update-label-to-be-adopted", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
rc := newRC("rc", ns.Name, 1)
|
||||
// let rc's selector only match pod1
|
||||
rc.Spec.Selector["uniqueKey"] = "1"
|
||||
rc.Spec.Template.Labels["uniqueKey"] = "1"
|
||||
pod1 := newMatchingPod("pod1", ns.Name)
|
||||
pod1.Labels["uniqueKey"] = "1"
|
||||
pod2 := newMatchingPod("pod2", ns.Name)
|
||||
pod2.Labels["uniqueKey"] = "2"
|
||||
createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)
|
||||
|
||||
go rm.Run(5, stopCh)
|
||||
waitRCStable(t, clientSet, rc, ns.Name)
|
||||
|
||||
// change pod2's labels so the rc's selector matches it
|
||||
patch := `{"metadata":{"labels":{"uniqueKey":"1"}}}`
|
||||
podClient := clientSet.Core().Pods(ns.Name)
|
||||
pod2, err := podClient.Patch(pod2.Name, api.StrategicMergePatchType, []byte(patch))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to patch pod2: %v", err)
|
||||
}
|
||||
t.Logf("patched pod2 = %#v", pod2)
|
||||
// wait for the rc to select both pods and delete one of them
|
||||
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
return verifyRemainingObjects(t, clientSet, ns.Name, 1, 1)
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
close(stopCh)
|
||||
}
|
307
vendor/k8s.io/kubernetes/test/integration/scheduler/extender_test.go
generated
vendored
Normal file

@ -0,0 +1,307 @@

// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package scheduler
|
||||
|
||||
// This file tests scheduler extender.
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler"
|
||||
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
|
||||
e2e "k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
filter = "filter"
|
||||
prioritize = "prioritize"
|
||||
)
|
||||
|
||||
type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
|
||||
type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error)
|
||||
|
||||
type priorityConfig struct {
|
||||
function priorityFunc
|
||||
weight int
|
||||
}
|
||||
|
||||
type Extender struct {
|
||||
name string
|
||||
predicates []fitPredicate
|
||||
prioritizers []priorityConfig
|
||||
}
|
||||
|
||||
func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Request) {
|
||||
var args schedulerapi.ExtenderArgs
|
||||
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
defer req.Body.Close()
|
||||
|
||||
if err := decoder.Decode(&args); err != nil {
|
||||
http.Error(w, "Decode error", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
encoder := json.NewEncoder(w)
|
||||
|
||||
if strings.Contains(req.URL.Path, filter) {
|
||||
resp := &schedulerapi.ExtenderFilterResult{}
|
||||
nodes, failedNodes, err := e.Filter(&args.Pod, &args.Nodes)
|
||||
if err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Nodes = *nodes
|
||||
resp.FailedNodes = failedNodes
|
||||
}
|
||||
|
||||
if err := encoder.Encode(resp); err != nil {
|
||||
t.Fatalf("Failed to encode %+v", resp)
|
||||
}
|
||||
} else if strings.Contains(req.URL.Path, prioritize) {
|
||||
// Prioritize errors are ignored. Default k8s priorities or another extender's
|
||||
// priorities may be applied.
|
||||
priorities, _ := e.Prioritize(&args.Pod, &args.Nodes)
|
||||
|
||||
if err := encoder.Encode(priorities); err != nil {
|
||||
t.Fatalf("Failed to encode %+v", priorities)
|
||||
}
|
||||
} else {
|
||||
http.Error(w, "Unknown method", http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Extender) Filter(pod *v1.Pod, nodes *v1.NodeList) (*v1.NodeList, schedulerapi.FailedNodesMap, error) {
|
||||
filtered := []v1.Node{}
|
||||
failedNodesMap := schedulerapi.FailedNodesMap{}
|
||||
for _, node := range nodes.Items {
|
||||
fits := true
|
||||
for _, predicate := range e.predicates {
|
||||
fit, err := predicate(pod, &node)
|
||||
if err != nil {
|
||||
return &v1.NodeList{}, schedulerapi.FailedNodesMap{}, err
|
||||
}
|
||||
if !fit {
|
||||
fits = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if fits {
|
||||
filtered = append(filtered, node)
|
||||
} else {
|
||||
failedNodesMap[node.Name] = fmt.Sprintf("extender failed: %s", e.name)
|
||||
}
|
||||
}
|
||||
return &v1.NodeList{Items: filtered}, failedNodesMap, nil
|
||||
}
|
||||
|
||||
func (e *Extender) Prioritize(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
|
||||
result := schedulerapi.HostPriorityList{}
|
||||
combinedScores := map[string]int{}
|
||||
for _, prioritizer := range e.prioritizers {
|
||||
weight := prioritizer.weight
|
||||
if weight == 0 {
|
||||
continue
|
||||
}
|
||||
priorityFunc := prioritizer.function
|
||||
prioritizedList, err := priorityFunc(pod, nodes)
|
||||
if err != nil {
|
||||
return &schedulerapi.HostPriorityList{}, err
|
||||
}
|
||||
for _, hostEntry := range *prioritizedList {
|
||||
combinedScores[hostEntry.Host] += hostEntry.Score * weight
|
||||
}
|
||||
}
|
||||
for host, score := range combinedScores {
|
||||
result = append(result, schedulerapi.HostPriority{Host: host, Score: score})
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
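
// As an illustrative sketch, any function with the fitPredicate signature can
// be plugged into an Extender; the node label used below is hypothetical and
// only demonstrates the shape of such a predicate.
func labelledNodePredicate(pod *v1.Pod, node *v1.Node) (bool, error) {
	// Admit only nodes that carry an (assumed) opt-in label.
	return node.Labels["example.com/accept-extender-pods"] == "true", nil
}
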
|
||||
func machine_1_2_3_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
	if node.Name == "machine1" || node.Name == "machine2" || node.Name == "machine3" {
		return true, nil
	}
	return false, nil
}

func machine_2_3_5_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
	if node.Name == "machine2" || node.Name == "machine3" || node.Name == "machine5" {
		return true, nil
	}
	return false, nil
}

func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
	result := schedulerapi.HostPriorityList{}
	for _, node := range nodes.Items {
		score := 1
		if node.Name == "machine2" {
			score = 10
		}
		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
	}
	return &result, nil
}

func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
	result := schedulerapi.HostPriorityList{}
	for _, node := range nodes.Items {
		score := 1
		if node.Name == "machine3" {
			score = 10
		}
		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
	}
	return &result, nil
}

func TestSchedulerExtender(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
extender1 := &Extender{
|
||||
name: "extender1",
|
||||
predicates: []fitPredicate{machine_1_2_3_Predicate},
|
||||
prioritizers: []priorityConfig{{machine_2_Prioritizer, 1}},
|
||||
}
|
||||
es1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
extender1.serveHTTP(t, w, req)
|
||||
}))
|
||||
defer es1.Close()
|
||||
|
||||
extender2 := &Extender{
|
||||
name: "extender2",
|
||||
predicates: []fitPredicate{machine_2_3_5_Predicate},
|
||||
prioritizers: []priorityConfig{{machine_3_Prioritizer, 1}},
|
||||
}
|
||||
es2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
extender2.serveHTTP(t, w, req)
|
||||
}))
|
||||
defer es2.Close()
|
||||
|
||||
policy := schedulerapi.Policy{
|
||||
ExtenderConfigs: []schedulerapi.ExtenderConfig{
|
||||
{
|
||||
URLPrefix: es1.URL,
|
||||
FilterVerb: filter,
|
||||
PrioritizeVerb: prioritize,
|
||||
Weight: 3,
|
||||
EnableHttps: false,
|
||||
},
|
||||
{
|
||||
URLPrefix: es2.URL,
|
||||
FilterVerb: filter,
|
||||
PrioritizeVerb: prioritize,
|
||||
Weight: 4,
|
||||
EnableHttps: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
policy.APIVersion = api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
|
||||
defer close(schedulerConfig.StopEverything)
|
||||
|
||||
DoTestPodScheduling(ns, t, clientSet)
|
||||
}
|
||||
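
// Each ExtenderConfig entry above wires one HTTP extender into the scheduler:
// the scheduler sends ExtenderArgs to the FilterVerb and PrioritizeVerb paths
// under URLPrefix, and scales the returned priorities by Weight before
// combining them with its own priority functions (hence weights 3 and 4 for
// the two test servers).
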
|
||||
func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
|
||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||
// non-namespaced objects (Nodes).
|
||||
defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
|
||||
|
||||
goodCondition := v1.NodeCondition{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: fmt.Sprintf("schedulable condition"),
|
||||
LastHeartbeatTime: metav1.Time{time.Now()},
|
||||
}
|
||||
node := &v1.Node{
|
||||
Spec: v1.NodeSpec{Unschedulable: false},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
Conditions: []v1.NodeCondition{goodCondition},
|
||||
},
|
||||
}
|
||||
|
||||
for ii := 0; ii < 5; ii++ {
|
||||
node.Name = fmt.Sprintf("machine%d", ii+1)
|
||||
if _, err := cs.Core().Nodes().Create(node); err != nil {
|
||||
t.Fatalf("Failed to create nodes: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "extender-test-pod"},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
|
||||
},
|
||||
}
|
||||
|
||||
myPod, err := cs.Core().Pods(ns.Name).Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to schedule pod: %v", err)
|
||||
}
|
||||
|
||||
if myPod, err := cs.Core().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{}); err != nil {
|
||||
t.Fatalf("Failed to get pod: %v", err)
|
||||
} else if myPod.Spec.NodeName != "machine3" {
|
||||
t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName)
|
||||
}
|
||||
t.Logf("Scheduled pod using extenders")
|
||||
}
|
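
// Rough arithmetic behind the machine3 expectation above (an illustrative
// calculation that ignores the default scheduler priorities, which apply on
// top of the extender scores): extender1 has weight 3 and scores machine2 at
// 10 and every other node at 1, while extender2 has weight 4 and scores
// machine3 at 10. Of the nodes both extenders admit (machine2 and machine3),
// machine2 collects about 10*3 + 1*4 = 34 extender points and machine3
// collects 1*3 + 10*4 = 43, so machine3 wins.
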
594
vendor/k8s.io/kubernetes/test/integration/scheduler/scheduler_test.go
generated
vendored
Normal file

@ -0,0 +1,594 @@

// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package scheduler
|
||||
|
||||
// This file tests the scheduler.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler"
|
||||
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
|
||||
e2e "k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
type nodeMutationFunc func(t *testing.T, n *v1.Node, nodeStore cache.Store, c clientset.Interface)
|
||||
|
||||
type nodeStateManager struct {
|
||||
makeSchedulable nodeMutationFunc
|
||||
makeUnSchedulable nodeMutationFunc
|
||||
}
|
||||
|
||||
func TestUnschedulableNodes(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("unschedulable-nodes", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
|
||||
defer close(schedulerConfig.StopEverything)
|
||||
|
||||
DoTestUnschedulableNodes(t, clientSet, ns, schedulerConfigFactory.NodeLister.Store)
|
||||
}
|
||||
|
||||
func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.Core().Pods(podNamespace).Get(podName, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
// This could be a connection error so we want to retry.
|
||||
return false, nil
|
||||
}
|
||||
if pod.Spec.NodeName == "" {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
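
// A typical use of podScheduled, shown here only as an illustrative sketch of
// the pattern used later in this file, is to poll it until the pod has been
// bound to a node:
//
//	err := wait.Poll(time.Second, wait.ForeverTestTimeout,
//		podScheduled(cs, pod.Namespace, pod.Name))
//	if err != nil {
//		// the pod was never scheduled within the timeout
//	}
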
|
||||
// Wait till the passFunc confirms that the object it expects to see is in the store.
|
||||
// Used to observe reflected events.
|
||||
func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n interface{}) bool) error {
|
||||
nodes := []*v1.Node{}
|
||||
err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
|
||||
if n, _, err := s.GetByKey(key); err == nil && passFunc(n) {
|
||||
return true, nil
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
} else {
|
||||
if n == nil {
|
||||
nodes = append(nodes, nil)
|
||||
} else {
|
||||
nodes = append(nodes, n.(*v1.Node))
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
t.Logf("Logging consecutive node versions received from store:")
|
||||
for i, n := range nodes {
|
||||
t.Logf("%d: %#v", i, n)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Namespace, nodeStore cache.Store) {
|
||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||
// non-namespaced objects (Nodes).
|
||||
defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
|
||||
|
||||
goodCondition := v1.NodeCondition{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: fmt.Sprintf("schedulable condition"),
|
||||
LastHeartbeatTime: metav1.Time{time.Now()},
|
||||
}
|
||||
badCondition := v1.NodeCondition{
|
||||
Type: v1.NodeReady,
|
||||
Status: v1.ConditionUnknown,
|
||||
Reason: fmt.Sprintf("unschedulable condition"),
|
||||
LastHeartbeatTime: metav1.Time{time.Now()},
|
||||
}
|
||||
// Create a new schedulable node, since we're first going to apply
|
||||
// the unschedulable condition and verify that pods aren't scheduled.
|
||||
node := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "node-scheduling-test-node"},
|
||||
Spec: v1.NodeSpec{Unschedulable: false},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
Conditions: []v1.NodeCondition{goodCondition},
|
||||
},
|
||||
}
|
||||
nodeKey, err := cache.MetaNamespaceKeyFunc(node)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't retrieve key for node %v", node.Name)
|
||||
}
|
||||
|
||||
// The test does the following for each nodeStateManager in this list:
|
||||
// 1. Create a new node
|
||||
// 2. Apply the makeUnSchedulable function
|
||||
// 3. Create a new pod
|
||||
// 4. Check that the pod doesn't get assigned to the node
|
||||
// 5. Apply the schedulable function
|
||||
// 6. Check that the pod *does* get assigned to the node
|
||||
// 7. Delete the pod and node.
|
||||
|
||||
nodeModifications := []nodeStateManager{
|
||||
// Test node.Spec.Unschedulable=true/false
|
||||
{
|
||||
makeUnSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Spec.Unschedulable = true
|
||||
if _, err := c.Core().Nodes().Update(n); err != nil {
|
||||
t.Fatalf("Failed to update node with unschedulable=true: %v", err)
|
||||
}
|
||||
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
|
||||
// An unschedulable node should still be present in the store
|
||||
// Nodes that are unschedulable or that are not ready or
|
||||
// have their disk full (Node.Status.Conditions) are excluded
|
||||
// based on NodeConditionPredicate, a separate check
|
||||
return node != nil && node.(*v1.Node).Spec.Unschedulable == true
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err)
|
||||
}
|
||||
},
|
||||
makeSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Spec.Unschedulable = false
|
||||
if _, err := c.Core().Nodes().Update(n); err != nil {
|
||||
t.Fatalf("Failed to update node with unschedulable=false: %v", err)
|
||||
}
|
||||
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
|
||||
return node != nil && node.(*v1.Node).Spec.Unschedulable == false
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to observe reflected update for setting unschedulable=false: %v", err)
|
||||
}
|
||||
},
|
||||
},
|
||||
// Test node.Status.Conditions=ConditionTrue/Unknown
|
||||
{
|
||||
makeUnSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Status = v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
Conditions: []v1.NodeCondition{badCondition},
|
||||
}
|
||||
if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
|
||||
t.Fatalf("Failed to update node with bad status condition: %v", err)
|
||||
}
|
||||
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
|
||||
return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionUnknown
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
|
||||
}
|
||||
},
|
||||
makeSchedulable: func(t *testing.T, n *v1.Node, s cache.Store, c clientset.Interface) {
|
||||
n.Status = v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
Conditions: []v1.NodeCondition{goodCondition},
|
||||
}
|
||||
if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
|
||||
t.Fatalf("Failed to update node with healthy status condition: %v", err)
|
||||
}
|
||||
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
|
||||
return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionTrue
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, mod := range nodeModifications {
|
||||
unSchedNode, err := cs.Core().Nodes().Create(node)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create node: %v", err)
|
||||
}
|
||||
|
||||
// Apply the unschedulable modification to the node, and wait for the reflection
|
||||
mod.makeUnSchedulable(t, unSchedNode, nodeStore, cs)
|
||||
|
||||
// Create the new pod, note that this needs to happen post unschedulable
|
||||
// modification or we have a race in the test.
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "node-scheduling-test-pod"},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
|
||||
},
|
||||
}
|
||||
myPod, err := cs.Core().Pods(ns.Name).Create(pod)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
// There are no schedulable nodes - the pod shouldn't be scheduled.
|
||||
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
|
||||
if err == nil {
|
||||
t.Errorf("Pod scheduled successfully on unschedulable nodes")
|
||||
}
|
||||
if err != wait.ErrWaitTimeout {
|
||||
t.Errorf("Test %d: failed while trying to confirm the pod does not get scheduled on the node: %v", i, err)
|
||||
} else {
|
||||
t.Logf("Test %d: Pod did not get scheduled on an unschedulable node", i)
|
||||
}
|
||||
|
||||
// Apply the schedulable modification to the node, and wait for the reflection
|
||||
schedNode, err := cs.Core().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get node: %v", err)
|
||||
}
|
||||
mod.makeSchedulable(t, schedNode, nodeStore, cs)
|
||||
|
||||
// Wait until the pod is scheduled.
|
||||
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
|
||||
} else {
|
||||
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
|
||||
}
|
||||
|
||||
err = cs.Core().Pods(ns.Name).Delete(myPod.Name, v1.NewDeleteOptions(0))
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete pod: %v", err)
|
||||
}
|
||||
err = cs.Core().Nodes().Delete(schedNode.Name, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete node: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiScheduler(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
// TODO: Uncomment when #19254 is fixed.
|
||||
// This seems to be a different issue - it still doesn't work.
|
||||
// defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("multi-scheduler", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
/*
|
||||
This integration tests the multi-scheduler feature in the following way:
|
||||
1. create a default scheduler
|
||||
2. create a node
|
||||
3. create 3 pods: testPodNoAnnotation, testPodWithAnnotationFitsDefault and testPodWithAnnotationFitsFoo
|
||||
- note: the first two should be picked and scheduled by default scheduler while the last one should be
|
||||
picked by scheduler of name "foo-scheduler" which does not exist yet.
|
||||
4. **check point-1**:
|
||||
- testPodNoAnnotation, testPodWithAnnotationFitsDefault should be scheduled
|
||||
- testPodWithAnnotationFitsFoo should NOT be scheduled
|
||||
5. create a scheduler with name "foo-scheduler"
|
||||
6. **check point-2**:
|
||||
- testPodWithAnnotationFitsFoo should be scheduled
|
||||
7. stop default scheduler
|
||||
8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
|
||||
- note: these two pods belong to default scheduler which no longer exists
|
||||
9. **check point-3**:
|
||||
- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
|
||||
*/
|
||||
// 1. create and start default-scheduler
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||
// non-namespaced objects (Nodes).
|
||||
defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
// default-scheduler will be stopped later
|
||||
|
||||
// 2. create a node
|
||||
node := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "node-multi-scheduler-test-node"},
|
||||
Spec: v1.NodeSpec{Unschedulable: false},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
}
|
||||
clientSet.Core().Nodes().Create(node)
|
||||
|
||||
// 3. create 3 pods for testing
|
||||
podWithNoAnnotation := createPod(clientSet, "pod-with-no-annotation", nil)
|
||||
testPodNoAnnotation, err := clientSet.Core().Pods(ns.Name).Create(podWithNoAnnotation)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"}
|
||||
podWithAnnotationFitsDefault := createPod(clientSet, "pod-with-annotation-fits-default", schedulerAnnotationFitsDefault)
|
||||
testPodWithAnnotationFitsDefault, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsDefault)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
|
||||
podWithAnnotationFitsFoo := createPod(clientSet, "pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo)
|
||||
testPodWithAnnotationFitsFoo, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsFoo)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
// 4. **check point-1**:
|
||||
// - testPodNoAnnotation, testPodWithAnnotationFitsDefault should be scheduled
|
||||
// - testPodWithAnnotationFitsFoo should NOT be scheduled
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation.Namespace, testPodNoAnnotation.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodNoAnnotation.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodNoAnnotation.Name)
|
||||
}
|
||||
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsDefault.Namespace, testPodWithAnnotationFitsDefault.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodWithAnnotationFitsDefault.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodWithAnnotationFitsDefault.Name)
|
||||
}
|
||||
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
|
||||
if err == nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsFoo.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodWithAnnotationFitsFoo.Name)
|
||||
}
|
||||
|
||||
// 5. create and start a scheduler with name "foo-scheduler"
|
||||
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
schedulerConfigFactory2 := factory.NewConfigFactory(clientSet2, "foo-scheduler", v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig2, err := schedulerConfigFactory2.Create()
|
||||
if err != nil {
|
||||
t.Errorf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster2 := record.NewBroadcaster()
|
||||
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(v1.EventSource{Component: "foo-scheduler"})
|
||||
eventBroadcaster2.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet2.Core().Events(ns.Name)})
|
||||
scheduler.New(schedulerConfig2).Run()
|
||||
|
||||
defer close(schedulerConfig2.StopEverything)
|
||||
|
||||
// 6. **check point-2**:
|
||||
// - testPodWithAnnotationFitsFoo should be scheduled
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsFoo.Namespace, testPodWithAnnotationFitsFoo.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodWithAnnotationFitsFoo.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodWithAnnotationFitsFoo.Name)
|
||||
}
|
||||
|
||||
// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
|
||||
err = clientSet.Core().Pods(ns.Name).Delete(testPodNoAnnotation.Name, v1.NewDeleteOptions(0))
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete pod: %v", err)
|
||||
}
|
||||
err = clientSet.Core().Pods(ns.Name).Delete(testPodWithAnnotationFitsDefault.Name, v1.NewDeleteOptions(0))
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete pod: %v", err)
|
||||
}
|
||||
|
||||
// The rest of this test assumes that closing StopEverything will cause the
|
||||
// scheduler thread to stop immediately. It won't, and in fact it will often
|
||||
// schedule 1 more pod before finally exiting. Comment out until we fix that.
|
||||
//
|
||||
// See https://github.com/kubernetes/kubernetes/issues/23715 for more details.
|
||||
|
||||
/*
|
||||
close(schedulerConfig.StopEverything)
|
||||
|
||||
// 8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
|
||||
// - note: these two pods belong to default scheduler which no longer exists
|
||||
podWithNoAnnotation2 := createPod("pod-with-no-annotation2", nil)
|
||||
podWithAnnotationFitsDefault2 := createPod("pod-with-annotation-fits-default2", schedulerAnnotationFitsDefault)
|
||||
testPodNoAnnotation2, err := clientSet.Core().Pods(ns.Name).Create(podWithNoAnnotation2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
testPodWithAnnotationFitsDefault2, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsDefault2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
// 9. **check point-3**:
|
||||
// - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name))
|
||||
if err == nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodNoAnnotation2.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodNoAnnotation2.Name)
|
||||
}
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsDefault2.Namespace, testPodWithAnnotationFitsDefault2.Name))
|
||||
if err == nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsDefault2.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodWithAnnotationFitsDefault2.Name)
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
func createPod(client clientset.Interface, name string, annotation map[string]string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Name: name, Annotations: annotation},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// This test verifies that the scheduler works well regardless of whether the kubelet is allocatable-aware or not.
|
||||
func TestAllocatable(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
ns := framework.CreateTestingNamespace("allocatable", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
// 1. create and start default-scheduler
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
// NOTE: This test cannot run in parallel, because it is creating and deleting
|
||||
// non-namespaced objects (Nodes).
|
||||
defer clientSet.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
|
||||
|
||||
schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler config: %v", err)
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events(ns.Name)})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
// default-scheduler will be stopped later
|
||||
defer close(schedulerConfig.StopEverything)
|
||||
|
||||
// 2. create a node without allocatable awareness
|
||||
node := &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
|
||||
Spec: v1.NodeSpec{Unschedulable: false},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
allocNode, err := clientSet.Core().Nodes().Create(node)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create node: %v", err)
|
||||
}
|
||||
|
||||
// 3. create resource pod which requires less than Capacity
|
||||
podResource := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "pod-test-allocatable"},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "container",
|
||||
Image: e2e.GetPauseImageName(clientSet),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
testAllocPod, err := clientSet.Core().Pods(ns.Name).Create(podResource)
|
||||
if err != nil {
|
||||
t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
// 4. Test: this test pod should be scheduled since api-server will use Capacity as Allocatable
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod.Namespace, testAllocPod.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
|
||||
} else {
|
||||
t.Logf("Test allocatable unawareness: %s Pod scheduled", testAllocPod.Name)
|
||||
}
|
||||
|
||||
// 5. Change the node status to allocatable aware, note that Allocatable is less than Pod's requirement
|
||||
allocNode.Status = v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := clientSet.Core().Nodes().UpdateStatus(allocNode); err != nil {
|
||||
t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
|
||||
}
|
||||
|
||||
if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &v1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("Failed to remove first resource pod: %v", err)
|
||||
}
|
||||
|
||||
// 6. Make another pod with different name, same resource request
|
||||
podResource.ObjectMeta.Name = "pod-test-allocatable2"
|
||||
testAllocPod2, err := clientSet.Core().Pods(ns.Name).Create(podResource)
|
||||
if err != nil {
|
||||
t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
// 7. Test: this test pod should not be scheduled since it requests more than Allocatable
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod2.Namespace, testAllocPod2.Name))
|
||||
if err == nil {
|
||||
t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err)
|
||||
} else {
|
||||
t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
|
||||
}
|
||||
}
|
59
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,59 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["util.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
|
||||
"//pkg/client/record:go_default_library",
|
||||
"//pkg/client/restclient:go_default_library",
|
||||
"//plugin/pkg/scheduler:go_default_library",
|
||||
"//plugin/pkg/scheduler/algorithmprovider:go_default_library",
|
||||
"//plugin/pkg/scheduler/factory:go_default_library",
|
||||
"//test/integration/framework:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"scheduler_bench_test.go",
|
||||
"scheduler_test.go",
|
||||
],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//plugin/pkg/scheduler/factory:go_default_library",
|
||||
"//test/integration/framework:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/renstrom/dedent",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
45
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/README.md
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
|||
Scheduler Performance Test
|
||||
======
|
||||
|
||||
Motivation
|
||||
------
|
||||
We already have a performance testing system -- Kubemark. However, Kubemark requires setting up and bootstrapping a whole cluster, which takes a lot of time.
|
||||
|
||||
We want a standard way to reproduce scheduling latency metrics results and to benchmark the scheduler as simply and quickly as possible. We have the following goals:
|
||||
|
||||
- Save time on testing
|
||||
- The test and benchmark can be run on a single box.
|
||||
We only set up the components necessary for scheduling, without booting up a cluster.
|
||||
- Profiling runtime metrics to find out bottlenecks
|
||||
- Write scheduler integration tests but focus on performance measurement.
|
||||
Take advantage of Go profiling tools and collect fine-grained metrics,
|
||||
like CPU profiling, memory profiling, and block profiling.
|
||||
- Reproduce test results easily
|
||||
- We want a known place to run performance-related tests for the scheduler.
|
||||
Developers should just run one script to collect all the information they need.
|
||||
|
||||
Currently the test suite has the following:
|
||||
|
||||
- density test (by adding a new Go test)
|
||||
- schedule 30k pods on 1000 (fake) nodes and 3k pods on 100 (fake) nodes
|
||||
- print out scheduling rate every second
|
||||
- let you see how the rate changes with the number of scheduled pods
|
||||
- benchmark
|
||||
- make use of `go test -bench` and report nanoseconds/op (see the example below).
|
||||
- schedule b.N pods when the cluster has N nodes and P scheduled pods. Since one round takes a relatively long time to finish, b.N is small: 10 - 100.
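
The benchmark can also be invoked directly. The commands below simply mirror what `test-performance.sh` does when its `RUN_BENCHMARK` flag is enabled (the flags come from that script, not from a separate recommendation):

```
go test -c -o perf.test
./perf.test -test.bench=. -test.run=xxxx -test.cpuprofile=prof.out -test.short=false
```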
|
||||
|
||||
|
||||
How To Run
|
||||
------
|
||||
```
|
||||
cd kubernetes/test/integration/scheduler_perf
|
||||
./test-performance.sh
|
||||
```
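
By default the script runs only the density tests. To also run the benchmark suite, which compiles the tests and writes a CPU profile to `prof.out`, enable the `RUN_BENCHMARK` flag that `test-performance.sh` checks:

```
RUN_BENCHMARK=true ./test-performance.sh
```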
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
||||
|
||||
[]()
|
100
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_bench_test.go
generated
vendored
Normal file
|
@ -0,0 +1,100 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package benchmark
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// BenchmarkScheduling100Nodes0Pods benchmarks the scheduling rate
|
||||
// when the cluster has 100 nodes and 0 scheduled pods
|
||||
func BenchmarkScheduling100Nodes0Pods(b *testing.B) {
|
||||
benchmarkScheduling(100, 0, b)
|
||||
}
|
||||
|
||||
// BenchmarkScheduling100Nodes1000Pods benchmarks the scheduling rate
|
||||
// when the cluster has 100 nodes and 1000 scheduled pods
|
||||
func BenchmarkScheduling100Nodes1000Pods(b *testing.B) {
|
||||
benchmarkScheduling(100, 1000, b)
|
||||
}
|
||||
|
||||
// BenchmarkScheduling1000Nodes0Pods benchmarks the scheduling rate
|
||||
// when the cluster has 1000 nodes and 0 scheduled pods
|
||||
func BenchmarkScheduling1000Nodes0Pods(b *testing.B) {
|
||||
benchmarkScheduling(1000, 0, b)
|
||||
}
|
||||
|
||||
// BenchmarkScheduling1000Nodes1000Pods benchmarks the scheduling rate
|
||||
// when the cluster has 1000 nodes and 1000 scheduled pods
|
||||
func BenchmarkScheduling1000Nodes1000Pods(b *testing.B) {
|
||||
benchmarkScheduling(1000, 1000, b)
|
||||
}
|
||||
|
||||
// benchmarkScheduling benchmarks scheduling rate with specific number of nodes
|
||||
// and a specific number of pods already scheduled. Since an operation takes a relatively
|
||||
// long time, b.N should be small: 10 - 100.
|
||||
func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
|
||||
schedulerConfigFactory, finalFunc := mustSetupScheduler()
|
||||
defer finalFunc()
|
||||
c := schedulerConfigFactory.Client
|
||||
|
||||
nodePreparer := framework.NewIntegrationTestNodePreparer(
|
||||
c,
|
||||
[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
|
||||
"scheduler-perf-",
|
||||
)
|
||||
if err := nodePreparer.PrepareNodes(); err != nil {
|
||||
glog.Fatalf("%v", err)
|
||||
}
|
||||
defer nodePreparer.CleanupNodes()
|
||||
|
||||
config := testutils.NewTestPodCreatorConfig()
|
||||
config.AddStrategy("sched-test", numScheduledPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
|
||||
podCreator := testutils.NewTestPodCreator(c, config)
|
||||
podCreator.CreatePods()
|
||||
|
||||
for {
|
||||
scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
|
||||
if len(scheduled) >= numScheduledPods {
|
||||
break
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
// start benchmark
|
||||
b.ResetTimer()
|
||||
config = testutils.NewTestPodCreatorConfig()
|
||||
config.AddStrategy("sched-test", b.N, testutils.NewSimpleWithControllerCreatePodStrategy("rc2"))
|
||||
podCreator = testutils.NewTestPodCreator(c, config)
|
||||
podCreator.CreatePods()
|
||||
for {
|
||||
// This can potentially affect performance of scheduler, since List() is done under mutex.
|
||||
// TODO: Setup watch on apiserver and wait until all pods scheduled.
|
||||
scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
|
||||
if len(scheduled) >= numScheduledPods+b.N {
|
||||
break
|
||||
}
|
||||
// Note: This might introduce slight deviation in accuracy of benchmark results.
|
||||
// Since the total amount of time is relatively large, it might not be a concern.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
241
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/scheduler_test.go
generated
vendored
Normal file
|
@ -0,0 +1,241 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package benchmark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/renstrom/dedent"
|
||||
)
|
||||
|
||||
const (
|
||||
threshold3K = 100
|
||||
threshold30K = 30
|
||||
threshold60K = 30
|
||||
)
|
||||
|
||||
// TestSchedule100Node3KPods schedules 3k pods on 100 nodes.
|
||||
func TestSchedule100Node3KPods(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping because we want to run short tests")
|
||||
}
|
||||
|
||||
config := defaultSchedulerBenchmarkConfig(100, 3000)
|
||||
if min := schedulePods(config); min < threshold3K {
|
||||
t.Errorf("Too small pod scheduling throughput for 3k pods. Expected %v got %v", threshold3K, min)
|
||||
} else {
|
||||
fmt.Printf("Minimal observed throughput for 3k pod test: %v\n", min)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSchedule100Node3KNodeAffinityPods schedules 3k pods using Node affinity on 100 nodes.
|
||||
func TestSchedule100Node3KNodeAffinityPods(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping because we want to run short tests")
|
||||
}
|
||||
|
||||
config := baseConfig()
|
||||
config.numNodes = 100
|
||||
config.numPods = 3000
|
||||
|
||||
// number of Node-Pod sets with Pods NodeAffinity matching given Nodes.
|
||||
numGroups := 10
|
||||
nodeAffinityKey := "kubernetes.io/sched-perf-node-affinity"
|
||||
|
||||
nodeStrategies := make([]testutils.CountToStrategy, 0, 10)
|
||||
for i := 0; i < numGroups; i++ {
|
||||
nodeStrategies = append(nodeStrategies, testutils.CountToStrategy{
|
||||
Count: config.numNodes / numGroups,
|
||||
Strategy: testutils.NewLabelNodePrepareStrategy(nodeAffinityKey, fmt.Sprintf("%v", i)),
|
||||
})
|
||||
}
|
||||
config.nodePreparer = framework.NewIntegrationTestNodePreparer(
|
||||
config.schedulerConfigFactory.Client,
|
||||
nodeStrategies,
|
||||
"scheduler-perf-",
|
||||
)
|
||||
|
||||
affinityTemplate := dedent.Dedent(`
|
||||
{
|
||||
"nodeAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [{
|
||||
"matchExpressions": [{
|
||||
"key": "` + nodeAffinityKey + `",
|
||||
"operator": "In",
|
||||
"values": ["%v"]
|
||||
}]
|
||||
}]
|
||||
}
|
||||
}
|
||||
}`)
|
||||
|
||||
podCreatorConfig := testutils.NewTestPodCreatorConfig()
|
||||
for i := 0; i < numGroups; i++ {
|
||||
podCreatorConfig.AddStrategy("sched-perf-node-affinity", config.numPods/numGroups,
|
||||
testutils.NewCustomCreatePodStrategy(&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
GenerateName: "sched-perf-node-affinity-pod-",
|
||||
Annotations: map[string]string{v1.AffinityAnnotationKey: fmt.Sprintf(affinityTemplate, i)},
|
||||
},
|
||||
Spec: testutils.MakePodSpec(),
|
||||
}),
|
||||
)
|
||||
}
|
||||
config.podCreator = testutils.NewTestPodCreator(config.schedulerConfigFactory.Client, podCreatorConfig)
|
||||
|
||||
if min := schedulePods(config); min < threshold30K {
|
||||
t.Errorf("Too small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
|
||||
} else {
|
||||
fmt.Printf("Minimal observed throughput for 30k pod test: %v\n", min)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSchedule1000Node30KPods schedules 30k pods on 1000 nodes.
|
||||
func TestSchedule1000Node30KPods(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping because we want to run short tests")
|
||||
}
|
||||
|
||||
config := defaultSchedulerBenchmarkConfig(1000, 30000)
|
||||
if min := schedulePods(config); min < threshold30K {
|
||||
t.Errorf("To small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
|
||||
} else {
|
||||
fmt.Printf("Minimal observed throughput for 30k pod test: %v\n", min)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSchedule2000Node60KPods schedules 60k pods on 2000 nodes.
|
||||
// This test won't fit in the normal 10 minute time window.
|
||||
// func TestSchedule2000Node60KPods(t *testing.T) {
|
||||
// if testing.Short() {
|
||||
// t.Skip("Skipping because we want to run short tests")
|
||||
// }
|
||||
// config := defaultSchedulerBenchmarkConfig(2000, 60000)
|
||||
// if min := schedulePods(config); min < threshold60K {
|
||||
// t.Errorf("To small pod scheduling throughput for 60k pods. Expected %v got %v", threshold60K, min)
|
||||
// } else {
|
||||
// fmt.Printf("Minimal observed throughput for 60k pod test: %v\n", min)
|
||||
// }
|
||||
// }
|
||||
|
||||
type testConfig struct {
|
||||
numPods int
|
||||
numNodes int
|
||||
nodePreparer testutils.TestNodePreparer
|
||||
podCreator *testutils.TestPodCreator
|
||||
schedulerConfigFactory *factory.ConfigFactory
|
||||
destroyFunc func()
|
||||
}
|
||||
|
||||
func baseConfig() *testConfig {
|
||||
schedulerConfigFactory, destroyFunc := mustSetupScheduler()
|
||||
return &testConfig{
|
||||
schedulerConfigFactory: schedulerConfigFactory,
|
||||
destroyFunc: destroyFunc,
|
||||
}
|
||||
}
|
||||
|
||||
func defaultSchedulerBenchmarkConfig(numNodes, numPods int) *testConfig {
|
||||
baseConfig := baseConfig()
|
||||
|
||||
nodePreparer := framework.NewIntegrationTestNodePreparer(
|
||||
baseConfig.schedulerConfigFactory.Client,
|
||||
[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
|
||||
"scheduler-perf-",
|
||||
)
|
||||
|
||||
config := testutils.NewTestPodCreatorConfig()
|
||||
config.AddStrategy("sched-test", numPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
|
||||
podCreator := testutils.NewTestPodCreator(baseConfig.schedulerConfigFactory.Client, config)
|
||||
|
||||
baseConfig.nodePreparer = nodePreparer
|
||||
baseConfig.podCreator = podCreator
|
||||
baseConfig.numPods = numPods
|
||||
baseConfig.numNodes = numNodes
|
||||
|
||||
return baseConfig
|
||||
}
|
||||
|
||||
// schedulePods schedules specific number of pods on specific number of nodes.
|
||||
// This is used to learn the scheduling throughput on various
|
||||
// cluster sizes and how it changes as more and more pods are scheduled.
|
||||
// It won't stop until all pods are scheduled.
|
||||
// It returns the minimum throughput observed over the whole run.
|
||||
func schedulePods(config *testConfig) int32 {
|
||||
defer config.destroyFunc()
|
||||
if err := config.nodePreparer.PrepareNodes(); err != nil {
|
||||
glog.Fatalf("%v", err)
|
||||
}
|
||||
defer config.nodePreparer.CleanupNodes()
|
||||
config.podCreator.CreatePods()
|
||||
|
||||
prev := 0
|
||||
// On startup there may be a latent period where NO scheduling occurs (qps = 0).
|
||||
// We are interested in low scheduling rates (i.e. qps=2), so we track the minimum QPS seen over the run.
|
||||
minQps := int32(math.MaxInt32)
|
||||
start := time.Now()
|
||||
|
||||
// Bake in time for the first pod scheduling event.
|
||||
for {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
scheduled := config.schedulerConfigFactory.ScheduledPodLister.Indexer.List()
|
||||
// 30,000 pods -> wait till @ least 300 are scheduled to start measuring.
|
||||
// TODO: Find out why there are sometimes scheduling blips in the beginning.
|
||||
if len(scheduled) > config.numPods/100 {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Count how often each QPS value is observed; useful for debugging tests.
|
||||
qpsStats := map[int]int{}
|
||||
|
||||
// Now that scheduling has started, let's measure how many pods are scheduled per second.
|
||||
for {
|
||||
// This can potentially affect performance of scheduler, since List() is done under mutex.
|
||||
// Listing 10000 pods is an expensive operation, so running it frequently may impact scheduler.
|
||||
// TODO: Setup watch on apiserver and wait until all pods scheduled.
|
||||
scheduled := config.schedulerConfigFactory.ScheduledPodLister.Indexer.List()
|
||||
|
||||
// We are done when all pods have been scheduled;
|
||||
// return the worst-case (minimum) QPS that was seen during this time.
|
||||
// Note this should not be skewed low by cold-start, since scheduling time was baked in above.
|
||||
if len(scheduled) >= config.numPods {
|
||||
fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average). min QPS was %v\n",
|
||||
config.numPods, int(time.Since(start)/time.Second), config.numPods/int(time.Since(start)/time.Second), minQps)
|
||||
return minQps
|
||||
}
|
||||
|
||||
// There's no point in printing it for the last iteration, as the value is random
|
||||
qps := len(scheduled) - prev
|
||||
qpsStats[qps] += 1
|
||||
if int32(qps) < minQps {
|
||||
minQps = int32(qps)
|
||||
}
|
||||
fmt.Printf("%ds\trate: %d\ttotal: %d (qps frequency: %v)\n", time.Since(start)/time.Second, qps, len(scheduled), qpsStats)
|
||||
prev = len(scheduled)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
52
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/test-performance.sh
generated
vendored
Executable file
|
@ -0,0 +1,52 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../../
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
kube::golang::setup_env
|
||||
|
||||
DIR_BASENAME=$(dirname "${BASH_SOURCE}")
|
||||
pushd ${DIR_BASENAME}
|
||||
|
||||
cleanup() {
|
||||
popd 2> /dev/null
|
||||
kube::etcd::cleanup
|
||||
kube::log::status "performance test cleanup complete"
|
||||
}
|
||||
|
||||
trap cleanup EXIT
|
||||
|
||||
kube::etcd::start
|
||||
|
||||
# We are using the benchmark suite to do profiling, because it only runs a few pods and
|
||||
# theoretically has less variance.
|
||||
if ${RUN_BENCHMARK:-false}; then
|
||||
kube::log::status "performance test (benchmark) compiling"
|
||||
go test -c -o "perf.test"
|
||||
|
||||
kube::log::status "performance test (benchmark) start"
|
||||
"./perf.test" -test.bench=. -test.run=xxxx -test.cpuprofile=prof.out -test.short=false
|
||||
kube::log::status "...benchmark tests finished"
|
||||
fi
|
||||
# Running density tests. It might take a long time.
|
||||
kube::log::status "performance test (density) start"
|
||||
go test -test.run=. -test.timeout=60m -test.short=false
|
||||
kube::log::status "...density tests finished"
|
78
vendor/k8s.io/kubernetes/test/integration/scheduler_perf/util.go
generated
vendored
Normal file
|
@ -0,0 +1,78 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package benchmark
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler"
|
||||
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
// mustSetupScheduler starts the following components:
|
||||
// - k8s api server (a.k.a. master)
|
||||
// - scheduler
|
||||
// It returns scheduler config factory and destroyFunc which should be used to
|
||||
// remove resources after the test has finished.
|
||||
// Notes on rate limiter:
|
||||
// - client rate limit is set to 5000.
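//
// A minimal usage sketch, mirroring how the tests in this package call it:
//
//	schedulerConfigFactory, destroyFunc := mustSetupScheduler()
//	defer destroyFunc()
//	c := schedulerConfigFactory.Client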
|
||||
func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destroyFunc func()) {
|
||||
|
||||
h := &framework.MasterHolder{Initialized: make(chan struct{})}
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
<-h.Initialized
|
||||
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
|
||||
}))
|
||||
|
||||
framework.RunAMasterUsingServer(framework.NewIntegrationTestMasterConfig(), s, h)
|
||||
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{
|
||||
Host: s.URL,
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion},
|
||||
QPS: 5000.0,
|
||||
Burst: 5000,
|
||||
})
|
||||
|
||||
schedulerConfigFactory = factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
|
||||
|
||||
schedulerConfig, err := schedulerConfigFactory.Create()
|
||||
if err != nil {
|
||||
panic("Couldn't create scheduler config")
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: "scheduler"})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
|
||||
scheduler.New(schedulerConfig).Run()
|
||||
|
||||
destroyFunc = func() {
|
||||
glog.Infof("destroying")
|
||||
close(schedulerConfig.StopEverything)
|
||||
s.Close()
|
||||
glog.Infof("destroyed")
|
||||
}
|
||||
return
|
||||
}
|
122
vendor/k8s.io/kubernetes/test/integration/secrets/secrets_test.go
generated
vendored
Normal file
|
@ -0,0 +1,122 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package secrets
|
||||
|
||||
// This file tests use of the secrets API resource.
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/test/integration"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func deleteSecretOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
|
||||
if err := c.Core().Secrets(ns).Delete(name, nil); err != nil {
|
||||
t.Errorf("unable to delete secret %v: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSecrets tests apiserver-side behavior of creation of secret objects and their use by pods.
|
||||
func TestSecrets(t *testing.T) {
|
||||
_, s := framework.RunAMaster(nil)
|
||||
defer s.Close()
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
ns := framework.CreateTestingNamespace("secret", s, t)
|
||||
defer framework.DeleteTestingNamespace(ns, s, t)
|
||||
|
||||
DoTestSecrets(t, client, ns)
|
||||
}
|
||||
|
||||
// DoTestSecrets tests secrets for one api version.
|
||||
func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
|
||||
// Make a secret object.
|
||||
s := v1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "secret",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data": []byte("value1\n"),
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := client.Core().Secrets(s.Namespace).Create(&s); err != nil {
|
||||
t.Errorf("unable to create test secret: %v", err)
|
||||
}
|
||||
defer deleteSecretOrErrorf(t, client, s.Namespace, s.Name)
|
||||
|
||||
// Template for pods that use a secret.
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "XXX",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "secvol",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: "secret",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "secvol",
|
||||
MountPath: "/fake/path",
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create a pod to consume secret.
|
||||
pod.ObjectMeta.Name = "uses-secret"
|
||||
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
|
||||
t.Errorf("Failed to create pod: %v", err)
|
||||
}
|
||||
defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
|
||||
|
||||
// Create a pod that consumes non-existent secret.
|
||||
pod.ObjectMeta.Name = "uses-non-existent-secret"
|
||||
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
|
||||
t.Errorf("Failed to create pod: %v", err)
|
||||
}
|
||||
defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
|
||||
// This pod may fail to run, but we don't currently prevent this, and this
|
||||
// test can't check whether the kubelet actually pulls the secret.
|
||||
|
||||
// Verifying contents of the volumes is out of scope for an
|
||||
// apiserver<->kubelet integration test. It is covered by an e2e test.
|
||||
}
|
567
vendor/k8s.io/kubernetes/test/integration/serviceaccount/service_account_test.go
generated
vendored
Normal file
|
@ -0,0 +1,567 @@
|
|||
// +build integration,!no-etcd
|
||||
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package serviceaccount
|
||||
|
||||
// This file tests authentication and (soon) authorization of HTTP requests to a master object.
|
||||
// It does not use the client in pkg/client/... because authentication and authorization need
|
||||
// to work for any client of the HTTP interface.
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
"k8s.io/apiserver/pkg/authentication/request/bearertoken"
|
||||
"k8s.io/apiserver/pkg/authentication/request/union"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/informers"
|
||||
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
|
||||
"k8s.io/kubernetes/pkg/serviceaccount"
|
||||
serviceaccountadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
rootUserName = "root"
|
||||
rootToken = "root-user-token"
|
||||
|
||||
readOnlyServiceAccountName = "ro"
|
||||
readWriteServiceAccountName = "rw"
|
||||
)
|
||||
|
||||
func TestServiceAccountAutoCreate(t *testing.T) {
|
||||
c, _, stopFunc := startServiceAccountTestServer(t)
|
||||
defer stopFunc()
|
||||
|
||||
ns := "test-service-account-creation"
|
||||
|
||||
// Create namespace
|
||||
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
|
||||
if err != nil {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
||||
// Get service account
|
||||
defaultUser, err := getServiceAccount(c, ns, "default", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Default serviceaccount not created: %v", err)
|
||||
}
|
||||
|
||||
// Delete service account
|
||||
err = c.Core().ServiceAccounts(ns).Delete(defaultUser.Name, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not delete default serviceaccount: %v", err)
|
||||
}
|
||||
|
||||
// Get recreated service account
|
||||
defaultUser2, err := getServiceAccount(c, ns, "default", true)
|
||||
if err != nil {
|
||||
t.Fatalf("Default serviceaccount not created: %v", err)
|
||||
}
|
||||
if defaultUser2.UID == defaultUser.UID {
|
||||
t.Fatalf("Expected different UID with recreated serviceaccount")
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceAccountTokenAutoCreate(t *testing.T) {
|
||||
c, _, stopFunc := startServiceAccountTestServer(t)
|
||||
defer stopFunc()
|
||||
|
||||
ns := "test-service-account-token-creation"
|
||||
name := "my-service-account"
|
||||
|
||||
// Create namespace
|
||||
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
|
||||
if err != nil {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
||||
// Create service account
|
||||
serviceAccount, err := c.Core().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: name}})
|
||||
if err != nil {
|
||||
t.Fatalf("Service Account not created: %v", err)
|
||||
}
|
||||
|
||||
// Get token
|
||||
token1Name, token1, err := getReferencedServiceAccountToken(c, ns, name, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Delete token
|
||||
err = c.Core().Secrets(ns).Delete(token1Name, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not delete token: %v", err)
|
||||
}
|
||||
|
||||
// Get recreated token
|
||||
token2Name, token2, err := getReferencedServiceAccountToken(c, ns, name, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if token1Name == token2Name {
|
||||
t.Fatalf("Expected new auto-created token name")
|
||||
}
|
||||
if token1 == token2 {
|
||||
t.Fatalf("Expected new auto-created token value")
|
||||
}
|
||||
|
||||
// Trigger creation of a new referenced token
|
||||
serviceAccount, err = c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
serviceAccount.Secrets = []v1.ObjectReference{}
|
||||
_, err = c.Core().ServiceAccounts(ns).Update(serviceAccount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Get rotated token
|
||||
token3Name, token3, err := getReferencedServiceAccountToken(c, ns, name, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if token3Name == token2Name {
|
||||
t.Fatalf("Expected new auto-created token name")
|
||||
}
|
||||
if token3 == token2 {
|
||||
t.Fatalf("Expected new auto-created token value")
|
||||
}
|
||||
|
||||
// Delete service account
|
||||
err = c.Core().ServiceAccounts(ns).Delete(name, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Wait for tokens to be deleted
|
||||
tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name)
|
||||
err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
|
||||
// Get all secrets in the namespace
|
||||
secrets, err := c.Core().Secrets(ns).List(v1.ListOptions{})
|
||||
// Retrieval errors should fail
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, s := range secrets.Items {
|
||||
if tokensToCleanup.Has(s.Name) {
|
||||
// Still waiting for tokens to be cleaned up
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
// All clean
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Error waiting for tokens to be deleted: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceAccountTokenAutoMount(t *testing.T) {
|
||||
c, _, stopFunc := startServiceAccountTestServer(t)
|
||||
defer stopFunc()
|
||||
|
||||
ns := "auto-mount-ns"
|
||||
|
||||
// Create "my" namespace
|
||||
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: ns}})
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
||||
// Get default token
|
||||
defaultTokenName, _, err := getReferencedServiceAccountToken(c, ns, serviceaccountadmission.DefaultServiceAccountName, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Pod to create
|
||||
protoPod := v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "protopod"},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "container-1",
|
||||
Image: "container-1-image",
|
||||
},
|
||||
{
|
||||
Name: "container-2",
|
||||
Image: "container-2-image",
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{Name: "empty-dir", MountPath: serviceaccountadmission.DefaultAPITokenMountPath},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "empty-dir",
|
||||
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Pod we expect to get created
|
||||
defaultMode := int32(0644)
|
||||
expectedServiceAccount := serviceaccountadmission.DefaultServiceAccountName
|
||||
expectedVolumes := append(protoPod.Spec.Volumes, v1.Volume{
|
||||
Name: defaultTokenName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: defaultTokenName,
|
||||
DefaultMode: &defaultMode,
|
||||
},
|
||||
},
|
||||
})
|
||||
expectedContainer1VolumeMounts := []v1.VolumeMount{
|
||||
{Name: defaultTokenName, MountPath: serviceaccountadmission.DefaultAPITokenMountPath, ReadOnly: true},
|
||||
}
|
||||
expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts
|
||||
|
||||
createdPod, err := c.Core().Pods(ns).Create(&protoPod)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if createdPod.Spec.ServiceAccountName != expectedServiceAccount {
|
||||
t.Fatalf("Expected %s, got %s", expectedServiceAccount, createdPod.Spec.ServiceAccountName)
|
||||
}
|
||||
if !api.Semantic.DeepEqual(&expectedVolumes, &createdPod.Spec.Volumes) {
|
||||
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedVolumes, createdPod.Spec.Volumes)
|
||||
}
|
||||
if !api.Semantic.DeepEqual(&expectedContainer1VolumeMounts, &createdPod.Spec.Containers[0].VolumeMounts) {
|
||||
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer1VolumeMounts, createdPod.Spec.Containers[0].VolumeMounts)
|
||||
}
|
||||
if !api.Semantic.DeepEqual(&expectedContainer2VolumeMounts, &createdPod.Spec.Containers[1].VolumeMounts) {
|
||||
t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer2VolumeMounts, createdPod.Spec.Containers[1].VolumeMounts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceAccountTokenAuthentication(t *testing.T) {
|
||||
c, config, stopFunc := startServiceAccountTestServer(t)
|
||||
defer stopFunc()
|
||||
|
||||
myns := "auth-ns"
|
||||
otherns := "other-ns"
|
||||
|
||||
// Create "my" namespace
|
||||
_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: myns}})
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
||||
// Create "other" namespace
|
||||
_, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: otherns}})
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
t.Fatalf("could not create namespace: %v", err)
|
||||
}
|
||||
|
||||
// Create "ro" user in myns
|
||||
_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: readOnlyServiceAccountName}})
|
||||
if err != nil {
|
||||
t.Fatalf("Service Account not created: %v", err)
|
||||
}
|
||||
roTokenName, roToken, err := getReferencedServiceAccountToken(c, myns, readOnlyServiceAccountName, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
roClientConfig := config
|
||||
roClientConfig.BearerToken = roToken
|
||||
roClient := clientset.NewForConfigOrDie(&roClientConfig)
|
||||
doServiceAccountAPIRequests(t, roClient, myns, true, true, false)
|
||||
doServiceAccountAPIRequests(t, roClient, otherns, true, false, false)
|
||||
err = c.Core().Secrets(myns).Delete(roTokenName, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("could not delete token: %v", err)
|
||||
}
|
||||
doServiceAccountAPIRequests(t, roClient, myns, false, false, false)
|
||||
|
||||
// Create "rw" user in myns
|
||||
_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Name: readWriteServiceAccountName}})
|
||||
if err != nil {
|
||||
t.Fatalf("Service Account not created: %v", err)
|
||||
}
|
||||
_, rwToken, err := getReferencedServiceAccountToken(c, myns, readWriteServiceAccountName, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rwClientConfig := config
|
||||
rwClientConfig.BearerToken = rwToken
|
||||
rwClient := clientset.NewForConfigOrDie(&rwClientConfig)
|
||||
doServiceAccountAPIRequests(t, rwClient, myns, true, true, true)
|
||||
doServiceAccountAPIRequests(t, rwClient, otherns, true, false, false)
|
||||
|
||||
// Get default user and token which should have been automatically created
|
||||
_, defaultToken, err := getReferencedServiceAccountToken(c, myns, "default", true)
|
||||
if err != nil {
|
||||
t.Fatalf("could not get default user and token: %v", err)
|
||||
}
|
||||
defaultClientConfig := config
|
||||
defaultClientConfig.BearerToken = defaultToken
|
||||
defaultClient := clientset.NewForConfigOrDie(&defaultClientConfig)
|
||||
doServiceAccountAPIRequests(t, defaultClient, myns, true, false, false)
|
||||
}
|
||||
|
||||
// startServiceAccountTestServer returns a started server
|
||||
// It is the responsibility of the caller to ensure the returned stopFunc is called
func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclient.Config, func()) {
    // Listener
    h := &framework.MasterHolder{Initialized: make(chan struct{})}
    apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        <-h.Initialized
        h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
    }))

    // Anonymous client config
    clientConfig := restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}
    // Root client
    // TODO: remove rootClient after we refactor pkg/admission to use the clientset.
    rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}, BearerToken: rootToken})
    internalRootClientset := internalclientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}, BearerToken: rootToken})
    // Set up two authenticators:
    // 1. A token authenticator that maps the rootToken to the "root" user
    // 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
    rootTokenAuth := authenticator.TokenFunc(func(token string) (user.Info, bool, error) {
        if token == rootToken {
            return &user.DefaultInfo{Name: rootUserName}, true, nil
        }
        return nil, false, nil
    })
    serviceAccountKey, _ := rsa.GenerateKey(rand.Reader, 2048)
    serviceAccountTokenGetter := serviceaccountcontroller.NewGetterFromClient(rootClientset)
    serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator([]interface{}{&serviceAccountKey.PublicKey}, true, serviceAccountTokenGetter)
    authenticator := union.New(
        bearertoken.New(rootTokenAuth),
        bearertoken.New(serviceAccountTokenAuth),
    )

    // Set up a stub authorizer:
    // 1. The "root" user is allowed to do anything
    // 2. ServiceAccounts named "ro" are allowed read-only operations in their namespace
    // 3. ServiceAccounts named "rw" are allowed any operation in their namespace
    authorizer := authorizer.AuthorizerFunc(func(attrs authorizer.Attributes) (bool, string, error) {
        username := ""
        if user := attrs.GetUser(); user != nil {
            username = user.GetName()
        }
        ns := attrs.GetNamespace()

        // If the user is "root"...
        if username == rootUserName {
            // allow them to do anything
            return true, "", nil
        }

        // If the user is a service account...
        if serviceAccountNamespace, serviceAccountName, err := serviceaccount.SplitUsername(username); err == nil {
            // Limit them to their own namespace
            if serviceAccountNamespace == ns {
                switch serviceAccountName {
                case readOnlyServiceAccountName:
                    if attrs.IsReadOnly() {
                        return true, "", nil
                    }
                case readWriteServiceAccountName:
                    return true, "", nil
                }
            }
        }

        return false, fmt.Sprintf("User %s is denied (ns=%s, readonly=%v, resource=%s)", username, ns, attrs.IsReadOnly(), attrs.GetResource()), nil
    })

    // Set up admission plugin to auto-assign serviceaccounts to pods
    serviceAccountAdmission := serviceaccountadmission.NewServiceAccount()
    serviceAccountAdmission.SetInternalClientSet(internalRootClientset)

    masterConfig := framework.NewMasterConfig()
    masterConfig.GenericConfig.EnableIndex = true
    masterConfig.GenericConfig.Authenticator = authenticator
    masterConfig.GenericConfig.Authorizer = authorizer
    masterConfig.GenericConfig.AdmissionControl = serviceAccountAdmission
    framework.RunAMasterUsingServer(masterConfig, apiServer, h)

    // Start the service account and service account token controllers
    stopCh := make(chan struct{})
    tokenController := serviceaccountcontroller.NewTokensController(rootClientset, serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)})
    go tokenController.Run(1, stopCh)

    informers := informers.NewSharedInformerFactory(rootClientset, nil, controller.NoResyncPeriodFunc())
    serviceAccountController := serviceaccountcontroller.NewServiceAccountsController(informers.ServiceAccounts(), informers.Namespaces(), rootClientset, serviceaccountcontroller.DefaultServiceAccountsControllerOptions())
    informers.Start(stopCh)
    go serviceAccountController.Run(5, stopCh)
    // Start the admission plugin reflectors
    serviceAccountAdmission.Run()

    stop := func() {
        close(stopCh)
        serviceAccountAdmission.Stop()
        apiServer.Close()
    }

    return rootClientset, clientConfig, stop
}
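
// NOTE: illustrative, hedged usage sketch only. The function name and
// namespace below are hypothetical and not part of the original file; they
// show how a test would typically drive the helper above: start the server,
// use the root clientset for privileged setup, and always call stop.
func serviceAccountServerUsageSketch(t *testing.T) {
    rootClient, clientConfig, stop := startServiceAccountTestServer(t)
    defer stop()

    // The anonymous clientConfig can later be copied and given a service
    // account bearer token to exercise the stub authorizer wired up above.
    _ = clientConfig

    // The "root" user is allowed to do anything, e.g. create a namespace.
    if _, err := rootClient.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: "sketch-ns"}}); err != nil {
        t.Fatalf("failed to create namespace: %v", err)
    }
}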

func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*v1.ServiceAccount, error) {
    if !shouldWait {
        return c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
    }

    var user *v1.ServiceAccount
    var err error
    err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
        user, err = c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
        if errors.IsNotFound(err) {
            return false, nil
        }
        if err != nil {
            return false, err
        }
        return true, nil
    })
    return user, err
}

func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name string, shouldWait bool) (string, string, error) {
    tokenName := ""
    token := ""

    findToken := func() (bool, error) {
        user, err := c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
        if errors.IsNotFound(err) {
            return false, nil
        }
        if err != nil {
            return false, err
        }

        for _, ref := range user.Secrets {
            secret, err := c.Core().Secrets(ns).Get(ref.Name, metav1.GetOptions{})
            if errors.IsNotFound(err) {
                continue
            }
            if err != nil {
                return false, err
            }
            if secret.Type != v1.SecretTypeServiceAccountToken {
                continue
            }
            name := secret.Annotations[v1.ServiceAccountNameKey]
            uid := secret.Annotations[v1.ServiceAccountUIDKey]
            tokenData := secret.Data[v1.ServiceAccountTokenKey]
            if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
                tokenName = secret.Name
                token = string(tokenData)
                return true, nil
            }
        }

        return false, nil
    }

    if shouldWait {
        err := wait.Poll(time.Second, 10*time.Second, findToken)
        if err != nil {
            return "", "", err
        }
    } else {
        ok, err := findToken()
        if err != nil {
            return "", "", err
        }
        if !ok {
            return "", "", fmt.Errorf("No token found for %s/%s", ns, name)
        }
    }
    return tokenName, token, nil
}

type testOperation func() error

func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string, authenticated bool, canRead bool, canWrite bool) {
    testSecret := &v1.Secret{
        ObjectMeta: v1.ObjectMeta{Name: "testSecret"},
        Data: map[string][]byte{"test": []byte("data")},
    }

    readOps := []testOperation{
        func() error {
            _, err := c.Core().Secrets(ns).List(v1.ListOptions{})
            return err
        },
        func() error {
            _, err := c.Core().Pods(ns).List(v1.ListOptions{})
            return err
        },
    }
    writeOps := []testOperation{
        func() error { _, err := c.Core().Secrets(ns).Create(testSecret); return err },
        func() error { return c.Core().Secrets(ns).Delete(testSecret.Name, nil) },
    }

    for _, op := range readOps {
        err := op()
        unauthorizedError := errors.IsUnauthorized(err)
        forbiddenError := errors.IsForbidden(err)

        switch {
        case !authenticated && !unauthorizedError:
            t.Fatalf("expected unauthorized error, got %v", err)
        case authenticated && unauthorizedError:
            t.Fatalf("unexpected unauthorized error: %v", err)
        case authenticated && canRead && forbiddenError:
            t.Fatalf("unexpected forbidden error: %v", err)
        case authenticated && !canRead && !forbiddenError:
            t.Fatalf("expected forbidden error, got: %v", err)
        }
    }

    for _, op := range writeOps {
        err := op()
        unauthorizedError := errors.IsUnauthorized(err)
        forbiddenError := errors.IsForbidden(err)

        switch {
        case !authenticated && !unauthorizedError:
            t.Fatalf("expected unauthorized error, got %v", err)
        case authenticated && unauthorizedError:
            t.Fatalf("unexpected unauthorized error: %v", err)
        case authenticated && canWrite && forbiddenError:
            t.Fatalf("unexpected forbidden error: %v", err)
        case authenticated && !canWrite && !forbiddenError:
            t.Fatalf("expected forbidden error, got: %v", err)
        }
    }
}
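
// NOTE: illustrative, hedged sketch only; the function name and namespace
// argument are hypothetical. It shows how the helpers above compose: fetch
// the token secret referenced by the "ro" service account, stamp it onto a
// copy of the anonymous config, and verify the resulting client can read but
// not write in its own namespace.
func readOnlyServiceAccountSketch(t *testing.T, rootClient *clientset.Clientset, clientConfig restclient.Config, ns string) {
    _, token, err := getReferencedServiceAccountToken(rootClient, ns, readOnlyServiceAccountName, true)
    if err != nil {
        t.Fatal(err)
    }

    roConfig := clientConfig
    roConfig.BearerToken = token
    roClient := clientset.NewForConfigOrDie(&roConfig)

    // authenticated=true, canRead=true, canWrite=false matches the stub
    // authorizer wired up in startServiceAccountTestServer.
    doServiceAccountAPIRequests(t, roClient, ns, true, true, false)
}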
102
vendor/k8s.io/kubernetes/test/integration/storageclasses/storage_classes_test.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
// +build integration,!no-etcd

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storageclasses

// This file contains tests for the storage classes API resource.

import (
    "testing"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
    storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
    storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/test/integration/framework"
)

const provisionerPluginName = "kubernetes.io/mock-provisioner"

// TestStorageClasses tests apiserver-side behavior of creation of storage class objects and their use by pvcs.
func TestStorageClasses(t *testing.T) {
    _, s := framework.RunAMaster(nil)
    defer s.Close()

    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})

    ns := framework.CreateTestingNamespace("storageclass", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    DoTestStorageClasses(t, client, ns)
}

// DoTestStorageClasses tests storage classes for one api version.
func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
    // Make a storage class object.
    s := storage.StorageClass{
        TypeMeta: metav1.TypeMeta{
            Kind: "StorageClass",
        },
        ObjectMeta: v1.ObjectMeta{
            Name: "gold",
        },
        Provisioner: provisionerPluginName,
    }

    if _, err := client.Storage().StorageClasses().Create(&s); err != nil {
        t.Errorf("unable to create test storage class: %v", err)
    }
    defer deleteStorageClassOrErrorf(t, client, s.Namespace, s.Name)

    // Template for pvcs that specify a storage class
    pvc := &v1.PersistentVolumeClaim{
        ObjectMeta: v1.ObjectMeta{
            Name: "XXX",
            Namespace: ns.Name,
            Annotations: map[string]string{
                storageutil.StorageClassAnnotation: "gold",
            },
        },
        Spec: v1.PersistentVolumeClaimSpec{
            Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")}},
            AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
        },
    }

    pvc.ObjectMeta.Name = "uses-storageclass"
    if _, err := client.Core().PersistentVolumeClaims(ns.Name).Create(pvc); err != nil {
        t.Errorf("Failed to create pvc: %v", err)
    }
    defer deletePersistentVolumeClaimOrErrorf(t, client, ns.Name, pvc.Name)
}

func deleteStorageClassOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
    if err := c.Storage().StorageClasses().Delete(name, nil); err != nil {
        t.Errorf("unable to delete storage class %v: %v", name, err)
    }
}

func deletePersistentVolumeClaimOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
    if err := c.Core().PersistentVolumeClaims(ns).Delete(name, nil); err != nil {
        t.Errorf("unable to delete persistent volume claim %v: %v", name, err)
    }
}
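
// NOTE: illustrative, hedged sketch only; the helper name is hypothetical.
// In this API version a claim requests a class through the beta annotation
// rather than a spec field, so the class selected by the pvc template above
// can be read back like this.
func requestedStorageClassSketch(pvc *v1.PersistentVolumeClaim) string {
    return pvc.Annotations[storageutil.StorageClassAnnotation]
}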
287
vendor/k8s.io/kubernetes/test/integration/thirdparty/thirdparty_test.go
generated
vendored
Normal file
@@ -0,0 +1,287 @@
// +build integration,!no-etcd

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package thirdparty

// This file contains tests for the ThirdPartyResource API.

import (
    "encoding/json"
    "reflect"
    "sort"
    "sync"
    "testing"
    "time"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/diff"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
    extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/test/integration/framework"
)

func TestThirdPartyDiscovery(t *testing.T) {
    group := "company.com"
    version := "v1"

    _, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
    defer s.Close()
    clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
    client := clientset.NewForConfigOrDie(clientConfig)

    // install thirdparty resource
    once := sync.Once{}
    deleteFoo := installThirdParty(t, client, clientConfig,
        &extensions.ThirdPartyResource{
            ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
            Versions: []extensions.APIVersion{{Name: version}},
        }, group, version, "foos",
    )
    defer once.Do(deleteFoo)

    // check whether it shows up in discovery properly
    resources, err := client.Discovery().ServerResourcesForGroupVersion("company.com/" + version)
    if err != nil {
        t.Fatal(err)
    }
    if len(resources.APIResources) != 1 {
        t.Fatalf("Expected exactly the resource \"foos\" in group version %v/%v via discovery, got: %v", group, version, resources.APIResources)
    }
    r := resources.APIResources[0]
    if r.Name != "foos" {
        t.Fatalf("Expected exactly the resource \"foos\" in group version %v/%v via discovery, got: %v", group, version, r)
    }
    sort.Strings(r.Verbs)
    expectedVerbs := []string{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"}
    if !reflect.DeepEqual([]string(r.Verbs), expectedVerbs) {
        t.Fatalf("Unexpected verbs for resource \"foos\" in group version %v/%v via discovery: expected=%v got=%v", group, version, expectedVerbs, r.Verbs)
    }

    // delete
    once.Do(deleteFoo)

    // check whether resource is also gone from discovery
    resources, err = client.Discovery().ServerResourcesForGroupVersion(group + "/" + version)
    if err == nil {
        for _, r := range resources.APIResources {
            if r.Name == "foos" {
                t.Fatalf("unexpected resource \"foos\" in group version %v/%v after deletion", group, version)
            }
        }
    }
}

// TODO these tests will eventually be runnable in a single test
func TestThirdPartyDelete(t *testing.T) {
    _, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
    defer s.Close()

    clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
    client := clientset.NewForConfigOrDie(clientConfig)

    DoTestInstallThirdPartyAPIDelete(t, client, clientConfig)
}

func TestThirdPartyMultiple(t *testing.T) {
    _, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
    defer s.Close()

    clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
    client := clientset.NewForConfigOrDie(clientConfig)

    DoTestInstallMultipleAPIs(t, client, clientConfig)
}

// TODO make multiple versions work. they've been broken
var versionsToTest = []string{"v1"}

type Foo struct {
    metav1.TypeMeta `json:",inline"`
    v1.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"`

    SomeField string `json:"someField"`
    OtherField int `json:"otherField"`
}

type FooList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty" description:"standard list metadata; see http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"`

    Items []Foo `json:"items"`
}
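
// NOTE: illustrative, hedged sketch only; the function name and field values
// are hypothetical. The TPR data plane is plain JSON under
// /apis/company.com/v1/namespaces/<ns>/foos, so a Foo can be created by
// POSTing its serialized form through a REST client built for that
// group/version, mirroring what the helpers below do.
func createFooSketch(fooClient *restclient.RESTClient) error {
    body, err := json.Marshal(&Foo{
        TypeMeta: metav1.TypeMeta{Kind: "Foo"},
        ObjectMeta: v1.ObjectMeta{Name: "example", Namespace: "default"},
        SomeField: "example field",
        OtherField: 7,
    })
    if err != nil {
        return err
    }
    _, err = fooClient.Post().Namespace("default").Resource("foos").Body(body).DoRaw()
    return err
}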

// installThirdParty installs a third party resource and returns a defer func
func installThirdParty(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, tpr *extensions.ThirdPartyResource, group, version, resource string) func() {
    var err error
    _, err = client.Extensions().ThirdPartyResources().Create(tpr)
    if err != nil {
        t.Fatal(err)
    }

    fooClientConfig := *clientConfig
    fooClientConfig.APIPath = "apis"
    fooClientConfig.GroupVersion = &schema.GroupVersion{Group: group, Version: version}
    fooClient, err := restclient.RESTClientFor(&fooClientConfig)
    if err != nil {
        t.Fatal(err)
    }

    err = wait.Poll(100*time.Millisecond, 60*time.Second, func() (bool, error) {
        _, err := fooClient.Get().Namespace("default").Resource(resource).DoRaw()
        if err == nil {
            return true, nil
        }
        if apierrors.IsNotFound(err) {
            return false, nil
        }

        return false, err
    })
    if err != nil {
        t.Fatal(err)
    }

    return func() {
        client.Extensions().ThirdPartyResources().Delete(tpr.Name, nil)
        err = wait.Poll(100*time.Millisecond, 60*time.Second, func() (bool, error) {
            _, err := fooClient.Get().Namespace("default").Resource(resource).DoRaw()
            if apierrors.IsNotFound(err) {
                return true, nil
            }

            return false, err
        })
        if err != nil {
            t.Fatal(err)
        }
    }
}

func DoTestInstallMultipleAPIs(t *testing.T, client clientset.Interface, clientConfig *restclient.Config) {
    group := "company.com"
    version := "v1"

    deleteFoo := installThirdParty(t, client, clientConfig,
        &extensions.ThirdPartyResource{
            ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
            Versions: []extensions.APIVersion{{Name: version}},
        }, group, version, "foos",
    )
    defer deleteFoo()

    // TODO make multiple resources in one version work
    // deleteBar = installThirdParty(t, client, clientConfig,
    // 	&extensions.ThirdPartyResource{
    // 		ObjectMeta: v1.ObjectMeta{Name: "bar.company.com"},
    // 		Versions: []extensions.APIVersion{{Name: version}},
    // 	}, group, version, "bars",
    // )
    // defer deleteBar()
}

func DoTestInstallThirdPartyAPIDelete(t *testing.T, client clientset.Interface, clientConfig *restclient.Config) {
    for _, version := range versionsToTest {
        testInstallThirdPartyAPIDeleteVersion(t, client, clientConfig, version)
    }
}

func testInstallThirdPartyAPIDeleteVersion(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, version string) {
    group := "company.com"

    deleteFoo := installThirdParty(t, client, clientConfig,
        &extensions.ThirdPartyResource{
            ObjectMeta: v1.ObjectMeta{Name: "foo.company.com"},
            Versions: []extensions.APIVersion{{Name: version}},
        }, group, version, "foos",
    )
    defer deleteFoo()

    fooClientConfig := *clientConfig
    fooClientConfig.APIPath = "apis"
    fooClientConfig.GroupVersion = &schema.GroupVersion{Group: group, Version: version}
    fooClient, err := restclient.RESTClientFor(&fooClientConfig)
    if err != nil {
        t.Fatal(err)
    }

    expectedObj := Foo{
        ObjectMeta: v1.ObjectMeta{
            Name: "test",
            Namespace: "default",
        },
        TypeMeta: metav1.TypeMeta{
            Kind: "Foo",
        },
        SomeField: "test field",
        OtherField: 10,
    }
    objBytes, err := json.Marshal(&expectedObj)
    if err != nil {
        t.Fatal(err)
    }

    if _, err := fooClient.Post().Namespace("default").Resource("foos").Body(objBytes).DoRaw(); err != nil {
        t.Fatal(err)
    }

    apiBytes, err := fooClient.Get().Namespace("default").Resource("foos").Name("test").DoRaw()
    if err != nil {
        t.Fatal(err)
    }
    item := Foo{}
    err = json.Unmarshal(apiBytes, &item)
    if err != nil {
        t.Fatal(err)
    }

    // Fill in fields set by the apiserver
    item.SelfLink = expectedObj.SelfLink
    item.ResourceVersion = expectedObj.ResourceVersion
    item.Namespace = expectedObj.Namespace
    item.UID = expectedObj.UID
    item.CreationTimestamp = expectedObj.CreationTimestamp
    if !reflect.DeepEqual(item, expectedObj) {
        t.Fatalf("expected:\n%v\n", diff.ObjectGoPrintSideBySide(expectedObj, item))
    }

    listBytes, err := fooClient.Get().Namespace("default").Resource("foos").DoRaw()
    if err != nil {
        t.Fatal(err)
    }
    list := FooList{}
    err = json.Unmarshal(listBytes, &list)
    if err != nil {
        t.Fatal(err)
    }
    if len(list.Items) != 1 {
        t.Fatalf("wrong item: %v", list)
    }

    if _, err := fooClient.Delete().Namespace("default").Resource("foos").Name("test").DoRaw(); err != nil {
        t.Fatal(err)
    }
    if _, err := fooClient.Get().Namespace("default").Resource("foos").Name("test").DoRaw(); !apierrors.IsNotFound(err) {
        t.Fatal(err)
    }
}
63
vendor/k8s.io/kubernetes/test/integration/utils.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package integration

import (
    "testing"
    "time"

    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
)

func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
    if err := c.Core().Pods(ns).Delete(name, nil); err != nil {
        t.Errorf("unable to delete pod %v: %v", name, err)
    }
}

// Requests to try. Each one should be forbidden or not forbidden
// depending on the authentication and authorization setup of the master.
var Code200 = map[int]bool{200: true}
var Code201 = map[int]bool{201: true}
var Code400 = map[int]bool{400: true}
var Code403 = map[int]bool{403: true}
var Code404 = map[int]bool{404: true}
var Code405 = map[int]bool{405: true}
var Code409 = map[int]bool{409: true}
var Code422 = map[int]bool{422: true}
var Code500 = map[int]bool{500: true}
var Code503 = map[int]bool{503: true}

// WaitForPodToDisappear polls the API server if the pod has been deleted.
func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, interval, timeout time.Duration) error {
    return wait.PollImmediate(interval, timeout, func() (bool, error) {
        _, err := podClient.Get(podName, metav1.GetOptions{})
        if err == nil {
            return false, nil
        } else {
            if errors.IsNotFound(err) {
                return true, nil
            } else {
                return false, err
            }
        }
    })
}
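
// NOTE: illustrative, hedged usage sketch only; the function name, namespace,
// and timings are hypothetical. A test that has issued a delete can block
// until the API server stops returning the pod.
func podDeletionSketch(t *testing.T, c clientset.Interface, ns, podName string) {
    DeletePodOrErrorf(t, c, ns, podName)
    if err := WaitForPodToDisappear(c.Core().Pods(ns), podName, time.Second, 30*time.Second); err != nil {
        t.Fatalf("pod %s/%s never disappeared: %v", ns, podName, err)
    }
}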
203
vendor/k8s.io/kubernetes/test/integration/volume/attach_detach_test.go
generated
vendored
Normal file
@@ -0,0 +1,203 @@
// +build integration,!no-etcd

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volume

import (
    "net/http/httptest"
    "testing"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/cache"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    "k8s.io/kubernetes/pkg/client/restclient"
    fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
    "k8s.io/kubernetes/pkg/controller/informers"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
    "k8s.io/kubernetes/test/integration/framework"
)

func fakePodWithVol(namespace string) *v1.Pod {
    fakePod := &v1.Pod{
        ObjectMeta: v1.ObjectMeta{
            Namespace: namespace,
            Name: "fakepod",
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: "fake-container",
                    Image: "nginx",
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name: "fake-mount",
                            MountPath: "/var/www/html",
                        },
                    },
                },
            },
            Volumes: []v1.Volume{
                {
                    Name: "fake-mount",
                    VolumeSource: v1.VolumeSource{
                        HostPath: &v1.HostPathVolumeSource{
                            Path: "/var/www/html",
                        },
                    },
                },
            },
            NodeName: "node-sandbox",
        },
    }
    return fakePod
}

// Via integration test we can verify that if pod delete
// event is somehow missed by AttachDetach controller - it still
// gets cleaned up by Desired State of World populator.
func TestPodDeletionWithDswp(t *testing.T) {
    _, server := framework.RunAMaster(nil)
    defer server.Close()
    namespaceName := "test-pod-deletion"

    node := &v1.Node{
        ObjectMeta: v1.ObjectMeta{
            Name: "node-sandbox",
            Annotations: map[string]string{
                volumehelper.ControllerManagedAttachAnnotation: "true",
            },
        },
    }

    ns := framework.CreateTestingNamespace(namespaceName, server, t)
    defer framework.DeleteTestingNamespace(ns, server, t)

    testClient, ctrl, podInformer, nodeInformer := createAdClients(ns, t, server, defaultSyncPeriod)

    pod := fakePodWithVol(namespaceName)
    podStopCh := make(chan struct{})

    if _, err := testClient.Core().Nodes().Create(node); err != nil {
t.Fatalf("Failed to created node : %v", err)
    }

    go nodeInformer.Run(podStopCh)

    if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil {
        t.Errorf("Failed to create pod : %v", err)
    }

    go podInformer.Run(podStopCh)

    // start controller loop
    stopCh := make(chan struct{})
    go ctrl.Run(stopCh)

    waitToObservePods(t, podInformer, 1)
    podKey, err := cache.MetaNamespaceKeyFunc(pod)
    if err != nil {
        t.Fatalf("MetaNamespaceKeyFunc failed with : %v", err)
    }

    podInformerObj, _, err := podInformer.GetStore().GetByKey(podKey)

    if err != nil {
        t.Fatalf("Pod not found in Pod Informer cache : %v", err)
    }

    podsToAdd := ctrl.GetDesiredStateOfWorld().GetPodToAdd()

    if len(podsToAdd) == 0 {
        t.Fatalf("Pod not added to desired state of world")
    }

    // let's stop pod events from getting triggered
    close(podStopCh)
    err = podInformer.GetStore().Delete(podInformerObj)
    if err != nil {
        t.Fatalf("Error deleting pod : %v", err)
    }

    waitToObservePods(t, podInformer, 0)
    // the populator loop turns every 1 minute
    time.Sleep(80 * time.Second)
    podsToAdd = ctrl.GetDesiredStateOfWorld().GetPodToAdd()
    if len(podsToAdd) != 0 {
        t.Fatalf("All pods should have been removed")
    }

    close(stopCh)
}

// wait for the podInformer to observe the pods. Call this function before
// running the RC manager to prevent the rc manager from creating new pods
// rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
    if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
        objects := podInformer.GetIndexer().List()
        if len(objects) == podNum {
            return true, nil
        } else {
            return false, nil
        }
    }); err != nil {
        t.Fatal(err)
    }
}

func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, attachdetach.AttachDetachController, cache.SharedIndexInformer, cache.SharedIndexInformer) {
    config := restclient.Config{
        Host: server.URL,
        ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion},
        QPS: 1000000,
        Burst: 1000000,
    }
    resyncPeriod := 12 * time.Hour
    testClient := clientset.NewForConfigOrDie(&config)

    host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
    plugin := &volumetest.FakeVolumePlugin{
        PluginName: provisionerPluginName,
        Host: host,
        Config: volume.VolumeConfig{},
        LastProvisionerOptions: volume.VolumeOptions{},
        NewAttacherCallCount: 0,
        NewDetacherCallCount: 0,
        Mounters: nil,
        Unmounters: nil,
        Attachers: nil,
        Detachers: nil,
    }
    plugins := []volume.VolumePlugin{plugin}
    cloud := &fakecloud.FakeCloud{}
    podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
    nodeInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "node-informer")), resyncPeriod)
    pvcInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pvc-informer")), resyncPeriod)
    pvInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pv-informer")), resyncPeriod)
    ctrl, err := attachdetach.NewAttachDetachController(testClient, podInformer, nodeInformer, pvcInformer, pvInformer, cloud, plugins, false, time.Second*5)
    if err != nil {
        t.Fatalf("Error creating AttachDetach : %v", err)
    }
    return testClient, ctrl, podInformer, nodeInformer
}
1166
vendor/k8s.io/kubernetes/test/integration/volume/persistent_volumes_test.go
generated
vendored
Normal file
File diff suppressed because it is too large