// +build integration,!no-etcd

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/watch"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/informers"
	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
	resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
	kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
	quotainstall "k8s.io/kubernetes/pkg/quota/install"
	"k8s.io/kubernetes/plugin/pkg/admission/resourcequota"
	"k8s.io/kubernetes/test/integration/framework"
)

// 1.2 code gets:
// quota_test.go:95: Took 4.218619579s to scale up without quota
// quota_test.go:199: unexpected error: timed out waiting for the condition, ended with 342 pods (1 minute)
// 1.3+ code gets:
// quota_test.go:100: Took 4.196205966s to scale up without quota
// quota_test.go:115: Took 12.021640372s to scale up with quota
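// TestQuota scales a replication controller up in a namespace without a quota
// and in a namespace with an active quota, and logs the timing difference.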
func TestQuota(t *testing.T) {
	// Set up a master
	h := &framework.MasterHolder{Initialized: make(chan struct{})}
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		<-h.Initialized
		h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

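	// Build the clients and the ResourceQuota admission plugin the master will use.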
	admissionCh := make(chan struct{})
	clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
	internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
	admission, err := resourcequota.NewResourceQuota(quotainstall.NewRegistry(nil, nil), 5, admissionCh)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	admission.(kubeadmission.WantsInternalClientSet).SetInternalClientSet(internalClientset)
	defer close(admissionCh)

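	// Start the master with quota admission enabled, then create one quotaed and one non-quotaed namespace.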
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.AdmissionControl = admission
	framework.RunAMasterUsingServer(masterConfig, s, h)

	ns := framework.CreateTestingNamespace("quotaed", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	ns2 := framework.CreateTestingNamespace("non-quotaed", s, t)
	defer framework.DeleteTestingNamespace(ns2, s, t)

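	// Run the replication manager so the replication controllers created below actually spawn pods.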
	controllerCh := make(chan struct{})
	defer close(controllerCh)

	informers := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
	podInformer := informers.Pods().Informer()
	rcInformer := informers.ReplicationControllers().Informer()
	rm := replicationcontroller.NewReplicationManager(podInformer, rcInformer, clientset, replicationcontroller.BurstReplicas, 4096, false)
	rm.SetEventRecorder(&record.FakeRecorder{})
	informers.Start(controllerCh)
	go rm.Run(3, controllerCh)

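	// Run the resource quota controller so quota status and usage are kept up to date.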
	resourceQuotaRegistry := quotainstall.NewRegistry(clientset, nil)
	groupKindsToReplenish := []schema.GroupKind{
		api.Kind("Pod"),
	}
	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
		KubeClient:                clientset,
		ResyncPeriod:              controller.NoResyncPeriodFunc,
		Registry:                  resourceQuotaRegistry,
		GroupKindsToReplenish:     groupKindsToReplenish,
		ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
		ControllerFactory:         resourcequotacontroller.NewReplenishmentControllerFactoryFromClient(clientset),
	}
	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(2, controllerCh)

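	// Baseline: scale up in the namespace that has no quota.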
	startTime := time.Now()
	scale(t, ns2.Name, clientset)
	endTime := time.Now()
	t.Logf("Took %v to scale up without quota", endTime.Sub(startTime))

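	// Create a quota large enough not to block the scale-up, then scale in the quotaed namespace.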
	quota := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "quota",
			Namespace: ns.Name,
		},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourcePods: resource.MustParse("1000"),
			},
		},
	}
	waitForQuota(t, quota, clientset)

	startTime = time.Now()
	scale(t, "quotaed", clientset)
	endTime = time.Now()
	t.Logf("Took %v to scale up with quota", endTime.Sub(startTime))
}

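// waitForQuota creates the given quota and blocks until its status reports hard
// limits, i.e. the quota controller has reconciled it.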
func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

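	// Watch the quota object until its status carries hard limits.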
	_, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}

		switch cast := event.Object.(type) {
		case *v1.ResourceQuota:
			if len(cast.Status.Hard) > 0 {
				return true, nil
			}
		}

		return false, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}

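// scale creates a 100-replica replication controller in the given namespace and
// waits until its status reports all replicas.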
func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
	target := int32(100)
	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foo",
			Namespace: namespace,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &target,
			Selector: map[string]string{"foo": "bar"},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "container",
							Image: "busybox",
						},
					},
				},
			},
		},
	}

	w, err := clientset.Core().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

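	// Watch the RC until its status reports the target replica count; on timeout,
	// report how many pods actually exist.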
	_, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}

		switch cast := event.Object.(type) {
		case *v1.ReplicationController:
			fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
			if cast.Status.Replicas == target {
				return true, nil
			}
		}

		return false, nil
	})
	if err != nil {
		pods, _ := clientset.Core().Pods(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
		t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
	}
}