Switch to github.com/golang/dep for vendoring

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2017-01-31 16:45:59 -08:00
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions


@@ -0,0 +1,75 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"replenishment_controller.go",
"resource_quota_controller.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/quota:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/api/meta",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/watch",
],
)
go_test(
name = "go_default_test",
srcs = [
"replenishment_controller_test.go",
"resource_quota_controller_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/quota/generic:go_default_library",
"//pkg/quota/install:go_default_library",
"//pkg/util/intstr:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -0,0 +1,3 @@
reviewers:
- derekwaynecarr
- deads2k


@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package resourcequota contains a controller that makes resource quota usage observations
package resourcequota // import "k8s.io/kubernetes/pkg/controller/resourcequota"
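A minimal sketch (not part of this commit) of the callback contract the package is built around: any function matching the ReplenishmentFunc signature defined in replenishment_controller.go can be handed to the controller factories. The fmt output here is illustrative only.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// replenish matches the ReplenishmentFunc signature: it is invoked when a
// monitored object changes in a way that may free quota in the given namespace.
func replenish(groupKind schema.GroupKind, namespace string, object runtime.Object) {
	fmt.Printf("recalculate quota for %s in namespace %q\n", groupKind, namespace)
}

func main() {
	replenish(schema.GroupKind{Kind: "Pod"}, "default", nil)
}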


@@ -0,0 +1,289 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcequota
import (
"fmt"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/quota/evaluator/core"
"k8s.io/kubernetes/pkg/util/metrics"
)
// ReplenishmentFunc is a function that is invoked when the controller sees a change
// that may require a quota to be replenished (e.g. object deletion, or an object moving to a terminal state)
type ReplenishmentFunc func(groupKind schema.GroupKind, namespace string, object runtime.Object)
// ReplenishmentControllerOptions is an options struct that tells a factory
// how to configure a controller that can inform the quota system it should
// replenish quota
type ReplenishmentControllerOptions struct {
// The kind monitored for replenishment
GroupKind schema.GroupKind
// The period that should be used to re-sync the monitored resource
ResyncPeriod controller.ResyncPeriodFunc
// The function to invoke when a change is observed that should trigger
// replenishment
ReplenishmentFunc ReplenishmentFunc
}
// PodReplenishmentUpdateFunc will replenish if the old pod was quota tracked but the new one is not
func PodReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) {
return func(oldObj, newObj interface{}) {
oldPod := oldObj.(*v1.Pod)
newPod := newObj.(*v1.Pod)
if core.QuotaV1Pod(oldPod) && !core.QuotaV1Pod(newPod) {
options.ReplenishmentFunc(options.GroupKind, newPod.Namespace, oldPod)
}
}
}
// ObjectReplenishmentDeleteFunc will replenish on every delete
func ObjectReplenishmentDeleteFunc(options *ReplenishmentControllerOptions) func(obj interface{}) {
return func(obj interface{}) {
metaObject, err := meta.Accessor(obj)
if err != nil {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("replenishment controller could not get object from tombstone %+v, could take up to %v before quota is replenished", obj, options.ResyncPeriod())
utilruntime.HandleError(err)
return
}
metaObject, err = meta.Accessor(tombstone.Obj)
if err != nil {
glog.Errorf("replenishment controller tombstone contained object that is not a meta %+v, could take up to %v before quota is replenished", tombstone.Obj, options.ResyncPeriod())
utilruntime.HandleError(err)
return
}
}
options.ReplenishmentFunc(options.GroupKind, metaObject.GetNamespace(), nil)
}
}
// ReplenishmentControllerFactory knows how to build replenishment controllers
type ReplenishmentControllerFactory interface {
// NewController returns a controller configured with the specified options.
// This method is NOT thread-safe.
NewController(options *ReplenishmentControllerOptions) (cache.Controller, error)
}
// replenishmentControllerFactory implements ReplenishmentControllerFactory
type replenishmentControllerFactory struct {
kubeClient clientset.Interface
sharedInformerFactory informers.SharedInformerFactory
}
// NewReplenishmentControllerFactory returns a factory that knows how to build controllers
// to replenish resources when updated or deleted
func NewReplenishmentControllerFactory(f informers.SharedInformerFactory, kubeClient clientset.Interface) ReplenishmentControllerFactory {
return &replenishmentControllerFactory{
kubeClient: kubeClient,
sharedInformerFactory: f,
}
}
// NewReplenishmentControllerFactoryFromClient returns a factory that knows how to build controllers to replenish resources
// when updated or deleted using the specified client.
func NewReplenishmentControllerFactoryFromClient(kubeClient clientset.Interface) ReplenishmentControllerFactory {
return NewReplenishmentControllerFactory(nil, kubeClient)
}
// controllerFor returns a replenishment controller for the specified group resource.
func controllerFor(
groupResource schema.GroupResource,
f informers.SharedInformerFactory,
handlerFuncs cache.ResourceEventHandlerFuncs,
) (cache.Controller, error) {
genericInformer, err := f.ForResource(groupResource)
if err != nil {
return nil, err
}
informer := genericInformer.Informer()
informer.AddEventHandler(handlerFuncs)
return informer.GetController(), nil
}
func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (result cache.Controller, err error) {
if r.kubeClient != nil && r.kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replenishment_controller", r.kubeClient.Core().RESTClient().GetRateLimiter())
}
switch options.GroupKind {
case api.Kind("Pod"):
if r.sharedInformerFactory != nil {
result, err = controllerFor(api.Resource("pods"), r.sharedInformerFactory, cache.ResourceEventHandlerFuncs{
UpdateFunc: PodReplenishmentUpdateFunc(options),
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
})
break
}
result = informers.NewPodInformer(r.kubeClient, options.ResyncPeriod())
case api.Kind("Service"):
// TODO move to informer when defined
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().Services(v1.NamespaceAll).Watch(options)
},
},
&v1.Service{},
options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{
UpdateFunc: ServiceReplenishmentUpdateFunc(options),
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
case api.Kind("ReplicationController"):
// TODO move to informer when defined
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
},
},
&v1.ReplicationController{},
options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
case api.Kind("PersistentVolumeClaim"):
if r.sharedInformerFactory != nil {
result, err = controllerFor(api.Resource("persistentvolumeclaims"), r.sharedInformerFactory, cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
})
break
}
// TODO (derekwaynecarr) remove me when we can require a sharedInformerFactory in all code paths...
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
},
},
&v1.PersistentVolumeClaim{},
options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
case api.Kind("Secret"):
// TODO move to informer when defined
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Secrets(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().Secrets(v1.NamespaceAll).Watch(options)
},
},
&v1.Secret{},
options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
case api.Kind("ConfigMap"):
// TODO move to informer when defined
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).Watch(options)
},
},
&v1.ConfigMap{},
options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
default:
return nil, NewUnhandledGroupKindError(options.GroupKind)
}
return result, err
}
// ServiceReplenishmentUpdateFunc will replenish if a quota-tracked service has changed its service type
func ServiceReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) {
return func(oldObj, newObj interface{}) {
oldService := oldObj.(*v1.Service)
newService := newObj.(*v1.Service)
if core.GetQuotaServiceType(oldService) != core.GetQuotaServiceType(newService) {
options.ReplenishmentFunc(options.GroupKind, newService.Namespace, nil)
}
}
}
type unhandledKindErr struct {
kind schema.GroupKind
}
func (e unhandledKindErr) Error() string {
return fmt.Sprintf("no replenishment controller available for %s", e.kind)
}
func NewUnhandledGroupKindError(kind schema.GroupKind) error {
return unhandledKindErr{kind: kind}
}
func IsUnhandledGroupKindError(err error) bool {
if err == nil {
return false
}
_, ok := err.(unhandledKindErr)
return ok
}
// UnionReplenishmentControllerFactory iterates through its constituent factories, ignoring UnhandledGroupKind
// errors and returning the first success or failure it hits. If there are no hits either way, it returns an
// UnhandledGroupKind error.
type UnionReplenishmentControllerFactory []ReplenishmentControllerFactory
func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.Controller, error) {
for _, factory := range f {
controller, err := factory.NewController(options)
if !IsUnhandledGroupKindError(err) {
return controller, err
}
}
return nil, NewUnhandledGroupKindError(options.GroupKind)
}
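For context, a sketch of how these factories compose; startPodReplenishment is a hypothetical helper, not part of this change, and assumes an informers.SharedInformerFactory and clientset.Interface built elsewhere. The union factory tries the informer-backed implementation first and falls back to the client-only one for kinds it cannot handle.

// startPodReplenishment is a hypothetical helper showing factory composition.
func startPodReplenishment(
	f informers.SharedInformerFactory,
	kubeClient clientset.Interface,
	replenish ReplenishmentFunc,
	stopCh <-chan struct{},
) error {
	factory := UnionReplenishmentControllerFactory{
		NewReplenishmentControllerFactory(f, kubeClient),
		NewReplenishmentControllerFactoryFromClient(kubeClient),
	}
	c, err := factory.NewController(&ReplenishmentControllerOptions{
		GroupKind:         api.Kind("Pod"),
		ResyncPeriod:      controller.NoResyncPeriodFunc,
		ReplenishmentFunc: replenish,
	})
	if err != nil {
		return err // reached only if no constituent factory handles the kind
	}
	go c.Run(stopCh)
	return nil
}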


@@ -0,0 +1,156 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcequota
import (
"testing"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/intstr"
)
// testReplenishment lets us verify that replenishment functions are invoked
type testReplenishment struct {
groupKind schema.GroupKind
namespace string
}
// Replenish is a mock that records the last group kind and namespace that were replenished
func (t *testReplenishment) Replenish(groupKind schema.GroupKind, namespace string, object runtime.Object) {
t.groupKind = groupKind
t.namespace = namespace
}
func TestPodReplenishmentUpdateFunc(t *testing.T) {
mockReplenish := &testReplenishment{}
options := ReplenishmentControllerOptions{
GroupKind: api.Kind("Pod"),
ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: v1.PodStatus{Phase: v1.PodRunning},
}
newPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: v1.PodStatus{Phase: v1.PodFailed},
}
updateFunc := PodReplenishmentUpdateFunc(&options)
updateFunc(oldPod, newPod)
if mockReplenish.groupKind != api.Kind("Pod") {
t.Errorf("Unexpected group kind %v", mockReplenish.groupKind)
}
if mockReplenish.namespace != oldPod.Namespace {
t.Errorf("Unexpected namespace %v", mockReplenish.namespace)
}
}
func TestObjectReplenishmentDeleteFunc(t *testing.T) {
mockReplenish := &testReplenishment{}
options := ReplenishmentControllerOptions{
GroupKind: api.Kind("Pod"),
ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: v1.PodStatus{Phase: v1.PodRunning},
}
deleteFunc := ObjectReplenishmentDeleteFunc(&options)
deleteFunc(oldPod)
if mockReplenish.groupKind != api.Kind("Pod") {
t.Errorf("Unexpected group kind %v", mockReplenish.groupKind)
}
if mockReplenish.namespace != oldPod.Namespace {
t.Errorf("Unexpected namespace %v", mockReplenish.namespace)
}
}
func TestServiceReplenishmentUpdateFunc(t *testing.T) {
mockReplenish := &testReplenishment{}
options := ReplenishmentControllerOptions{
GroupKind: api.Kind("Service"),
ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldService := &v1.Service{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
newService := &v1.Service{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}}},
}
updateFunc := ServiceReplenishmentUpdateFunc(&options)
updateFunc(oldService, newService)
if mockReplenish.groupKind != api.Kind("Service") {
t.Errorf("Unexpected group kind %v", mockReplenish.groupKind)
}
if mockReplenish.namespace != oldService.Namespace {
t.Errorf("Unexpected namespace %v", mockReplenish.namespace)
}
mockReplenish = &testReplenishment{}
options = ReplenishmentControllerOptions{
GroupKind: api.Kind("Service"),
ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldService = &v1.Service{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
newService = &v1.Service{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{{
Port: 81,
TargetPort: intstr.FromInt(81),
}}},
}
updateFunc = ServiceReplenishmentUpdateFunc(&options)
updateFunc(oldService, newService)
if mockReplenish.groupKind == api.Kind("Service") {
t.Errorf("Unexpected group kind %v", mockReplenish.groupKind)
}
if mockReplenish.namespace == oldService.Namespace {
t.Errorf("Unexpected namespace %v", mockReplenish.namespace)
}
}
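The two scenarios above duplicate their fixture setup; as a sketch (not part of this change), the same coverage can be written table-driven so the replenish/no-replenish expectation sits next to each input:

func TestServiceReplenishmentUpdateFuncTableDriven(t *testing.T) {
	mkService := func(svcType v1.ServiceType, port int) *v1.Service {
		return &v1.Service{
			ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
			Spec: v1.ServiceSpec{
				Type:  svcType,
				Ports: []v1.ServicePort{{Port: int32(port), TargetPort: intstr.FromInt(port)}},
			},
		}
	}
	testCases := []struct {
		name            string
		oldSvc, newSvc  *v1.Service
		expectReplenish bool
	}{
		{"type change replenishes", mkService(v1.ServiceTypeNodePort, 80), mkService(v1.ServiceTypeClusterIP, 80), true},
		{"port-only change does not", mkService(v1.ServiceTypeNodePort, 80), mkService(v1.ServiceTypeNodePort, 81), false},
	}
	for _, tc := range testCases {
		mockReplenish := &testReplenishment{}
		updateFunc := ServiceReplenishmentUpdateFunc(&ReplenishmentControllerOptions{
			GroupKind:         api.Kind("Service"),
			ReplenishmentFunc: mockReplenish.Replenish,
			ResyncPeriod:      controller.NoResyncPeriodFunc,
		})
		updateFunc(tc.oldSvc, tc.newSvc)
		if replenished := mockReplenish.groupKind == api.Kind("Service"); replenished != tc.expectReplenish {
			t.Errorf("%s: replenished=%v, want %v", tc.name, replenished, tc.expectReplenish)
		}
	}
}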


@@ -0,0 +1,363 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcequota
import (
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/pkg/util/workqueue"
)
// ResourceQuotaControllerOptions holds options for creating a quota controller
type ResourceQuotaControllerOptions struct {
// Must have authority to list all quotas, and update quota status
KubeClient clientset.Interface
// Controls full recalculation of quota usage
ResyncPeriod controller.ResyncPeriodFunc
// Knows how to calculate usage
Registry quota.Registry
// Knows how to build controllers that notify the quota system of replenishment events
ControllerFactory ReplenishmentControllerFactory
// Controls full resync of objects monitored for replenishment.
ReplenishmentResyncPeriod controller.ResyncPeriodFunc
// List of GroupKind objects that should be monitored for replenishment at
// a faster frequency than the quota controller recalculation interval
GroupKindsToReplenish []schema.GroupKind
}
// ResourceQuotaController is responsible for tracking quota usage status in the system
type ResourceQuotaController struct {
// Must have authority to list all resources in the system, and update quota status
kubeClient clientset.Interface
// An index of resource quota objects by namespace
rqIndexer cache.Indexer
// Watches changes to all resource quotas
rqController cache.Controller
// ResourceQuota objects that need to be synchronized
queue workqueue.RateLimitingInterface
// missingUsageQueue holds objects that are missing the initial usage information
missingUsageQueue workqueue.RateLimitingInterface
// To allow injection of syncUsage for testing.
syncHandler func(key string) error
// function that controls full recalculation of quota usage
resyncPeriod controller.ResyncPeriodFunc
// knows how to calculate usage
registry quota.Registry
// controllers monitoring to notify for replenishment
replenishmentControllers []cache.Controller
}
func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController {
// build the resource quota controller
rq := &ResourceQuotaController{
kubeClient: options.KubeClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_primary"),
missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"),
resyncPeriod: options.ResyncPeriod,
registry: options.Registry,
replenishmentControllers: []cache.Controller{},
}
if options.KubeClient != nil && options.KubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", options.KubeClient.Core().RESTClient().GetRateLimiter())
}
// set the synchronization handler
rq.syncHandler = rq.syncResourceQuotaFromKey
// build the controller that observes quota
rq.rqIndexer, rq.rqController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).Watch(options)
},
},
&v1.ResourceQuota{},
rq.resyncPeriod(),
cache.ResourceEventHandlerFuncs{
AddFunc: rq.addQuota,
UpdateFunc: func(old, cur interface{}) {
// We are only interested in observing updates to quota.spec to drive updates to quota.status.
// We ignore all updates to quota.Status because they are all driven by this controller.
// IMPORTANT:
// We do not use this function to queue up a full quota recalculation. To do so would require
// us to enqueue all quota.Status updates, and since quota.Status updates involve additional queries
// that cannot be backed by a cache and result in a full query of a namespace's content, we do not
// want to pay that price on spurious status updates. As a result, a separate routine (enqueueAll)
// is responsible for enqueueing all resource quotas when doing a full resync.
oldResourceQuota := old.(*v1.ResourceQuota)
curResourceQuota := cur.(*v1.ResourceQuota)
if quota.V1Equals(oldResourceQuota.Spec.Hard, curResourceQuota.Spec.Hard) {
return
}
rq.addQuota(curResourceQuota)
},
// This will enter the sync loop and no-op, because the resource quota has been deleted from the store.
DeleteFunc: rq.enqueueResourceQuota,
},
cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
)
for _, groupKindToReplenish := range options.GroupKindsToReplenish {
controllerOptions := &ReplenishmentControllerOptions{
GroupKind: groupKindToReplenish,
ResyncPeriod: options.ReplenishmentResyncPeriod,
ReplenishmentFunc: rq.replenishQuota,
}
replenishmentController, err := options.ControllerFactory.NewController(controllerOptions)
if err != nil {
glog.Warningf("quota controller unable to replenish %s due to %v, changes only accounted during full resync", groupKindToReplenish, err)
} else {
rq.replenishmentControllers = append(rq.replenishmentControllers, replenishmentController)
}
}
return rq
}
// enqueueAll is called at the full-resync interval to force a full recalculation of quota usage statistics
func (rq *ResourceQuotaController) enqueueAll() {
defer glog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
for _, k := range rq.rqIndexer.ListKeys() {
rq.queue.Add(k)
}
}
// obj could be a *v1.ResourceQuota, or a cache.DeletedFinalStateUnknown marker item.
func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
rq.queue.Add(key)
}
func (rq *ResourceQuotaController) addQuota(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
resourceQuota := obj.(*v1.ResourceQuota)
// if we declared an intent that is not yet captured in status, prioritize it
if !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) {
rq.missingUsageQueue.Add(key)
return
}
// if we declared a constraint that has no usage (and which this controller can calculate), prioritize it
for constraint := range resourceQuota.Status.Hard {
if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
matchedResources := []api.ResourceName{api.ResourceName(constraint)}
for _, evaluator := range rq.registry.Evaluators() {
if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 {
rq.missingUsageQueue.Add(key)
return
}
}
}
}
// no special priority, go in normal recalc queue
rq.queue.Add(key)
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface) func() {
workFunc := func() bool {
key, quit := queue.Get()
if quit {
return true
}
defer queue.Done(key)
err := rq.syncHandler(key.(string))
if err == nil {
queue.Forget(key)
return false
}
utilruntime.HandleError(err)
queue.AddRateLimited(key)
return false
}
return func() {
for {
if quit := workFunc(); quit {
glog.Infof("resource quota controller worker shutting down")
return
}
}
}
}
// Run begins the quota controller using the specified number of workers
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
go rq.rqController.Run(stopCh)
// the controllers that replenish other resources to respond rapidly to state changes
for _, replenishmentController := range rq.replenishmentControllers {
go replenishmentController.Run(stopCh)
}
// the workers that chug through the quota calculation backlog
for i := 0; i < workers; i++ {
go wait.Until(rq.worker(rq.queue), time.Second, stopCh)
go wait.Until(rq.worker(rq.missingUsageQueue), time.Second, stopCh)
}
// the timer for how often we do a full recalculation across all quotas
go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
<-stopCh
glog.Infof("Shutting down ResourceQuotaController")
rq.queue.ShutDown()
}
// syncResourceQuotaFromKey syncs a quota key
func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := rq.rqIndexer.GetByKey(key)
if err != nil {
glog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
rq.queue.Add(key)
return err
}
if !exists {
glog.Infof("Resource quota has been deleted %v", key)
return nil
}
quota := *obj.(*v1.ResourceQuota)
return rq.syncResourceQuota(quota)
}
// syncResourceQuota runs a complete sync of resource quota status across all known kinds
func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota v1.ResourceQuota) (err error) {
// quota is dirty if any part of spec hard limits differs from the status hard limits
dirty := !api.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard)
resourceQuota := api.ResourceQuota{}
if err := v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(&v1ResourceQuota, &resourceQuota, nil); err != nil {
return err
}
// dirty tracks if the usage status differs from the previous sync;
// if so, we send a new usage with the latest status.
// If this is our first sync, it will be dirty by default, since we need to track usage.
dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil)
used := api.ResourceList{}
if resourceQuota.Status.Used != nil {
used = quota.Add(api.ResourceList{}, resourceQuota.Status.Used)
}
hardLimits := quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard)
newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry)
if err != nil {
return err
}
for key, value := range newUsage {
used[key] = value
}
// ensure set of used values match those that have hard constraints
hardResources := quota.ResourceNames(hardLimits)
used = quota.Mask(used, hardResources)
// Create a usage object based on the quota's resource version so that conflicting updates are detected.
// By default, we preserve the past usage observation and set hard to the current spec.
usage := api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
Name: resourceQuota.Name,
Namespace: resourceQuota.Namespace,
ResourceVersion: resourceQuota.ResourceVersion,
Labels: resourceQuota.Labels,
Annotations: resourceQuota.Annotations},
Status: api.ResourceQuotaStatus{
Hard: hardLimits,
Used: used,
},
}
dirty = dirty || !quota.Equals(usage.Status.Used, resourceQuota.Status.Used)
// there was a change observed by this controller that requires we update quota
if dirty {
v1Usage := &v1.ResourceQuota{}
if err := v1.Convert_api_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil {
return err
}
_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage)
return err
}
return nil
}
// replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
func (rq *ResourceQuotaController) replenishQuota(groupKind schema.GroupKind, namespace string, object runtime.Object) {
// check if the quota controller can evaluate this kind, if not, ignore it altogether...
evaluators := rq.registry.Evaluators()
evaluator, found := evaluators[groupKind]
if !found {
return
}
// check if this namespace even has a quota...
indexKey := &v1.ResourceQuota{}
indexKey.Namespace = namespace
resourceQuotas, err := rq.rqIndexer.Index("namespace", indexKey)
if err != nil {
glog.Errorf("quota controller could not find ResourceQuota associated with namespace: %s, could take up to %v before a quota replenishes", namespace, rq.resyncPeriod())
}
if len(resourceQuotas) == 0 {
return
}
// only queue those quotas that are tracking a resource associated with this kind.
for i := range resourceQuotas {
resourceQuota := resourceQuotas[i].(*v1.ResourceQuota)
internalResourceQuota := &api.ResourceQuota{}
if err := v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil {
glog.Error(err)
continue
}
resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard)
if intersection := evaluator.MatchingResources(resourceQuotaResources); len(intersection) > 0 {
// TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
rq.enqueueResourceQuota(resourceQuota)
}
}
}
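For context, a sketch (not part of this commit) of how the controller is wired and started; the resync periods and worker count are illustrative values, runQuotaController is a hypothetical helper, and StaticResyncPeriodFunc is assumed from pkg/controller.

// runQuotaController is a hypothetical helper showing typical wiring.
func runQuotaController(kubeClient clientset.Interface, registry quota.Registry, stopCh <-chan struct{}) {
	rq := NewResourceQuotaController(&ResourceQuotaControllerOptions{
		KubeClient:                kubeClient,
		ResyncPeriod:              controller.StaticResyncPeriodFunc(5 * time.Minute),
		Registry:                  registry,
		ControllerFactory:         NewReplenishmentControllerFactoryFromClient(kubeClient),
		ReplenishmentResyncPeriod: controller.StaticResyncPeriodFunc(12 * time.Hour),
		GroupKindsToReplenish: []schema.GroupKind{
			api.Kind("Pod"),
			api.Kind("Service"),
			api.Kind("PersistentVolumeClaim"),
		},
	})
	// Two workers drain the recalculation and missing-usage queues until stopCh closes.
	rq.Run(2, stopCh)
}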


@@ -0,0 +1,553 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcequota
import (
"strings"
"testing"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/quota/generic"
"k8s.io/kubernetes/pkg/quota/install"
)
func getResourceList(cpu, memory string) v1.ResourceList {
res := v1.ResourceList{}
if cpu != "" {
res[v1.ResourceCPU] = resource.MustParse(cpu)
}
if memory != "" {
res[v1.ResourceMemory] = resource.MustParse(memory)
}
return res
}
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
res := v1.ResourceRequirements{}
res.Requests = requests
res.Limits = limits
return res
}
func TestSyncResourceQuota(t *testing.T) {
podList := v1.PodList{
Items: []v1.Pod{
{
ObjectMeta: v1.ObjectMeta{Name: "pod-running", Namespace: "testing"},
Status: v1.PodStatus{Phase: v1.PodRunning},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "vol"}},
Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
},
},
{
ObjectMeta: v1.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
Status: v1.PodStatus{Phase: v1.PodRunning},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "vol"}},
Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
},
},
{
ObjectMeta: v1.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
Status: v1.PodStatus{Phase: v1.PodFailed},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "vol"}},
Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
},
},
},
}
resourceQuota := v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{Name: "quota", Namespace: "testing"},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("100Gi"),
v1.ResourcePods: resource.MustParse("5"),
},
},
}
expectedUsage := v1.ResourceQuota{
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("100Gi"),
v1.ResourcePods: resource.MustParse("5"),
},
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200m"),
v1.ResourceMemory: resource.MustParse("2Gi"),
v1.ResourcePods: resource.MustParse("2"),
},
},
}
kubeClient := fake.NewSimpleClientset(&podList, &resourceQuota)
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
KubeClient: kubeClient,
ResyncPeriod: controller.NoResyncPeriodFunc,
Registry: install.NewRegistry(kubeClient, nil),
GroupKindsToReplenish: []schema.GroupKind{
api.Kind("Pod"),
api.Kind("Service"),
api.Kind("ReplicationController"),
api.Kind("PersistentVolumeClaim"),
},
ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient),
ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
}
quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
err := quotaController.syncResourceQuota(resourceQuota)
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
expectedActionSet := sets.NewString(
strings.Join([]string{"list", "pods", ""}, "-"),
strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
)
actionSet := sets.NewString()
for _, action := range kubeClient.Actions() {
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
}
if !actionSet.HasAll(expectedActionSet.List()...) {
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
}
lastActionIndex := len(kubeClient.Actions()) - 1
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)
// ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard {
actual := usage.Status.Hard[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
for k, v := range expectedUsage.Status.Used {
actual := usage.Status.Used[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
}
func TestSyncResourceQuotaSpecChange(t *testing.T) {
resourceQuota := v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
},
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
}
expectedUsage := v1.ResourceQuota{
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
}
kubeClient := fake.NewSimpleClientset(&resourceQuota)
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
KubeClient: kubeClient,
ResyncPeriod: controller.NoResyncPeriodFunc,
Registry: install.NewRegistry(kubeClient, nil),
GroupKindsToReplenish: []schema.GroupKind{
api.Kind("Pod"),
api.Kind("Service"),
api.Kind("ReplicationController"),
api.Kind("PersistentVolumeClaim"),
},
ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient),
ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
}
quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
err := quotaController.syncResourceQuota(resourceQuota)
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
expectedActionSet := sets.NewString(
strings.Join([]string{"list", "pods", ""}, "-"),
strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
)
actionSet := sets.NewString()
for _, action := range kubeClient.Actions() {
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
}
if !actionSet.HasAll(expectedActionSet.List()...) {
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
}
lastActionIndex := len(kubeClient.Actions()) - 1
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)
// ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard {
actual := usage.Status.Hard[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
for k, v := range expectedUsage.Status.Used {
actual := usage.Status.Used[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
}
func TestSyncResourceQuotaSpecHardChange(t *testing.T) {
resourceQuota := v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
}
expectedUsage := v1.ResourceQuota{
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
}
kubeClient := fake.NewSimpleClientset(&resourceQuota)
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
KubeClient: kubeClient,
ResyncPeriod: controller.NoResyncPeriodFunc,
Registry: install.NewRegistry(kubeClient, nil),
GroupKindsToReplenish: []schema.GroupKind{
api.Kind("Pod"),
api.Kind("Service"),
api.Kind("ReplicationController"),
api.Kind("PersistentVolumeClaim"),
},
ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient),
ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
}
quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
err := quotaController.syncResourceQuota(resourceQuota)
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
expectedActionSet := sets.NewString(
strings.Join([]string{"list", "pods", ""}, "-"),
strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
)
actionSet := sets.NewString()
for _, action := range kubeClient.Actions() {
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
}
if !actionSet.HasAll(expectedActionSet.List()...) {
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
}
lastActionIndex := len(kubeClient.Actions()) - 1
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)
// ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard {
actual := usage.Status.Hard[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
for k, v := range expectedUsage.Status.Used {
actual := usage.Status.Used[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
// ensure usage hard and used are synced with spec hard and contain no stale resources
for k, v := range usage.Status.Hard {
if k == v1.ResourceMemory {
t.Errorf("Unexpected Usage Hard: Key: %v, Value: %v", k, v.String())
}
}
for k, v := range usage.Status.Used {
if k == v1.ResourceMemory {
t.Errorf("Unexpected Usage Used: Key: %v, Value: %v", k, v.String())
}
}
}
func TestSyncResourceQuotaNoChange(t *testing.T) {
resourceQuota := v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
}
kubeClient := fake.NewSimpleClientset(&v1.PodList{}, &resourceQuota)
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
KubeClient: kubeClient,
ResyncPeriod: controller.NoResyncPeriodFunc,
Registry: install.NewRegistry(kubeClient, nil),
GroupKindsToReplenish: []schema.GroupKind{
api.Kind("Pod"),
api.Kind("Service"),
api.Kind("ReplicationController"),
api.Kind("PersistentVolumeClaim"),
},
ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient),
ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
}
quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
err := quotaController.syncResourceQuota(resourceQuota)
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
expectedActionSet := sets.NewString(
strings.Join([]string{"list", "pods", ""}, "-"),
)
actionSet := sets.NewString()
for _, action := range kubeClient.Actions() {
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
}
if !actionSet.HasAll(expectedActionSet.List()...) {
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
}
}
func TestAddQuota(t *testing.T) {
kubeClient := fake.NewSimpleClientset()
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
KubeClient: kubeClient,
ResyncPeriod: controller.NoResyncPeriodFunc,
Registry: install.NewRegistry(kubeClient, nil),
GroupKindsToReplenish: []schema.GroupKind{
api.Kind("Pod"),
api.Kind("ReplicationController"),
api.Kind("PersistentVolumeClaim"),
},
ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient),
ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
}
quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
delete(quotaController.registry.(*generic.GenericRegistry).InternalEvaluators, api.Kind("Service"))
testCases := []struct {
name string
quota *v1.ResourceQuota
expectedPriority bool
}{
{
name: "no status",
expectedPriority: true,
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
},
},
{
name: "status, no usage",
expectedPriority: true,
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
},
},
{
name: "status, mismatch",
expectedPriority: true,
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("6"),
},
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
},
},
{
name: "status, missing usage, but don't care",
expectedPriority: false,
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceServices: resource.MustParse("4"),
},
},
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceServices: resource.MustParse("4"),
},
},
},
},
{
name: "ready",
expectedPriority: false,
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
},
},
}
for _, tc := range testCases {
quotaController.addQuota(tc.quota)
if tc.expectedPriority {
if e, a := 1, quotaController.missingUsageQueue.Len(); e != a {
t.Errorf("%s: expected %v, got %v", tc.name, e, a)
}
if e, a := 0, quotaController.queue.Len(); e != a {
t.Errorf("%s: expected %v, got %v", tc.name, e, a)
}
} else {
if e, a := 0, quotaController.missingUsageQueue.Len(); e != a {
t.Errorf("%s: expected %v, got %v", tc.name, e, a)
}
if e, a := 1, quotaController.queue.Len(); e != a {
t.Errorf("%s: expected %v, got %v", tc.name, e, a)
}
}
for quotaController.missingUsageQueue.Len() > 0 {
key, _ := quotaController.missingUsageQueue.Get()
quotaController.missingUsageQueue.Done(key)
}
for quotaController.queue.Len() > 0 {
key, _ := quotaController.queue.Get()
quotaController.queue.Done(key)
}
}
}