Switch to github.com/golang/dep for vendoring

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2017-01-31 16:45:59 -08:00
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions

@@ -0,0 +1,84 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"horizontal.go",
"replica_calculator.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/autoscaling/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/autoscaling/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller/podautoscaler/metrics:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/watch",
],
)
go_test(
name = "go_default_test",
srcs = [
"horizontal_test.go",
"replica_calculator_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/unversioned:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/autoscaling/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller/podautoscaler/metrics:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:github.com/stretchr/testify/require",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/heapster/metrics/api/v1/types",
"//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/podautoscaler/metrics:all-srcs",
],
tags = ["automanaged"],
)

@@ -0,0 +1,6 @@
reviewers:
- DirectXMan12
- mwielgus
- piosz
- jszczepkowski
- fgrzadkowski

@@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package podautoscaler contains logic for autoscaling number of
// pods based on metrics observed.
package podautoscaler // import "k8s.io/kubernetes/pkg/controller/podautoscaler"

@@ -0,0 +1,440 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podautoscaler
import (
"encoding/json"
"fmt"
"math"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/record"
)
const (
// Usage should exceed the tolerance before we start downscaling or upscaling the pods.
// TODO: make it a flag or HPA spec element.
tolerance = 0.1
defaultTargetCPUUtilizationPercentage = 80
HpaCustomMetricsTargetAnnotationName = "alpha/target.custom-metrics.podautoscaler.kubernetes.io"
HpaCustomMetricsStatusAnnotationName = "alpha/status.custom-metrics.podautoscaler.kubernetes.io"
scaleUpLimitFactor = 2
scaleUpLimitMinimum = 4
)
func calculateScaleUpLimit(currentReplicas int32) int32 {
return int32(math.Max(scaleUpLimitFactor*float64(currentReplicas), scaleUpLimitMinimum))
}
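// For illustration, with the constants above: calculateScaleUpLimit(1) = max(2*1, 4) = 4
// (the minimum dominates), while calculateScaleUpLimit(10) = max(2*10, 4) = 20
// (the factor dominates).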
type HorizontalController struct {
scaleNamespacer unversionedextensions.ScalesGetter
hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter
replicaCalc *ReplicaCalculator
eventRecorder record.EventRecorder
// A store of HPA objects, populated by the controller.
store cache.Store
// Watches changes to all HPA objects.
controller cache.Controller
}
var downscaleForbiddenWindow = 5 * time.Minute
var upscaleForbiddenWindow = 3 * time.Minute
func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, cache.Controller) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).Watch(options)
},
},
&autoscaling.HorizontalPodAutoscaler{},
resyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
hpa := obj.(*autoscaling.HorizontalPodAutoscaler)
hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil
_, hasCustomMetricsPolicy := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]
if !hasCPUPolicy && !hasCustomMetricsPolicy {
controller.eventRecorder.Event(hpa, v1.EventTypeNormal, "DefaultPolicy", "No scaling policy specified - will use default one. See documentation for details")
}
err := controller.reconcileAutoscaler(hpa)
if err != nil {
glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
}
},
UpdateFunc: func(old, cur interface{}) {
hpa := cur.(*autoscaling.HorizontalPodAutoscaler)
err := controller.reconcileAutoscaler(hpa)
if err != nil {
glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
}
},
// We are not interested in deletions.
},
)
}
func NewHorizontalController(evtNamespacer v1core.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter, replicaCalc *ReplicaCalculator, resyncPeriod time.Duration) *HorizontalController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
recorder := broadcaster.NewRecorder(v1.EventSource{Component: "horizontal-pod-autoscaler"})
controller := &HorizontalController{
replicaCalc: replicaCalc,
eventRecorder: recorder,
scaleNamespacer: scaleNamespacer,
hpaNamespacer: hpaNamespacer,
}
store, frameworkController := newInformer(controller, resyncPeriod)
controller.store = store
controller.controller = frameworkController
return controller
}
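// A typical wiring, as a sketch (the clientset, metricsClient, resyncPeriod and
// stopCh variables are assumed to exist):
// replicaCalc := NewReplicaCalculator(metricsClient, clientset.Core())
// hpaController := NewHorizontalController(clientset.Core(), clientset.Extensions(), clientset.Autoscaling(), replicaCalc, resyncPeriod)
// go hpaController.Run(stopCh)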
func (a *HorizontalController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting HPA Controller")
go a.controller.Run(stopCh)
<-stopCh
glog.Infof("Shutting down HPA Controller")
}
// getLastScaleTime returns the hpa's last scale time or the hpa's creation time if the last scale time is nil.
func getLastScaleTime(hpa *autoscaling.HorizontalPodAutoscaler) time.Time {
lastScaleTime := hpa.Status.LastScaleTime
if lastScaleTime == nil {
lastScaleTime = &hpa.CreationTimestamp
}
return lastScaleTime.Time
}
func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *autoscaling.HorizontalPodAutoscaler, scale *extensionsv1beta1.Scale) (int32, *int32, time.Time, error) {
targetUtilization := int32(defaultTargetCPUUtilizationPercentage)
if hpa.Spec.TargetCPUUtilizationPercentage != nil {
targetUtilization = *hpa.Spec.TargetCPUUtilizationPercentage
}
currentReplicas := scale.Status.Replicas
if scale.Status.Selector == nil {
errMsg := "selector is required"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
return 0, nil, time.Time{}, fmt.Errorf(errMsg)
}
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: scale.Status.Selector})
if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
return 0, nil, time.Time{}, fmt.Errorf(errMsg)
}
desiredReplicas, utilization, timestamp, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, v1.ResourceCPU, hpa.Namespace, selector)
if err != nil {
lastScaleTime := getLastScaleTime(hpa)
if time.Now().After(lastScaleTime.Add(upscaleForbiddenWindow)) {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetMetrics", err.Error())
} else {
a.eventRecorder.Event(hpa, v1.EventTypeNormal, "MetricsNotAvailableYet", err.Error())
}
return 0, nil, time.Time{}, fmt.Errorf("failed to get CPU utilization: %v", err)
}
if desiredReplicas != currentReplicas {
a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "DesiredReplicasComputed",
"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
desiredReplicas, utilization, scale.Status.Replicas)
}
return desiredReplicas, &utilization, timestamp, nil
}
// computeReplicasForCustomMetrics computes the desired number of replicas based on the CustomMetrics passed in cmAnnotation
// as json-serialized extensions.CustomMetricsTargetList.
// Returns the number of replicas, the metric which required the highest number of replicas,
// a status string (also a json-serialized extensions.CustomMetricsCurrentStatusList),
// and the last timestamp of the metrics involved in the computation, or an error if one occurred.
func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.HorizontalPodAutoscaler, scale *extensionsv1beta1.Scale,
cmAnnotation string) (replicas int32, metric string, status string, timestamp time.Time, err error) {
if cmAnnotation == "" {
return
}
currentReplicas := scale.Status.Replicas
var targetList extensionsv1beta1.CustomMetricTargetList
if err := json.Unmarshal([]byte(cmAnnotation), &targetList); err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedParseCustomMetricsAnnotation", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("failed to parse custom metrics annotation: %v", err)
}
if len(targetList.Items) == 0 {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "NoCustomMetricsInAnnotation", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("no custom metrics in annotation")
}
statusList := extensionsv1beta1.CustomMetricCurrentStatusList{
Items: make([]extensionsv1beta1.CustomMetricCurrentStatus, 0),
}
for _, customMetricTarget := range targetList.Items {
if scale.Status.Selector == nil {
errMsg := "selector is required"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
return 0, "", "", time.Time{}, fmt.Errorf("selector is required")
}
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: scale.Status.Selector})
if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
return 0, "", "", time.Time{}, fmt.Errorf("couldn't convert selector string to a corresponding selector object: %v", err)
}
floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, floatTarget, fmt.Sprintf("custom/%s", customMetricTarget.Name), hpa.Namespace, selector)
if err != nil {
lastScaleTime := getLastScaleTime(hpa)
if time.Now().After(lastScaleTime.Add(upscaleForbiddenWindow)) {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
} else {
a.eventRecorder.Event(hpa, v1.EventTypeNormal, "CustomMetricsNotAvailableYet", err.Error())
}
return 0, "", "", time.Time{}, fmt.Errorf("failed to get custom metric value: %v", err)
}
if replicaCountProposal > replicas {
timestamp = timestampProposal
replicas = replicaCountProposal
metric = fmt.Sprintf("Custom metric %s", customMetricTarget.Name)
}
quantity, err := resource.ParseQuantity(fmt.Sprintf("%.3f", utilizationProposal))
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedSetCustomMetrics", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("failed to set custom metric value: %v", err)
}
statusList.Items = append(statusList.Items, extensionsv1beta1.CustomMetricCurrentStatus{
Name: customMetricTarget.Name,
CurrentValue: quantity,
})
}
byteStatusList, err := json.Marshal(statusList)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedSerializeCustomMetrics", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("failed to serialize custom metric status: %v", err)
}
if replicas != currentReplicas {
a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "DesiredReplicasComputedCustomMetric",
"Computed the desired num of replicas: %d, metric: %s, current replicas: %d",
func() *int32 { i := int32(replicas); return &i }(), metric, scale.Status.Replicas)
}
return replicas, metric, string(byteStatusList), timestamp, nil
}
func (a *HorizontalController) reconcileAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler) error {
reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)
scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
}
currentReplicas := scale.Status.Replicas
cpuDesiredReplicas := int32(0)
cpuCurrentUtilization := new(int32)
cpuTimestamp := time.Time{}
cmDesiredReplicas := int32(0)
cmMetric := ""
cmStatus := ""
cmTimestamp := time.Time{}
desiredReplicas := int32(0)
rescaleReason := ""
timestamp := time.Now()
rescale := true
if scale.Spec.Replicas == 0 {
// Autoscaling is disabled for this resource
desiredReplicas = 0
rescale = false
} else if currentReplicas > hpa.Spec.MaxReplicas {
rescaleReason = "Current number of replicas above Spec.MaxReplicas"
desiredReplicas = hpa.Spec.MaxReplicas
} else if hpa.Spec.MinReplicas != nil && currentReplicas < *hpa.Spec.MinReplicas {
rescaleReason = "Current number of replicas below Spec.MinReplicas"
desiredReplicas = *hpa.Spec.MinReplicas
} else if currentReplicas == 0 {
rescaleReason = "Current number of replicas must be greater than 0"
desiredReplicas = 1
} else {
// All basic scenarios covered, the state should be sane, let's use metrics.
cmAnnotation, cmAnnotationFound := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]
if hpa.Spec.TargetCPUUtilizationPercentage != nil || !cmAnnotationFound {
cpuDesiredReplicas, cpuCurrentUtilization, cpuTimestamp, err = a.computeReplicasForCPUUtilization(hpa, scale)
if err != nil {
a.updateCurrentReplicasInStatus(hpa, currentReplicas)
return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err)
}
}
if cmAnnotationFound {
cmDesiredReplicas, cmMetric, cmStatus, cmTimestamp, err = a.computeReplicasForCustomMetrics(hpa, scale, cmAnnotation)
if err != nil {
a.updateCurrentReplicasInStatus(hpa, currentReplicas)
return fmt.Errorf("failed to compute desired number of replicas based on Custom Metrics for %s: %v", reference, err)
}
}
rescaleMetric := ""
if cpuDesiredReplicas > desiredReplicas {
desiredReplicas = cpuDesiredReplicas
timestamp = cpuTimestamp
rescaleMetric = "CPU utilization"
}
if cmDesiredReplicas > desiredReplicas {
desiredReplicas = cmDesiredReplicas
timestamp = cmTimestamp
rescaleMetric = cmMetric
}
if desiredReplicas > currentReplicas {
rescaleReason = fmt.Sprintf("%s above target", rescaleMetric)
}
if desiredReplicas < currentReplicas {
rescaleReason = "All metrics below target"
}
if hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas {
desiredReplicas = *hpa.Spec.MinReplicas
}
// never scale down to 0, reserved for disabling autoscaling
if desiredReplicas == 0 {
desiredReplicas = 1
}
if desiredReplicas > hpa.Spec.MaxReplicas {
desiredReplicas = hpa.Spec.MaxReplicas
}
// Do not scale up too much, to prevent an incorrect rapid increase in the number of replicas
// caused by bogus CPU usage reports from heapster/kubelet (as in issue #32304).
if desiredReplicas > calculateScaleUpLimit(currentReplicas) {
desiredReplicas = calculateScaleUpLimit(currentReplicas)
}
rescale = shouldScale(hpa, currentReplicas, desiredReplicas, timestamp)
}
if rescale {
scale.Spec.Replicas = desiredReplicas
_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale)
if err != nil {
a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
return fmt.Errorf("failed to rescale %s: %v", reference, err)
}
a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
glog.Infof("Successfull rescale of %s, old size: %d, new size: %d, reason: %s",
hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
} else {
desiredReplicas = currentReplicas
}
return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale)
}
func shouldScale(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool {
if desiredReplicas == currentReplicas {
return false
}
if hpa.Status.LastScaleTime == nil {
return true
}
// Going down only if the usageRatio dropped significantly below the target
// and there was no rescaling in the last downscaleForbiddenWindow.
if desiredReplicas < currentReplicas && hpa.Status.LastScaleTime.Add(downscaleForbiddenWindow).Before(timestamp) {
return true
}
// Going up only if the usage ratio increased significantly above the target
// and there was no rescaling in the last upscaleForbiddenWindow.
if desiredReplicas > currentReplicas && hpa.Status.LastScaleTime.Add(upscaleForbiddenWindow).Before(timestamp) {
return true
}
return false
}
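// For example, with the default windows above, an HPA that last scaled at 12:00
// may scale up again from 12:03 (upscaleForbiddenWindow) but may not scale down
// before 12:05 (downscaleForbiddenWindow).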
func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas int32) {
err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false)
if err != nil {
glog.Errorf("%v", err)
}
}
func (a *HorizontalController) updateStatus(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, cpuCurrentUtilization *int32, cmStatus string, rescale bool) error {
hpa.Status = autoscaling.HorizontalPodAutoscalerStatus{
CurrentReplicas: currentReplicas,
DesiredReplicas: desiredReplicas,
CurrentCPUUtilizationPercentage: cpuCurrentUtilization,
LastScaleTime: hpa.Status.LastScaleTime,
}
if cmStatus != "" {
hpa.Annotations[HpaCustomMetricsStatusAnnotationName] = cmStatus
}
if rescale {
now := metav1.NewTime(time.Now())
hpa.Status.LastScaleTime = &now
}
_, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(hpa)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
}
glog.V(2).Infof("Successfully updated status for %s", hpa.Name)
return nil
}
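
To make the clamping order in reconcileAutoscaler above concrete: the MinReplicas floor is applied first, then the never-scale-to-zero rule, then the MaxReplicas ceiling, and finally the scale-up limit. A minimal standalone sketch of that ordering (the clampDesiredReplicas helper is hypothetical, not part of this commit):

package main
import (
"fmt"
"math"
)
// clampDesiredReplicas mirrors the clamping order used in reconcileAutoscaler:
// min-replicas floor, never zero, max-replicas ceiling, then the scale-up limit.
func clampDesiredReplicas(desired, current, min, max int32) int32 {
if desired < min {
desired = min
}
// never scale down to 0, reserved for disabling autoscaling
if desired == 0 {
desired = 1
}
if desired > max {
desired = max
}
// scaleUpLimitFactor = 2, scaleUpLimitMinimum = 4, as in the constants above
limit := int32(math.Max(2*float64(current), 4))
if desired > limit {
desired = limit
}
return desired
}
func main() {
// 3 current replicas asking for 50: capped by max (40), then by the scale-up limit
fmt.Println(clampDesiredReplicas(50, 3, 1, 40)) // prints 6
}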

File diff suppressed because it is too large

@@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"metrics_client.go",
"utilization.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/heapster/metrics/api/v1/types",
"//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
],
)
go_test(
name = "go_default_test",
srcs = ["metrics_client_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/unversioned:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/heapster/metrics/api/v1/types",
"//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

@@ -0,0 +1,226 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
heapster "k8s.io/heapster/metrics/api/v1/types"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
)
// PodResourceInfo contains pod resource metric values as a map from pod names to
// metric values
type PodResourceInfo map[string]int64
// PodMetricsInfo contains pod metric values as a map from pod names to
// metric values
type PodMetricsInfo map[string]float64
// MetricsClient knows how to query a remote interface to retrieve container-level
// resource metrics as well as pod-level arbitrary metrics
type MetricsClient interface {
// GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error)
// GetRawMetric gets the given metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)
}
const (
DefaultHeapsterNamespace = "kube-system"
DefaultHeapsterScheme = "http"
DefaultHeapsterService = "heapster"
DefaultHeapsterPort = "" // use the first exposed port on the service
)
var heapsterQueryStart = -5 * time.Minute
type HeapsterMetricsClient struct {
services v1core.ServiceInterface
podsGetter v1core.PodsGetter
heapsterScheme string
heapsterService string
heapsterPort string
}
func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) MetricsClient {
return &HeapsterMetricsClient{
services: client.Core().Services(namespace),
podsGetter: client.Core(),
heapsterScheme: scheme,
heapsterService: service,
heapsterPort: port,
}
}
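// Typical construction, as a sketch (the client variable is an assumed clientset.Interface):
// metricsClient := NewHeapsterMetricsClient(client, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)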
func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error) {
metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
params := map[string]string{"labelSelector": selector.String()}
resultRaw, err := h.services.
ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, params).
DoRaw()
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get heapster service: %v", err)
}
glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
metrics := metricsapi.PodMetricsList{}
err = json.Unmarshal(resultRaw, &metrics)
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}
if len(metrics.Items) == 0 {
return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster")
}
res := make(PodResourceInfo, len(metrics.Items))
for _, m := range metrics.Items {
podSum := int64(0)
missing := len(m.Containers) == 0
for _, c := range m.Containers {
resValue, found := c.Usage[v1.ResourceName(resource)]
if !found {
missing = true
glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
continue
}
podSum += resValue.MilliValue()
}
if !missing {
res[m.Name] = podSum
}
}
timestamp := metrics.Items[0].Timestamp.Time
return res, timestamp, nil
}
func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
podList, err := h.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
}
if len(podList.Items) == 0 {
return nil, time.Time{}, fmt.Errorf("no pods matched the provided selector")
}
podNames := make([]string, len(podList.Items))
for i, pod := range podList.Items {
podNames[i] = pod.Name
}
now := time.Now()
startTime := now.Add(heapsterQueryStart)
metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
namespace,
strings.Join(podNames, ","),
metricName)
resultRaw, err := h.services.
ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
DoRaw()
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get heapster service: %v", err)
}
var metrics heapster.MetricResultList
err = json.Unmarshal(resultRaw, &metrics)
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}
glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
if len(metrics.Items) != len(podNames) {
// if we get too many metrics or too few metrics, we have no way of knowing which metric goes to which pod
// (note that Heapster returns *empty* metric items when a pod does not exist or does not have that metric, so this
// does not cover the "missing metric entry" case)
return nil, time.Time{}, fmt.Errorf("requested metrics for %v pods, got metrics for %v", len(podNames), len(metrics.Items))
}
var timestamp *time.Time
res := make(PodMetricsInfo, len(metrics.Items))
for i, podMetrics := range metrics.Items {
val, podTimestamp, hadMetrics := collapseTimeSamples(podMetrics, time.Minute)
if hadMetrics {
res[podNames[i]] = val
if timestamp == nil || podTimestamp.Before(*timestamp) {
timestamp = &podTimestamp
}
}
}
if timestamp == nil {
timestamp = &time.Time{}
}
return res, *timestamp, nil
}
func collapseTimeSamples(metrics heapster.MetricResult, duration time.Duration) (float64, time.Time, bool) {
floatSum := float64(0)
intSum := int64(0)
intSumCount := 0
floatSumCount := 0
var newest *heapster.MetricPoint // creation time of the newest sample for this pod
for i, metricPoint := range metrics.Metrics {
if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) {
newest = &metrics.Metrics[i]
}
}
if newest != nil {
for _, metricPoint := range metrics.Metrics {
if metricPoint.Timestamp.Add(duration).After(newest.Timestamp) {
intSum += int64(metricPoint.Value)
intSumCount++
if metricPoint.FloatValue != nil {
floatSum += *metricPoint.FloatValue
floatSumCount++
}
}
}
if newest.FloatValue != nil {
return floatSum / float64(floatSumCount), newest.Timestamp, true
} else {
return float64(intSum) / float64(intSumCount), newest.Timestamp, true
}
}
return 0, time.Time{}, false
}
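// For example, given samples (t, 50), (t-15s, 100) and (t-60s, 100000) with a
// one-minute window, only the first two fall within a minute of the newest sample,
// so the collapsed value is (50+100)/2 = 75 at timestamp t (see
// TestCollapseTimeSamples below).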

@@ -0,0 +1,372 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"encoding/json"
"fmt"
"io"
"testing"
"time"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
heapster "k8s.io/heapster/metrics/api/v1/types"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert"
)
var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)
func (w fakeResponseWrapper) DoRaw() ([]byte, error) {
return w.raw, nil
}
func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) {
return nil, nil
}
func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
return fakeResponseWrapper{raw: raw}
}
type fakeResponseWrapper struct {
raw []byte
}
// timestamp is used for establishing order on metricPoints
type metricPoint struct {
level uint64
timestamp int
}
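// For example, reportedMetricsPoints of [][]metricPoint{{{10, 1}}} describes a single
// pod with one sample of level 10 taken at fixedTimestamp plus one minute.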
type testCase struct {
desiredResourceValues PodResourceInfo
desiredMetricValues PodMetricsInfo
desiredError error
replicas int
targetTimestamp int
reportedMetricsPoints [][]metricPoint
reportedPodMetrics [][]int64
namespace string
selector labels.Selector
resourceName v1.ResourceName
metricName string
}
func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
namespace := "test-namespace"
tc.namespace = namespace
podNamePrefix := "test-pod"
podLabels := map[string]string{"name": podNamePrefix}
tc.selector = labels.SelectorFromSet(podLabels)
// it's a resource test if we have a resource name
isResource := len(tc.resourceName) > 0
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.PodList{}
for i := 0; i < tc.replicas; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := buildPod(namespace, podName, podLabels, v1.PodRunning, "1024")
obj.Items = append(obj.Items, pod)
}
return true, obj, nil
})
if isResource {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
metrics := metricsapi.PodMetricsList{}
for i, containers := range tc.reportedPodMetrics {
metric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: namespace,
},
Timestamp: unversioned.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
Containers: []metricsapi.ContainerMetrics{},
}
for j, cpu := range containers {
cm := metricsapi.ContainerMetrics{
Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
cpu,
resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(
int64(1024*1024),
resource.BinarySI),
},
}
metric.Containers = append(metric.Containers, cm)
}
metrics.Items = append(metrics.Items, metric)
}
heapsterRawMemResponse, _ := json.Marshal(&metrics)
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
})
} else {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
metrics := heapster.MetricResultList{}
var latestTimestamp time.Time
for _, reportedMetricPoints := range tc.reportedMetricsPoints {
var heapsterMetricPoints []heapster.MetricPoint
for _, reportedMetricPoint := range reportedMetricPoints {
timestamp := fixedTimestamp.Add(time.Duration(reportedMetricPoint.timestamp) * time.Minute)
if latestTimestamp.Before(timestamp) {
latestTimestamp = timestamp
}
heapsterMetricPoint := heapster.MetricPoint{Timestamp: timestamp, Value: reportedMetricPoint.level, FloatValue: nil}
heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint)
}
metric := heapster.MetricResult{
Metrics: heapsterMetricPoints,
LatestTimestamp: latestTimestamp,
}
metrics.Items = append(metrics.Items, metric)
}
heapsterRawMemResponse, _ := json.Marshal(&metrics)
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
})
}
return fakeClient
}
func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
return v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: podLabels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(request),
},
},
},
},
},
Status: v1.PodStatus{
Phase: phase,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
}
}
func (tc *testCase) verifyResults(t *testing.T, metrics interface{}, timestamp time.Time, err error) {
if tc.desiredError != nil {
assert.Error(t, err, "there should be an error retrieving the metrics")
assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
return
}
assert.NoError(t, err, "there should be no error retrieving the metrics")
assert.NotNil(t, metrics, "there should be metrics returned")
if metricsInfo, wasRaw := metrics.(PodMetricsInfo); wasRaw {
assert.Equal(t, tc.desiredMetricValues, metricsInfo, "the raw metrics values should be as expected")
} else if resourceInfo, wasResource := metrics.(PodResourceInfo); wasResource {
assert.Equal(t, tc.desiredResourceValues, resourceInfo, "the resource metric values should be as expected")
} else {
assert.False(t, true, "should return either resource metrics info or raw metrics info")
}
targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}
func (tc *testCase) runTest(t *testing.T) {
testClient := tc.prepareTestClient(t)
metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
isResource := len(tc.resourceName) > 0
if isResource {
info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector)
tc.verifyResults(t, info, timestamp, err)
} else {
info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
tc.verifyResults(t, info, timestamp, err)
}
}
func TestCPU(t *testing.T) {
tc := testCase{
replicas: 3,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
},
resourceName: v1.ResourceCPU,
targetTimestamp: 1,
reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
}
tc.runTest(t)
}
func TestQPS(t *testing.T) {
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 10, "test-pod-1": 20, "test-pod-2": 10,
},
metricName: "qps",
targetTimestamp: 1,
reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}},
}
tc.runTest(t)
}
func TestQpsSumEqualZero(t *testing.T) {
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
},
metricName: "qps",
targetTimestamp: 0,
reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
}
tc.runTest(t)
}
func TestCPUMoreMetrics(t *testing.T) {
tc := testCase{
replicas: 5,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
"test-pod-3": 5000, "test-pod-4": 5000,
},
resourceName: v1.ResourceCPU,
targetTimestamp: 10,
reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
}
tc.runTest(t)
}
func TestCPUMissingMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 4000,
},
resourceName: v1.ResourceCPU,
reportedPodMetrics: [][]int64{{4000}},
}
tc.runTest(t)
}
func TestQpsMissingMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
desiredError: fmt.Errorf("requested metrics for 3 pods, got metrics for 1"),
metricName: "qps",
targetTimestamp: 1,
reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
}
tc.runTest(t)
}
func TestQpsSuperfluousMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
desiredError: fmt.Errorf("requested metrics for 3 pods, got metrics for 6"),
metricName: "qps",
reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}},
}
tc.runTest(t)
}
func TestCPUEmptyMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
resourceName: v1.ResourceCPU,
desiredError: fmt.Errorf("no metrics returned from heapster"),
reportedMetricsPoints: [][]metricPoint{},
reportedPodMetrics: [][]int64{},
}
tc.runTest(t)
}
func TestQpsEmptyEntries(t *testing.T) {
tc := testCase{
replicas: 3,
metricName: "qps",
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 4000, "test-pod-2": 2000,
},
targetTimestamp: 4,
reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {}, {{2000, 4}}},
}
tc.runTest(t)
}
func TestCPUZeroReplicas(t *testing.T) {
tc := testCase{
replicas: 0,
resourceName: v1.ResourceCPU,
desiredError: fmt.Errorf("no metrics returned from heapster"),
reportedPodMetrics: [][]int64{},
}
tc.runTest(t)
}
func TestCPUEmptyMetricsForOnePod(t *testing.T) {
tc := testCase{
replicas: 3,
resourceName: v1.ResourceCPU,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 100, "test-pod-1": 700,
},
reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
}
tc.runTest(t)
}
func TestCollapseTimeSamples(t *testing.T) {
now := time.Now()
metrics := heapster.MetricResult{
Metrics: []heapster.MetricPoint{
{Timestamp: now, Value: 50, FloatValue: nil},
{Timestamp: now.Add(-15 * time.Second), Value: 100, FloatValue: nil},
{Timestamp: now.Add(-60 * time.Second), Value: 100000, FloatValue: nil}},
LatestTimestamp: now,
}
val, timestamp, hadMetrics := collapseTimeSamples(metrics, time.Minute)
assert.True(t, hadMetrics, "should report that it received a populated list of metrics")
assert.InEpsilon(t, float64(75), val, 0.1, "collapsed sample value should be as expected")
assert.True(t, timestamp.Equal(now), "timestamp should be the current time (the newest)")
}

@@ -0,0 +1,64 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"fmt"
)
// GetResourceUtilizationRatio takes in a set of metrics, a set of matching requests,
// and a target utilization percentage, and calculates the ratio of the actual
// utilization to the target utilization (returning that and the actual utilization)
func GetResourceUtilizationRatio(metrics PodResourceInfo, requests map[string]int64, targetUtilization int32) (float64, int32, error) {
metricsTotal := int64(0)
requestsTotal := int64(0)
for podName, metricValue := range metrics {
request, hasRequest := requests[podName]
if !hasRequest {
// we check for missing requests elsewhere, so assuming missing requests == extraneous metrics
continue
}
metricsTotal += metricValue
requestsTotal += request
}
// if the set of requests is completely disjoint from the set of metrics,
// then we could have an issue where the requests total is zero
if requestsTotal == 0 {
return 0, 0, fmt.Errorf("no metrics returned matched known pods")
}
currentUtilization := int32((metricsTotal * 100) / requestsTotal)
return float64(currentUtilization) / float64(targetUtilization), currentUtilization, nil
}
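// For example, with metrics {pod-0: 300, pod-1: 500} (milli-values), requests
// {pod-0: 1000, pod-1: 1000} and a target of 40%, currentUtilization is
// (800*100)/2000 = 40 and the returned ratio is 40/40 = 1.0.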
// GetMetricUtilizationRatio takes in a set of metrics and a target utilization value,
// and calculates the ratio of the actual utilization to the target utilization
// (returning that and the actual utilization)
func GetMetricUtilizationRatio(metrics PodMetricsInfo, targetUtilization float64) (float64, float64) {
metricsTotal := float64(0)
for _, metricValue := range metrics {
metricsTotal += metricValue
}
currentUtilization := metricsTotal / float64(len(metrics))
return currentUtilization / targetUtilization, currentUtilization
}
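
A minimal sketch exercising both helpers (an illustrative test with assumed values, not part of this commit):

package metrics
import "testing"
func TestUtilizationRatioSketch(t *testing.T) {
// two pods at 300m and 500m usage against 1000m requests and a 40% target
ratio, current, err := GetResourceUtilizationRatio(
PodResourceInfo{"pod-0": 300, "pod-1": 500},
map[string]int64{"pod-0": 1000, "pod-1": 1000},
40,
)
if err != nil || current != 40 || ratio != 1.0 {
t.Fatalf("ratio=%v current=%v err=%v", ratio, current, err)
}
// two pods averaging 20 qps against a target of 10 gives a ratio of 2.0
mRatio, mCurrent := GetMetricUtilizationRatio(PodMetricsInfo{"pod-0": 10, "pod-1": 30}, 10)
if mCurrent != 20 || mRatio != 2.0 {
t.Fatalf("ratio=%v current=%v", mRatio, mCurrent)
}
}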

@@ -0,0 +1,246 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podautoscaler
import (
"fmt"
"math"
"time"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)
type ReplicaCalculator struct {
metricsClient metricsclient.MetricsClient
podsGetter v1core.PodsGetter
}
func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter v1core.PodsGetter) *ReplicaCalculator {
return &ReplicaCalculator{
metricsClient: metricsClient,
podsGetter: podsGetter,
}
}
// GetResourceReplicas calculates the desired replica count based on a target resource utilization percentage
// of the given resource for pods matching the given selector in the given namespace, and the current replica count
func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource v1.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int32, timestamp time.Time, err error) {
metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector)
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
}
podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
}
if len(podList.Items) == 0 {
return 0, 0, time.Time{}, fmt.Errorf("no pods returned by selector while calculating replica count")
}
requests := make(map[string]int64, len(podList.Items))
readyPodCount := 0
unreadyPods := sets.NewString()
missingPods := sets.NewString()
for _, pod := range podList.Items {
podSum := int64(0)
for _, container := range pod.Spec.Containers {
if containerRequest, ok := container.Resources.Requests[resource]; ok {
podSum += containerRequest.MilliValue()
} else {
return 0, 0, time.Time{}, fmt.Errorf("missing request for %s on container %s in pod %s/%s", resource, container.Name, namespace, pod.Name)
}
}
requests[pod.Name] = podSum
if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
// save this pod name for later, but pretend it doesn't exist for now
unreadyPods.Insert(pod.Name)
delete(metrics, pod.Name)
continue
}
if _, found := metrics[pod.Name]; !found {
// save this pod name for later, but pretend it doesn't exist for now
missingPods.Insert(pod.Name)
continue
}
readyPodCount++
}
if len(metrics) == 0 {
return 0, 0, time.Time{}, fmt.Errorf("did not receive metrics for any ready pods")
}
usageRatio, utilization, err := metricsclient.GetResourceUtilizationRatio(metrics, requests, targetUtilization)
if err != nil {
return 0, 0, time.Time{}, err
}
rebalanceUnready := len(unreadyPods) > 0 && usageRatio > 1.0
if !rebalanceUnready && len(missingPods) == 0 {
if math.Abs(1.0-usageRatio) <= tolerance {
// return the current replicas if the change would be too small
return currentReplicas, utilization, timestamp, nil
}
// if we don't have any unready or missing pods, we can calculate the new replica count now
return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, timestamp, nil
}
if len(missingPods) > 0 {
if usageRatio < 1.0 {
// on a scale-down, treat missing pods as using 100% of the resource request
for podName := range missingPods {
metrics[podName] = requests[podName]
}
} else if usageRatio > 1.0 {
// on a scale-up, treat missing pods as using 0% of the resource request
for podName := range missingPods {
metrics[podName] = 0
}
}
}
if rebalanceUnready {
// on a scale-up, treat unready pods as using 0% of the resource request
for podName := range unreadyPods {
metrics[podName] = 0
}
}
// re-run the utilization calculation with our new numbers
newUsageRatio, _, err := metricsclient.GetResourceUtilizationRatio(metrics, requests, targetUtilization)
if err != nil {
return 0, utilization, time.Time{}, err
}
if math.Abs(1.0-newUsageRatio) <= tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
// return the current replicas if the change would be too small,
// or if the new usage ratio would cause a change in scale direction
return currentReplicas, utilization, timestamp, nil
}
// return the result, where the number of replicas considered is
// however many replicas factored into our calculation
return int32(math.Ceil(newUsageRatio * float64(len(metrics)))), utilization, timestamp, nil
}
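// For example, three ready pods averaging 50% utilization against a 30% target give
// usageRatio 50/30 ≈ 1.67, and ceil(1.67 * 3) = 5 replicas (this matches
// TestReplicaCalcScaleUp later in this diff).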
// GetMetricReplicas calculates the desired replica count based on a target value of
// the given metric for pods matching the given selector in the given namespace, and the current replica count
func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtilization float64, metricName string, namespace string, selector labels.Selector) (replicaCount int32, utilization float64, timestamp time.Time, err error) {
metrics, timestamp, err := c.metricsClient.GetRawMetric(metricName, namespace, selector)
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v", metricName, err)
}
podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
}
if len(podList.Items) == 0 {
return 0, 0, time.Time{}, fmt.Errorf("no pods returned by selector while calculating replica count")
}
readyPodCount := 0
unreadyPods := sets.NewString()
missingPods := sets.NewString()
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
// save this pod name for later, but pretend it doesn't exist for now
unreadyPods.Insert(pod.Name)
delete(metrics, pod.Name)
continue
}
if _, found := metrics[pod.Name]; !found {
// save this pod name for later, but pretend it doesn't exist for now
missingPods.Insert(pod.Name)
continue
}
readyPodCount++
}
if len(metrics) == 0 {
return 0, 0, time.Time{}, fmt.Errorf("did not recieve metrics for any ready pods")
}
usageRatio, utilization := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization)
rebalanceUnready := len(unreadyPods) > 0 && usageRatio > 1.0
if !rebalanceUnready && len(missingPods) == 0 {
if math.Abs(1.0-usageRatio) <= tolerance {
// return the current replicas if the change would be too small
return currentReplicas, utilization, timestamp, nil
}
// if we don't have any unready or missing pods, we can calculate the new replica count now
return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, timestamp, nil
}
if len(missingPods) > 0 {
if usageRatio < 1.0 {
// on a scale-down, treat missing pods as if they were at the target metric value
for podName := range missingPods {
metrics[podName] = targetUtilization
}
} else {
// on a scale-up, treat missing pods as having a zero metric value
for podName := range missingPods {
metrics[podName] = 0
}
}
}
if rebalanceUnready {
// on a scale-up, treat unready pods as having a zero metric value
for podName := range unreadyPods {
metrics[podName] = 0
}
}
// re-run the utilization calculation with our new numbers
newUsageRatio, _ := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization)
if math.Abs(1.0-newUsageRatio) <= tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
// return the current replicas if the change would be too small,
// or if the new usage ratio would cause a change in scale direction
return currentReplicas, utilization, timestamp, nil
}
// return the result, where the number of replicas considered is
// however many replicas factored into our calculation
return int32(math.Ceil(newUsageRatio * float64(len(metrics)))), utilization, timestamp, nil
}
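
The unready-pod rebalancing in both calculators can change the outcome materially: with one unready pod and two ready pods averaging 60% against a 30% target, the first pass computes a usage ratio of 2.0 over the ready pods; after treating the unready pod as consuming nothing, the recomputed ratio is about 1.33 and the result is ceil(1.33 * 3) = 4 replicas (this matches TestReplicaCalcScaleUpUnreadyLessScale later in this diff).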

@@ -0,0 +1,676 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podautoscaler
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
heapster "k8s.io/heapster/metrics/api/v1/types"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type resourceInfo struct {
name v1.ResourceName
requests []resource.Quantity
levels []int64
// only applies to pod names returned from "heapster"
podNames []string
targetUtilization int32
expectedUtilization int32
}
type metricInfo struct {
name string
levels []float64
targetUtilization float64
expectedUtilization float64
}
type replicaCalcTestCase struct {
currentReplicas int32
expectedReplicas int32
expectedError error
timestamp time.Time
resource *resourceInfo
metric *metricInfo
podReadiness []v1.ConditionStatus
}
const (
testNamespace = "test-namespace"
podNamePrefix = "test-pod"
)
func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.PodList{}
for i := 0; i < int(tc.currentReplicas); i++ {
podReadiness := v1.ConditionTrue
if tc.podReadiness != nil {
podReadiness = tc.podReadiness[i]
}
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: podReadiness,
},
},
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: testNamespace,
Labels: map[string]string{
"name": podNamePrefix,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{}, {}},
},
}
if tc.resource != nil && i < len(tc.resource.requests) {
pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
Requests: v1.ResourceList{
tc.resource.name: tc.resource.requests[i],
},
}
pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
Requests: v1.ResourceList{
tc.resource.name: tc.resource.requests[i],
},
}
}
obj.Items = append(obj.Items, pod)
}
return true, obj, nil
})
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
var heapsterRawMemResponse []byte
if tc.resource != nil {
metrics := metricsapi.PodMetricsList{}
for i, resValue := range tc.resource.levels {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
if len(tc.resource.podNames) > i {
podName = tc.resource.podNames[i]
}
podMetric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: testNamespace,
},
Timestamp: unversioned.Time{Time: tc.timestamp},
Containers: []metricsapi.ContainerMetrics{
{
Name: "container1",
Usage: v1.ResourceList{
v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
int64(resValue),
resource.DecimalSI),
},
},
{
Name: "container2",
Usage: v1.ResourceList{
v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
int64(resValue),
resource.DecimalSI),
},
},
},
}
metrics.Items = append(metrics.Items, podMetric)
}
heapsterRawMemResponse, _ = json.Marshal(&metrics)
} else {
// only return the pods that we actually asked for
proxyAction := action.(core.ProxyGetAction)
pathParts := strings.Split(proxyAction.GetPath(), "/")
// pathParts should look like [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ]
if len(pathParts) < 9 {
return true, nil, fmt.Errorf("invalid heapster path %q", proxyAction.GetPath())
}
podNames := strings.Split(pathParts[7], ",")
podPresent := make([]bool, len(tc.metric.levels))
for _, name := range podNames {
if len(name) <= len(podNamePrefix)+1 {
return true, nil, fmt.Errorf("unknown pod %q", name)
}
num, err := strconv.Atoi(name[len(podNamePrefix)+1:])
if err != nil {
return true, nil, fmt.Errorf("unknown pod %q", name)
}
podPresent[num] = true
}
timestamp := tc.timestamp
metrics := heapster.MetricResultList{}
for i, level := range tc.metric.levels {
if !podPresent[i] {
continue
}
metric := heapster.MetricResult{
Metrics: []heapster.MetricPoint{{Timestamp: timestamp, Value: uint64(level), FloatValue: &tc.metric.levels[i]}},
LatestTimestamp: timestamp,
}
metrics.Items = append(metrics.Items, metric)
}
heapsterRawMemResponse, _ = json.Marshal(&metrics)
}
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
})
return fakeClient
}
func (tc *replicaCalcTestCase) runTest(t *testing.T) {
testClient := tc.prepareTestClient(t)
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
replicaCalc := &ReplicaCalculator{
metricsClient: metricsClient,
podsGetter: testClient.Core(),
}
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
MatchLabels: map[string]string{"name": podNamePrefix},
})
require.NoError(t, err, "something went horribly wrong...")
if tc.resource != nil {
outReplicas, outUtilization, outTimestamp, err := replicaCalc.GetResourceReplicas(tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector)
if tc.expectedError != nil {
require.Error(t, err, "there should be an error calculating the replica count")
assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
return
}
require.NoError(t, err, "there should not have been an error calculating the replica count")
assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
assert.Equal(t, tc.resource.expectedUtilization, outUtilization, "utilization should be as expected")
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
} else {
outReplicas, outUtilization, outTimestamp, err := replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector)
if tc.expectedError != nil {
require.Error(t, err, "there should be an error calculating the replica count")
assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
return
}
require.NoError(t, err, "there should not have been an error calculating the replica count")
assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
assert.InDelta(t, tc.metric.expectedUtilization, outUtilization, 0.1, "utilization should be as expected")
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
}
}
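// The expected replica counts in the tests below follow the usage-ratio rule
// (a sketch of the calculator's arithmetic, not a spec): utilization is the
// pods' summed usage divided by their summed requests, and
//
//	newReplicas = ceil(currentUtilization / targetUtilization * readyPods)
//
// For example, in TestReplicaCalcScaleUp: usage (300+500+700)m over requests
// totalling 3000m gives 50% utilization; 50/30 * 3 rounds up to 5 replicas.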
func TestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 1,
expectedError: fmt.Errorf("no metrics returned matched known pods"),
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0")},
levels: []int64{100},
podNames: []string{"an-older-pod-name"},
targetUtilization: 100,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUp(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 5,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{300, 500, 700},
targetUtilization: 30,
expectedUtilization: 50,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{300, 500, 700},
targetUtilization: 30,
expectedUtilization: 60,
},
}
tc.runTest(t)
}
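// A sketch of the unready-pod handling assumed by the surrounding tests: on a
// prospective scale-up, pods that are not ready are treated as consuming 0%
// of their request and the usage ratio is recomputed over all pods. Above,
// the ready pods average (500+700)/2000m = 60% against a 30% target (ratio
// 2.0), but recomputing with the unready pod at zero gives (0+500+700)/3000m
// = 40%, so ceil(40/30 * 3) = 4 replicas rather than 6.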
func TestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{400, 500, 700},
targetUtilization: 30,
expectedUtilization: 40,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUpCM(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
metric: &metricInfo{
name: "qps",
levels: []float64{20.0, 10.0, 30.0},
targetUtilization: 15.0,
expectedUtilization: 20.0,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
metric: &metricInfo{
name: "qps",
levels: []float64{50.0, 10.0, 30.0},
targetUtilization: 15.0,
expectedUtilization: 30.0,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
metric: &metricInfo{
name: "qps",
levels: []float64{50.0, 15.0, 30.0},
targetUtilization: 15.0,
expectedUtilization: 15.0,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleDown(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 5,
expectedReplicas: 3,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 300, 500, 250, 250},
targetUtilization: 50,
expectedUtilization: 28,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleDownCM(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 5,
expectedReplicas: 3,
metric: &metricInfo{
name: "qps",
levels: []float64{12.0, 12.0, 12.0, 12.0, 12.0},
targetUtilization: 20.0,
expectedUtilization: 12.0,
},
}
tc.runTest(t)
}
func TestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 5,
expectedReplicas: 2,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 300, 500, 250, 250},
targetUtilization: 50,
expectedUtilization: 30,
},
}
tc.runTest(t)
}
func TestReplicaCalcTolerance(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
levels: []int64{1010, 1030, 1020},
targetUtilization: 100,
expectedUtilization: 102,
},
}
tc.runTest(t)
}
func TestReplicaCalcToleranceCM(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
metric: &metricInfo{
name: "qps",
levels: []float64{20.0, 21.0, 21.0},
targetUtilization: 20.0,
expectedUtilization: 20.66666,
},
}
tc.runTest(t)
}
func TestReplicaCalcSuperfluousMetrics(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 4,
expectedReplicas: 24,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{4000, 9500, 3000, 7000, 3200, 2000},
targetUtilization: 100,
expectedUtilization: 587,
},
}
tc.runTest(t)
}
func TestReplicaCalcMissingMetrics(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 4,
expectedReplicas: 3,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{400, 95},
targetUtilization: 100,
expectedUtilization: 24,
},
}
tc.runTest(t)
}
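// A sketch of the missing-metrics handling assumed above: only two of the
// four pods report metrics, so measured utilization is (400+95)/2000m = ~24%.
// Because that points at a scale-down, the two unreported pods are
// conservatively treated as running at 100% of their requests, giving
// (400+95+1000+1000)/4000m = ~62% and ceil(0.62 * 4) = 3 replicas instead of 1.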
func TestReplicaCalcEmptyMetrics(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 4,
expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"),
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{},
targetUtilization: 100,
},
}
tc.runTest(t)
}
func TestReplicaCalcEmptyCPURequest(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 1,
expectedError: fmt.Errorf("missing request for"),
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{},
levels: []int64{200},
targetUtilization: 100,
},
}
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 2,
expectedReplicas: 2,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{1000},
targetUtilization: 100,
expectedUtilization: 100,
},
}
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 2,
expectedReplicas: 2,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{1900},
targetUtilization: 100,
expectedUtilization: 190,
},
}
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 2,
expectedReplicas: 2,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{600},
targetUtilization: 100,
expectedUtilization: 60,
},
}
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 450},
targetUtilization: 50,
expectedUtilization: 45,
},
}
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 2000},
targetUtilization: 50,
expectedUtilization: 200,
},
}
tc.runTest(t)
}
func TestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 4,
expectedReplicas: 3,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 100, 100},
targetUtilization: 50,
expectedUtilization: 10,
},
}
tc.runTest(t)
}
// TestReplicaCalcComputedToleranceAlgImplementation is a regression test that
// back-calculates a minimal percentage for downscaling based on a small
// percentage increase in pod utilization, calibrated against the tolerance value.
func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
startPods := int32(10)
// 150 mCPU per pod.
totalUsedCPUOfAllPods := int64(startPods * 150)
// Each pod starts out asking for 2X what is really needed.
// This means we will have a 50% ratio of used/requested
totalRequestedCPUOfAllPods := int32(2 * totalUsedCPUOfAllPods)
requestedToUsed := float64(totalRequestedCPUOfAllPods) / float64(totalUsedCPUOfAllPods)
// Spread the amount we ask over 10 pods. We can add some jitter later in reportedLevels.
perPodRequested := totalRequestedCPUOfAllPods / startPods
// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01
finalCpuPercentTarget := int32(target * 100)
resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)
// i.e. resourcesUsedRatio * startPods, rounded up, is the scaled-down expectation.
finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods)))
// To breach the tolerance, the target is chosen so the usage ratio lands just outside the tolerance band.
tc := replicaCalcTestCase{
currentReplicas: startPods,
expectedReplicas: finalPods,
resource: &resourceInfo{
name: v1.ResourceCPU,
levels: []int64{
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
},
requests: []resource.Quantity{
resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
},
targetUtilization: finalCpuPercentTarget,
expectedUtilization: int32(totalUsedCPUOfAllPods*100) / totalRequestedCPUOfAllPods,
},
}
tc.runTest(t)
// Reuse the data structure above, now testing "unscaling":
// verify that no scaling happens when the target keeps the usage ratio within a very close margin of the tolerance.
target = math.Abs(1/(requestedToUsed*(1-tolerance))) + .004
finalCpuPercentTarget = int32(target * 100)
tc.resource.targetUtilization = finalCpuPercentTarget
tc.currentReplicas = startPods
tc.expectedReplicas = startPods
tc.runTest(t)
}
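// A numeric walk-through of the regression test above, assuming tolerance is
// 0.1: requestedToUsed = 2, so target = 1/(2*0.9) + 0.01 ~= 0.5656 and
// finalCpuPercentTarget = 56. resourcesUsedRatio = 1500/(3000*0.5656) ~= 0.88,
// so finalPods = ceil(0.88 * 10) = 9: one pod is shed. The second run nudges
// the target by only 0.004, which keeps the usage ratio inside the tolerance
// band, so the replica count holds at 10.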
// TODO: add more tests