Switch to github.com/golang/dep for vendoring

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2017-01-31 16:45:59 -08:00
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions


@@ -0,0 +1,60 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"metrics_client.go",
"utilization.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/heapster/metrics/api/v1/types",
"//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
],
)
go_test(
name = "go_default_test",
srcs = ["metrics_client_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/unversioned:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/heapster/metrics/api/v1/types",
"//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -0,0 +1,226 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
heapster "k8s.io/heapster/metrics/api/v1/types"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
)
// PodResourceInfo contains pod resource metric values as a map from pod names to
// metric values
type PodResourceInfo map[string]int64
// PodMetricsInfo contains pod metric values as a map from pod names to
// metric values
type PodMetricsInfo map[string]float64
// MetricsClient knows how to query a remote interface to retrieve container-level
// resource metrics as well as pod-level arbitrary metrics
type MetricsClient interface {
// GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error)
// GetRawMetric gets the given metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)
}
const (
DefaultHeapsterNamespace = "kube-system"
DefaultHeapsterScheme = "http"
DefaultHeapsterService = "heapster"
DefaultHeapsterPort = "" // use the first exposed port on the service
)
var heapsterQueryStart = -5 * time.Minute
type HeapsterMetricsClient struct {
services v1core.ServiceInterface
podsGetter v1core.PodsGetter
heapsterScheme string
heapsterService string
heapsterPort string
}
func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) MetricsClient {
return &HeapsterMetricsClient{
services: client.Core().Services(namespace),
podsGetter: client.Core(),
heapsterScheme: scheme,
heapsterService: service,
heapsterPort: port,
}
}
func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error) {
metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
params := map[string]string{"labelSelector": selector.String()}
resultRaw, err := h.services.
ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, params).
DoRaw()
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get heapster service: %v", err)
}
glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
metrics := metricsapi.PodMetricsList{}
err = json.Unmarshal(resultRaw, &metrics)
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}
if len(metrics.Items) == 0 {
return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster")
}
res := make(PodResourceInfo, len(metrics.Items))
for _, m := range metrics.Items {
podSum := int64(0)
missing := len(m.Containers) == 0
for _, c := range m.Containers {
resValue, found := c.Usage[v1.ResourceName(resource)]
if !found {
missing = true
glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
continue
}
podSum += resValue.MilliValue()
}
if !missing {
res[m.Name] = int64(podSum)
}
}
timestamp := metrics.Items[0].Timestamp.Time
return res, timestamp, nil
}
func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
podList, err := h.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
}
if len(podList.Items) == 0 {
return nil, time.Time{}, fmt.Errorf("no pods matched the provided selector")
}
podNames := make([]string, len(podList.Items))
for i, pod := range podList.Items {
podNames[i] = pod.Name
}
now := time.Now()
startTime := now.Add(heapsterQueryStart)
metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
namespace,
strings.Join(podNames, ","),
metricName)
resultRaw, err := h.services.
ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
DoRaw()
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get heapster service: %v", err)
}
var metrics heapster.MetricResultList
err = json.Unmarshal(resultRaw, &metrics)
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}
glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
if len(metrics.Items) != len(podNames) {
// if we get too many metrics or too few metrics, we have no way of knowing which metric goes to which pod
// (note that Heapster returns *empty* metric items when a pod does not exist or have that metric, so this
// does not cover the "missing metric entry" case)
return nil, time.Time{}, fmt.Errorf("requested metrics for %v pods, got metrics for %v", len(podNames), len(metrics.Items))
}
var timestamp *time.Time
res := make(PodMetricsInfo, len(metrics.Items))
for i, podMetrics := range metrics.Items {
val, podTimestamp, hadMetrics := collapseTimeSamples(podMetrics, time.Minute)
if hadMetrics {
res[podNames[i]] = val
if timestamp == nil || podTimestamp.Before(*timestamp) {
timestamp = &podTimestamp
}
}
}
if timestamp == nil {
timestamp = &time.Time{}
}
return res, *timestamp, nil
}
func collapseTimeSamples(metrics heapster.MetricResult, duration time.Duration) (float64, time.Time, bool) {
floatSum := float64(0)
intSum := int64(0)
intSumCount := 0
floatSumCount := 0
var newest *heapster.MetricPoint // the newest sample seen for this pod
for i, metricPoint := range metrics.Metrics {
if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) {
newest = &metrics.Metrics[i]
}
}
if newest != nil {
for _, metricPoint := range metrics.Metrics {
if metricPoint.Timestamp.Add(duration).After(newest.Timestamp) {
intSum += int64(metricPoint.Value)
intSumCount++
if metricPoint.FloatValue != nil {
floatSum += *metricPoint.FloatValue
floatSumCount++
}
}
}
if newest.FloatValue != nil {
return floatSum / float64(floatSumCount), newest.Timestamp, true
} else {
return float64(intSum / int64(intSumCount)), newest.Timestamp, true
}
}
return 0, time.Time{}, false
}
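
For orientation (not part of this commit), here is a minimal usage sketch of the client above. It assumes an already-constructed clientset.Interface named kubeClient; the namespace, label set, and function name are illustrative.

// Usage sketch only; kubeClient, the namespace, and the labels are assumptions.
func exampleResourceQuery(kubeClient clientset.Interface) {
	// Build the Heapster-backed client with the package defaults.
	metricsClient := NewHeapsterMetricsClient(kubeClient,
		DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
	// Hypothetical label selector; any labels.Selector works here.
	selector := labels.SelectorFromSet(labels.Set{"app": "frontend"})
	// Per-pod CPU usage in millicores, plus the oldest timestamp among the returned samples.
	cpuByPod, timestamp, err := metricsClient.GetResourceMetric(v1.ResourceCPU, "default", selector)
	if err != nil {
		glog.Errorf("could not fetch CPU metrics: %v", err)
		return
	}
	glog.V(2).Infof("got CPU metrics for %d pods as of %s: %v", len(cpuByPod), timestamp, cpuByPod)
}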


@@ -0,0 +1,372 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"encoding/json"
"fmt"
"io"
"testing"
"time"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
heapster "k8s.io/heapster/metrics/api/v1/types"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert"
)
var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)
func (w fakeResponseWrapper) DoRaw() ([]byte, error) {
return w.raw, nil
}
func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) {
return nil, nil
}
func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
return fakeResponseWrapper{raw: raw}
}
type fakeResponseWrapper struct {
raw []byte
}
// timestamp is used for establishing order on metricPoints
type metricPoint struct {
level uint64
timestamp int
}
type testCase struct {
desiredResourceValues PodResourceInfo
desiredMetricValues PodMetricsInfo
desiredError error
replicas int
targetTimestamp int
reportedMetricsPoints [][]metricPoint
reportedPodMetrics [][]int64
namespace string
selector labels.Selector
resourceName v1.ResourceName
metricName string
}
func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
namespace := "test-namespace"
tc.namespace = namespace
podNamePrefix := "test-pod"
podLabels := map[string]string{"name": podNamePrefix}
tc.selector = labels.SelectorFromSet(podLabels)
// it's a resource test if we have a resource name
isResource := len(tc.resourceName) > 0
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.PodList{}
for i := 0; i < tc.replicas; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := buildPod(namespace, podName, podLabels, v1.PodRunning, "1024")
obj.Items = append(obj.Items, pod)
}
return true, obj, nil
})
if isResource {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
metrics := metricsapi.PodMetricsList{}
for i, containers := range tc.reportedPodMetrics {
metric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: namespace,
},
Timestamp: unversioned.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
Containers: []metricsapi.ContainerMetrics{},
}
for j, cpu := range containers {
cm := metricsapi.ContainerMetrics{
Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
cpu,
resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(
int64(1024*1024),
resource.BinarySI),
},
}
metric.Containers = append(metric.Containers, cm)
}
metrics.Items = append(metrics.Items, metric)
}
heapsterRawMemResponse, _ := json.Marshal(&metrics)
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
})
} else {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
metrics := heapster.MetricResultList{}
var latestTimestamp time.Time
for _, reportedMetricPoints := range tc.reportedMetricsPoints {
var heapsterMetricPoints []heapster.MetricPoint
for _, reportedMetricPoint := range reportedMetricPoints {
timestamp := fixedTimestamp.Add(time.Duration(reportedMetricPoint.timestamp) * time.Minute)
if latestTimestamp.Before(timestamp) {
latestTimestamp = timestamp
}
heapsterMetricPoint := heapster.MetricPoint{Timestamp: timestamp, Value: reportedMetricPoint.level, FloatValue: nil}
heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint)
}
metric := heapster.MetricResult{
Metrics: heapsterMetricPoints,
LatestTimestamp: latestTimestamp,
}
metrics.Items = append(metrics.Items, metric)
}
heapsterRawMemResponse, _ := json.Marshal(&metrics)
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
})
}
return fakeClient
}
func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
return v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: podLabels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(request),
},
},
},
},
},
Status: v1.PodStatus{
Phase: phase,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
}
}
func (tc *testCase) verifyResults(t *testing.T, metrics interface{}, timestamp time.Time, err error) {
if tc.desiredError != nil {
assert.Error(t, err, "there should be an error retrieving the metrics")
assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
return
}
assert.NoError(t, err, "there should be no error retrieving the metrics")
assert.NotNil(t, metrics, "there should be metrics returned")
if metricsInfo, wasRaw := metrics.(PodMetricsInfo); wasRaw {
assert.Equal(t, tc.desiredMetricValues, metricsInfo, "the raw metrics values should be as expected")
} else if resourceInfo, wasResource := metrics.(PodResourceInfo); wasResource {
assert.Equal(t, tc.desiredResourceValues, resourceInfo, "the resource metrics values should be as expected")
} else {
assert.False(t, true, "should return either resource metrics info or raw metrics info")
}
targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}
func (tc *testCase) runTest(t *testing.T) {
testClient := tc.prepareTestClient(t)
metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
isResource := len(tc.resourceName) > 0
if isResource {
info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector)
tc.verifyResults(t, info, timestamp, err)
} else {
info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
tc.verifyResults(t, info, timestamp, err)
}
}
func TestCPU(t *testing.T) {
tc := testCase{
replicas: 3,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
},
resourceName: v1.ResourceCPU,
targetTimestamp: 1,
reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
}
tc.runTest(t)
}
func TestQPS(t *testing.T) {
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 10, "test-pod-1": 20, "test-pod-2": 10,
},
metricName: "qps",
targetTimestamp: 1,
reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}},
}
tc.runTest(t)
}
func TestQpsSumEqualZero(t *testing.T) {
tc := testCase{
replicas: 3,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
},
metricName: "qps",
targetTimestamp: 0,
reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
}
tc.runTest(t)
}
func TestCPUMoreMetrics(t *testing.T) {
tc := testCase{
replicas: 5,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
"test-pod-3": 5000, "test-pod-4": 5000,
},
resourceName: v1.ResourceCPU,
targetTimestamp: 10,
reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
}
tc.runTest(t)
}
func TestCPUMissingMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 4000,
},
resourceName: v1.ResourceCPU,
reportedPodMetrics: [][]int64{{4000}},
}
tc.runTest(t)
}
func TestQpsMissingMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
desiredError: fmt.Errorf("requested metrics for 3 pods, got metrics for 1"),
metricName: "qps",
targetTimestamp: 1,
reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
}
tc.runTest(t)
}
func TestQpsSuperfluousMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
desiredError: fmt.Errorf("requested metrics for 3 pods, got metrics for 6"),
metricName: "qps",
reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}},
}
tc.runTest(t)
}
func TestCPUEmptyMetrics(t *testing.T) {
tc := testCase{
replicas: 3,
resourceName: v1.ResourceCPU,
desiredError: fmt.Errorf("no metrics returned from heapster"),
reportedMetricsPoints: [][]metricPoint{},
reportedPodMetrics: [][]int64{},
}
tc.runTest(t)
}
func TestQpsEmptyEntries(t *testing.T) {
tc := testCase{
replicas: 3,
metricName: "qps",
desiredMetricValues: PodMetricsInfo{
"test-pod-0": 4000, "test-pod-2": 2000,
},
targetTimestamp: 4,
reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {}, {{2000, 4}}},
}
tc.runTest(t)
}
func TestCPUZeroReplicas(t *testing.T) {
tc := testCase{
replicas: 0,
resourceName: v1.ResourceCPU,
desiredError: fmt.Errorf("no metrics returned from heapster"),
reportedPodMetrics: [][]int64{},
}
tc.runTest(t)
}
func TestCPUEmptyMetricsForOnePod(t *testing.T) {
tc := testCase{
replicas: 3,
resourceName: v1.ResourceCPU,
desiredResourceValues: PodResourceInfo{
"test-pod-0": 100, "test-pod-1": 700,
},
reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
}
tc.runTest(t)
}
func TestCollapseTimeSamples(t *testing.T) {
now := time.Now()
metrics := heapster.MetricResult{
Metrics: []heapster.MetricPoint{
{Timestamp: now, Value: 50, FloatValue: nil},
{Timestamp: now.Add(-15 * time.Second), Value: 100, FloatValue: nil},
{Timestamp: now.Add(-60 * time.Second), Value: 100000, FloatValue: nil}},
LatestTimestamp: now,
}
val, timestamp, hadMetrics := collapseTimeSamples(metrics, time.Minute)
assert.True(t, hadMetrics, "should report that it received a populated list of metrics")
assert.InEpsilon(t, float64(75), val, 0.1, "collapsed sample value should be as expected")
assert.True(t, timestamp.Equal(now), "timestamp should be the current time (the newest)")
}
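
A note on the expected value in TestCollapseTimeSamples above: with duration = time.Minute, collapseTimeSamples keeps only samples whose timestamp plus one minute falls strictly after the newest timestamp, so the 50 (at now) and 100 (at now-15s) samples are averaged while the 100000 sample at exactly now-60s is dropped; the integer average (50+100)/2 = 75 is what the assertion checks.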


@@ -0,0 +1,64 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"fmt"
)
// GetResourceUtilizationRatio takes in a set of metrics, a set of matching requests,
// and a target utilization percentage, and calculates the ratio of
// desired to actual utilization (returning that and the actual utilization)
func GetResourceUtilizationRatio(metrics PodResourceInfo, requests map[string]int64, targetUtilization int32) (float64, int32, error) {
metricsTotal := int64(0)
requestsTotal := int64(0)
for podName, metricValue := range metrics {
request, hasRequest := requests[podName]
if !hasRequest {
// we check for missing requests elsewhere, so assuming missing requests == extraneous metrics
continue
}
metricsTotal += metricValue
requestsTotal += request
}
// if the set of requests is completely disjoint from the set of metrics,
// then we could have an issue where the requests total is zero
if requestsTotal == 0 {
return 0, 0, fmt.Errorf("no metrics returned matched known pods")
}
currentUtilization := int32((metricsTotal * 100) / requestsTotal)
return float64(currentUtilization) / float64(targetUtilization), currentUtilization, nil
}
// GetMetricUtilizationRatio takes in a set of metrics and a target utilization value,
// and calculates the ratio of desired to actual utilization
// (returning that and the actual utilization)
func GetMetricUtilizationRatio(metrics PodMetricsInfo, targetUtilization float64) (float64, float64) {
metricsTotal := float64(0)
for _, metricValue := range metrics {
metricsTotal += metricValue
}
currentUtilization := metricsTotal / float64(len(metrics))
return currentUtilization / targetUtilization, currentUtilization
}
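
To make the arithmetic above concrete, a small sketch with made-up numbers (illustrative only; the pod names and values are not from this commit): two pods each requesting 500m CPU and using 200m give a current utilization of (400*100)/1000 = 40%, so against a 50% target the ratio is 0.8.

// Illustrative only; pod names and values are made up.
func exampleUtilizationMath() {
	metrics := PodResourceInfo{"pod-a": 200, "pod-b": 200}   // millicores in use
	requests := map[string]int64{"pod-a": 500, "pod-b": 500} // millicores requested
	ratio, current, _ := GetResourceUtilizationRatio(metrics, requests, 50)
	fmt.Printf("utilization=%d%% ratio=%.1f\n", current, ratio) // utilization=40% ratio=0.8

	raw := PodMetricsInfo{"pod-a": 15, "pod-b": 25} // e.g. queries per second
	rawRatio, rawCurrent := GetMetricUtilizationRatio(raw, 10)
	fmt.Printf("current=%.0f ratio=%.1f\n", rawCurrent, rawRatio) // current=20 ratio=2.0
}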