Switch to github.com/golang/dep for vendoring

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2017-01-31 16:45:59 -08:00
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions

vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD generated vendored Normal file

@@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"policy.go",
"qos.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
],
)
go_test(
name = "go_default_test",
srcs = [
"policy_test.go",
"qos_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/kubelet/types:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go generated vendored Normal file

@@ -0,0 +1,25 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package qos contains helper functions for quality of service.
// For each resource (memory, CPU) Kubelet supports three classes of containers.
// Guaranteed containers will receive the highest priority and will get all the resources
// they need.
// Burstable containers will be guaranteed their request and can "burst" and use more resources
// when available.
// Best-Effort containers, which don't specify a request, can use resources only if they are
// not being used by other pods.
package qos // import "k8s.io/kubernetes/pkg/kubelet/qos"
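To make the three classes concrete, here is a minimal sketch (not part of the vendored code; package main, podWith, and rl are illustrative names) that builds one single-container pod per class and classifies it with GetPodQOS from this package, assuming the vendored import paths of this tree:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/qos"
)

// podWith wraps a single container with the given requests and limits.
func podWith(requests, limits v1.ResourceList) *v1.Pod {
	return &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{
		{Resources: v1.ResourceRequirements{Requests: requests, Limits: limits}},
	}}}
}

func main() {
	rl := func(cpu, mem string) v1.ResourceList {
		return v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse(cpu),
			v1.ResourceMemory: resource.MustParse(mem),
		}
	}
	// Guaranteed: requests == limits for every container and resource.
	fmt.Println(qos.GetPodQOS(podWith(rl("100m", "100Mi"), rl("100m", "100Mi"))))
	// Burstable: requests set but lower than limits (or limits absent).
	fmt.Println(qos.GetPodQOS(podWith(rl("100m", "100Mi"), rl("200m", "200Mi"))))
	// BestEffort: no requests and no limits at all.
	fmt.Println(qos.GetPodQOS(podWith(nil, nil)))
}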

vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go generated vendored Normal file

@@ -0,0 +1,79 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
"k8s.io/kubernetes/pkg/api/v1"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (
// PodInfraOOMAdj is very Docker specific. For an arbitrary runtime it may not make
// sense to set a sandbox-level oom score; e.g., a sandbox could be just a namespace
// without a process.
// TODO: Handle infra container oom score adj in a runtime agnostic way.
// TODO: Should handle critical pod oom score adj with a proper preemption priority.
// This is the workaround for https://github.com/kubernetes/kubernetes/issues/38322.
PodInfraOOMAdj int = -998
CriticalPodOOMAdj int = -998
KubeletOOMScoreAdj int = -999
DockerOOMScoreAdj int = -999
KubeProxyOOMScoreAdj int = -999
guaranteedOOMScoreAdj int = -998
besteffortOOMScoreAdj int = 1000
)
// GetContainerOOMScoreAdjust returns the amount by which the OOM score of all processes in the
// container should be adjusted.
// The OOM score of a process is the percentage of memory it consumes
// multiplied by 10 (barring exceptional cases) plus a configurable quantity
// between -1000 and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
// See https://lwn.net/Articles/391222/ for more information.
func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapacity int64) int {
if kubetypes.IsCriticalPod(pod) {
return CriticalPodOOMAdj
}
switch GetPodQOS(pod) {
case v1.PodQOSGuaranteed:
// Guaranteed containers should be the last to get killed.
return guaranteedOOMScoreAdj
case v1.PodQOSBestEffort:
return besteffortOOMScoreAdj
}
// Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally,
// we want to protect Burstable containers that consume less memory than requested.
// The formula below is a heuristic. A container requesting 10% of a system's
// memory will have an OOM score adjust of 900. If a process in that container
// uses more than 10% of memory, its OOM score will be 1000. The idea is that containers
// which use more than their request will have an OOM score of 1000 and will be prime
// targets for OOM kills.
// Note that this is a heuristic; it won't work if a container has many small processes.
memoryRequest := container.Resources.Requests.Memory().Value()
oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
// A guaranteed pod using 100% of memory has an OOM score of about 2
// (1000 + guaranteedOOMScoreAdj). Ensure that burstable pods always have a higher OOM score adjustment.
if int(oomScoreAdjust) < (1000 + guaranteedOOMScoreAdj) {
return (1000 + guaranteedOOMScoreAdj)
}
// Give burstable pods a higher chance of survival than best-effort pods.
if int(oomScoreAdjust) == besteffortOOMScoreAdj {
return int(oomScoreAdjust - 1)
}
return int(oomScoreAdjust)
}
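As a worked example of the heuristic above (a hedged sketch, not vendored code; burstableOOMScoreAdj is an illustrative name reproducing the burstable branch): a container requesting half of an 8 GB node's memory gets 1000 - (1000*4000000000)/8000000000 = 500, which sits inside the 495-505 window that policy_test.go below allows for cpuUnlimitedMemoryLimitedWithRequests:

package main

import "fmt"

// burstableOOMScoreAdj mirrors the burstable branch of
// GetContainerOOMScoreAdjust above for a single container.
func burstableOOMScoreAdj(memoryRequest, memoryCapacity int64) int {
	const guaranteedOOMScoreAdj = -998
	const besteffortOOMScoreAdj = 1000
	adj := 1000 - (1000*memoryRequest)/memoryCapacity
	// Floor: burstable pods must score higher than guaranteed pods.
	if int(adj) < 1000+guaranteedOOMScoreAdj {
		return 1000 + guaranteedOOMScoreAdj
	}
	// Ceiling: burstable pods must score lower than best-effort pods.
	if int(adj) == besteffortOOMScoreAdj {
		return int(adj) - 1
	}
	return int(adj)
}

func main() {
	fmt.Println(burstableOOMScoreAdj(4000000000, 8000000000)) // 500: request is half of capacity
	fmt.Println(burstableOOMScoreAdj(800000000, 8000000000))  // 900: the 10% example from the comment
	fmt.Println(burstableOOMScoreAdj(7999999999, 8000000000)) // 2: clamped to 1000 + guaranteedOOMScoreAdj
}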

vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy_test.go generated vendored Normal file

@@ -0,0 +1,224 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
"strconv"
"testing"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (
standardMemoryAmount = 8000000000
)
var (
cpuLimit = v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
},
},
},
},
},
}
memoryLimitCPURequest = v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("0"),
},
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
},
},
},
}
zeroMemoryLimit = v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceMemory): resource.MustParse("0"),
},
},
},
},
},
}
noRequestLimit = v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{},
},
},
},
}
equalRequestLimitCPUMemory = v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
v1.ResourceName(v1.ResourceCPU): resource.MustParse("5m"),
},
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("5m"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
},
},
},
}
cpuUnlimitedMemoryLimitedWithRequests = v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount / 2)),
v1.ResourceName(v1.ResourceCPU): resource.MustParse("5m"),
},
Limits: v1.ResourceList{
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
},
},
},
}
requestNoLimit = v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount - 1)),
v1.ResourceName(v1.ResourceCPU): resource.MustParse("5m"),
},
},
},
},
},
}
criticalPodWithNoLimit = v1.Pod{
ObjectMeta: v1.ObjectMeta{
Annotations: map[string]string{
kubetypes.CriticalPodAnnotationKey: "",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount - 1)),
v1.ResourceName(v1.ResourceCPU): resource.MustParse("5m"),
},
},
},
},
},
}
)
type oomTest struct {
pod *v1.Pod
memoryCapacity int64
lowOOMScoreAdj int // The minimum oom_score_adj the container should be assigned.
highOOMScoreAdj int // The maximum oom_score_adj the container should be assigned.
}
func TestGetContainerOOMScoreAdjust(t *testing.T) {
oomTests := []oomTest{
{
pod: &cpuLimit,
memoryCapacity: 4000000000,
lowOOMScoreAdj: 999,
highOOMScoreAdj: 999,
},
{
pod: &memoryLimitCPURequest,
memoryCapacity: 8000000000,
lowOOMScoreAdj: 999,
highOOMScoreAdj: 999,
},
{
pod: &zeroMemoryLimit,
memoryCapacity: 7230457451,
lowOOMScoreAdj: 1000,
highOOMScoreAdj: 1000,
},
{
pod: &noRequestLimit,
memoryCapacity: 4000000000,
lowOOMScoreAdj: 1000,
highOOMScoreAdj: 1000,
},
{
pod: &equalRequestLimitCPUMemory,
memoryCapacity: 123456789,
lowOOMScoreAdj: -998,
highOOMScoreAdj: -998,
},
{
pod: &cpuUnlimitedMemoryLimitedWithRequests,
memoryCapacity: standardMemoryAmount,
lowOOMScoreAdj: 495,
highOOMScoreAdj: 505,
},
{
pod: &requestNoLimit,
memoryCapacity: standardMemoryAmount,
lowOOMScoreAdj: 2,
highOOMScoreAdj: 2,
},
{
pod: &criticalPodWithNoLimit,
memoryCapacity: standardMemoryAmount,
lowOOMScoreAdj: -998,
highOOMScoreAdj: -998,
},
}
for _, test := range oomTests {
oomScoreAdj := GetContainerOOMScoreAdjust(test.pod, &test.pod.Spec.Containers[0], test.memoryCapacity)
if oomScoreAdj < test.lowOOMScoreAdj || oomScoreAdj > test.highOOMScoreAdj {
t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOOMScoreAdj, test.highOOMScoreAdj, oomScoreAdj)
}
}
}

vendor/k8s.io/kubernetes/pkg/kubelet/qos/qos.go generated vendored Normal file

@@ -0,0 +1,213 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
)
// isResourceGuaranteed returns true if the container's resource requirements are Guaranteed.
func isResourceGuaranteed(container *v1.Container, resource v1.ResourceName) bool {
// A container resource is guaranteed if its request == limit.
// If request == limit, the user is very confident of resource consumption.
req, hasReq := container.Resources.Requests[resource]
limit, hasLimit := container.Resources.Limits[resource]
if !hasReq || !hasLimit {
return false
}
return req.Cmp(limit) == 0 && req.Value() != 0
}
// isResourceBestEffort returns true if the container's resource requirements are best-effort.
func isResourceBestEffort(container *v1.Container, resource v1.ResourceName) bool {
// A container resource is best-effort if its request is unspecified or 0.
// If a request is specified, then the user expects some kind of resource guarantee.
req, hasReq := container.Resources.Requests[resource]
return !hasReq || req.Value() == 0
}
// GetPodQOS returns the QoS class of a pod.
// A pod is BestEffort if none of its containers have specified any requests or limits.
// A pod is Guaranteed only when requests and limits are specified for all containers and are equal.
// A pod is Burstable if requests and limits do not match across all containers.
func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
requests := v1.ResourceList{}
limits := v1.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
for _, container := range pod.Spec.Containers {
// process requests
for name, quantity := range container.Resources.Requests {
if !supportedQoSComputeResources.Has(string(name)) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.Copy()
if _, exists := requests[name]; !exists {
requests[name] = *delta
} else {
delta.Add(requests[name])
requests[name] = *delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !supportedQoSComputeResources.Has(string(name)) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.Copy()
if _, exists := limits[name]; !exists {
limits[name] = *delta
} else {
delta.Add(limits[name])
limits[name] = *delta
}
}
}
if len(qosLimitsFound) != len(supportedQoSComputeResources) {
isGuaranteed = false
}
}
if len(requests) == 0 && len(limits) == 0 {
return v1.PodQOSBestEffort
}
// Check if requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed &&
len(requests) == len(limits) {
return v1.PodQOSGuaranteed
}
return v1.PodQOSBurstable
}
// InternalGetPodQOS returns the QoS class of a pod.
// A pod is BestEffort if none of its containers have specified any requests or limits.
// A pod is Guaranteed only when requests and limits are specified for all containers and are equal.
// A pod is Burstable if requests and limits do not match across all containers.
func InternalGetPodQOS(pod *api.Pod) api.PodQOSClass {
requests := api.ResourceList{}
limits := api.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
for _, container := range pod.Spec.Containers {
// process requests
for name, quantity := range container.Resources.Requests {
if !supportedQoSComputeResources.Has(string(name)) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.Copy()
if _, exists := requests[name]; !exists {
requests[name] = *delta
} else {
delta.Add(requests[name])
requests[name] = *delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !supportedQoSComputeResources.Has(string(name)) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.Copy()
if _, exists := limits[name]; !exists {
limits[name] = *delta
} else {
delta.Add(limits[name])
limits[name] = *delta
}
}
}
if len(qosLimitsFound) != len(supportedQoSComputeResources) {
isGuaranteed = false
}
}
if len(requests) == 0 && len(limits) == 0 {
return api.PodQOSBestEffort
}
// Check if requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed &&
len(requests) == len(limits) {
return api.PodQOSGuaranteed
}
return api.PodQOSBurstable
}
// QOSList is a set of (resource name, QoS class) pairs.
type QOSList map[v1.ResourceName]v1.PodQOSClass
// GetQOS returns a mapping of resource name to QoS class for a container.
func GetQOS(container *v1.Container) QOSList {
resourceToQOS := QOSList{}
for resource := range allResources(container) {
switch {
case isResourceGuaranteed(container, resource):
resourceToQOS[resource] = v1.PodQOSGuaranteed
case isResourceBestEffort(container, resource):
resourceToQOS[resource] = v1.PodQOSBestEffort
default:
resourceToQOS[resource] = v1.PodQOSBurstable
}
}
return resourceToQOS
}
// supportedQoSComputeResources is the set of compute resources for which QoS is supported.
var supportedQoSComputeResources = sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory))
// allResources returns a map keyed by every supported compute resource; the value is true if that resource is present in the container's requests or limits.
func allResources(container *v1.Container) map[v1.ResourceName]bool {
resources := map[v1.ResourceName]bool{}
for _, resource := range supportedQoSComputeResources.List() {
resources[v1.ResourceName(resource)] = false
}
for resource := range container.Resources.Requests {
resources[resource] = true
}
for resource := range container.Resources.Limits {
resources[resource] = true
}
return resources
}
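GetQOS above classifies each supported compute resource independently, so a single container can be Guaranteed for CPU while Burstable for memory. A minimal hedged usage sketch, again assuming the vendored import paths:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/qos"
)

func main() {
	// CPU request == limit (Guaranteed); memory request < limit (Burstable).
	c := &v1.Container{
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("200Mi"),
			},
		},
	}
	// Expected output (map order varies): cpu: Guaranteed, memory: Burstable.
	for res, class := range qos.GetQOS(c) {
		fmt.Printf("%s: %s\n", res, class)
	}
}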

vendor/k8s.io/kubernetes/pkg/kubelet/qos/qos_test.go generated vendored Normal file

@@ -0,0 +1,176 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
"testing"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
)
func getResourceList(cpu, memory string) v1.ResourceList {
res := v1.ResourceList{}
if cpu != "" {
res[v1.ResourceCPU] = resource.MustParse(cpu)
}
if memory != "" {
res[v1.ResourceMemory] = resource.MustParse(memory)
}
return res
}
func addResource(rName, value string, rl v1.ResourceList) v1.ResourceList {
rl[v1.ResourceName(rName)] = resource.MustParse(value)
return rl
}
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
res := v1.ResourceRequirements{}
res.Requests = requests
res.Limits = limits
return res
}
func newContainer(name string, requests v1.ResourceList, limits v1.ResourceList) v1.Container {
return v1.Container{
Name: name,
Resources: getResourceRequirements(requests, limits),
}
}
func newPod(name string, containers []v1.Container) *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: containers,
},
}
}
func TestGetPodQOS(t *testing.T) {
testCases := []struct {
pod *v1.Pod
expected v1.PodQOSClass
}{
{
pod: newPod("guaranteed", []v1.Container{
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
}),
expected: v1.PodQOSGuaranteed,
},
{
pod: newPod("guaranteed-with-gpu", []v1.Container{
newContainer("guaranteed", getResourceList("100m", "100Mi"), addResource("nvidia-gpu", "2", getResourceList("100m", "100Mi"))),
}),
expected: v1.PodQOSGuaranteed,
},
{
pod: newPod("guaranteed-guaranteed", []v1.Container{
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
}),
expected: v1.PodQOSGuaranteed,
},
{
pod: newPod("guaranteed-guaranteed-with-gpu", []v1.Container{
newContainer("guaranteed", getResourceList("100m", "100Mi"), addResource("nvidia-gpu", "2", getResourceList("100m", "100Mi"))),
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
}),
expected: v1.PodQOSGuaranteed,
},
{
pod: newPod("best-effort-best-effort", []v1.Container{
newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
}),
expected: v1.PodQOSBestEffort,
},
{
pod: newPod("best-effort-best-effort-with-gpu", []v1.Container{
newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))),
newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
}),
expected: v1.PodQOSBestEffort,
},
{
pod: newPod("best-effort-with-gpu", []v1.Container{
newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))),
}),
expected: v1.PodQOSBestEffort,
},
{
pod: newPod("best-effort-burstable", []v1.Container{
newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))),
newContainer("burstable", getResourceList("1", ""), getResourceList("2", "")),
}),
expected: v1.PodQOSBurstable,
},
{
pod: newPod("best-effort-guaranteed", []v1.Container{
newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))),
newContainer("guaranteed", getResourceList("10m", "100Mi"), getResourceList("10m", "100Mi")),
}),
expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-cpu-guaranteed-memory", []v1.Container{
newContainer("burstable", getResourceList("", "100Mi"), getResourceList("", "100Mi")),
}),
expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-no-limits", []v1.Container{
newContainer("burstable", getResourceList("100m", "100Mi"), getResourceList("", "")),
}),
expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-guaranteed", []v1.Container{
newContainer("burstable", getResourceList("1", "100Mi"), getResourceList("2", "100Mi")),
newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
}),
expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-unbounded-but-requests-match-limits", []v1.Container{
newContainer("burstable", getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
newContainer("burstable-unbounded", getResourceList("100m", "100Mi"), getResourceList("", "")),
}),
expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-1", []v1.Container{
newContainer("burstable", getResourceList("10m", "100Mi"), getResourceList("100m", "200Mi")),
}),
expected: v1.PodQOSBurstable,
},
{
pod: newPod("burstable-2", []v1.Container{
newContainer("burstable", getResourceList("0", "0"), addResource("nvidia-gpu", "2", getResourceList("100m", "200Mi"))),
}),
expected: v1.PodQOSBurstable,
},
}
for id, testCase := range testCases {
if actual := GetPodQOS(testCase.pod); testCase.expected != actual {
t.Errorf("[%d]: invalid qos pod %s, expected: %s, actual: %s", id, testCase.pod.Name, testCase.expected, actual)
}
}
}