vendor: remove dep and use vndr
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
parent 16f44674a4
commit 148e72d81e

16131 changed files with 73815 additions and 4235138 deletions
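This switches the vendoring workflow from dep to vndr. Below is a minimal sketch of that workflow, assuming the LK4D4/vndr tool with a vendor.conf manifest at the repository root; the import paths and revisions are illustrative placeholders, not the pins used by this commit.

    # vendor.conf: one "<import path> <revision> [alternate repo URL]" line per dependency
    github.com/gogo/protobuf v0.3
    k8s.io/kubernetes v1.6.0-alpha.1 https://github.com/kubernetes/kubernetes

    # re-populate vendor/ from the manifest (run at the repository root)
    vndr

vndr copies only what is needed to build the listed packages, which is presumably why upstream metadata such as the BUILD and OWNERS files and the *_test.go files shown below disappear from vendor/.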
vendor/k8s.io/kubernetes/pkg/api/v1/BUILD (generated, vendored, 111 changes)
@@ -1,111 +0,0 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "conversion.go",
        "defaults.go",
        "doc.go",
        "generate.go",
        "generated.pb.go",
        "helpers.go",
        "meta.go",
        "ref.go",
        "register.go",
        "resource_helpers.go",
        "types.generated.go",
        "types.go",
        "types_swagger_doc_generated.go",
        "zz_generated.conversion.go",
        "zz_generated.deepcopy.go",
        "zz_generated.defaults.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/util:go_default_library",
        "//pkg/util/intstr:go_default_library",
        "//pkg/util/parsers:go_default_library",
        "//vendor:github.com/gogo/protobuf/proto",
        "//vendor:github.com/gogo/protobuf/sortkeys",
        "//vendor:github.com/ugorji/go/codec",
        "//vendor:k8s.io/apimachinery/pkg/api/meta",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/conversion",
        "//vendor:k8s.io/apimachinery/pkg/labels",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
        "//vendor:k8s.io/apimachinery/pkg/selection",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/rand",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/validation/field",
    ],
)

go_test(
    name = "go_default_xtest",
    srcs = [
        "backward_compatibility_test.go",
        "conversion_test.go",
        "defaults_test.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//pkg/api/testing/compat:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/api/validation:go_default_library",
        "//pkg/util/intstr:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/diff",
        "//vendor:k8s.io/apimachinery/pkg/util/validation/field",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "helpers_test.go",
        "resource_helpers_test.go",
    ],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/labels",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/api/v1/endpoints:all-srcs",
        "//pkg/api/v1/pod:all-srcs",
        "//pkg/api/v1/service:all-srcs",
        "//pkg/api/v1/validation:all-srcs",
    ],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/api/v1/OWNERS (generated, vendored, 41 changes)
@@ -1,41 +0,0 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- yujuhong
- brendandburns
- derekwaynecarr
- caesarxuchao
- vishh
- mikedanese
- liggitt
- nikhiljindal
- bprashanth
- gmarek
- erictune
- davidopp
- pmorie
- sttts
- kargakis
- dchen1107
- saad-ali
- zmerlynn
- luxas
- janetkuo
- justinsb
- roberthbailey
- ncdc
- timstclair
- eparis
- timothysc
- piosz
- jsafrane
- dims
- errordeveloper
- madhusudancs
- krousey
- jayunit100
- rootfs
- markturansky
vendor/k8s.io/kubernetes/pkg/api/v1/backward_compatibility_test.go (generated, vendored, 229 changes)
@@ -1,229 +0,0 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testing/compat"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/validation"
|
||||
)
|
||||
|
||||
func TestCompatibility_v1_PodSecurityContext(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
input string
|
||||
expectedKeys map[string]string
|
||||
absentKeys []string
|
||||
}{
|
||||
{
|
||||
name: "hostNetwork = true",
|
||||
input: `
|
||||
{
|
||||
"kind":"Pod",
|
||||
"apiVersion":"v1",
|
||||
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
|
||||
"spec": {
|
||||
"hostNetwork": true,
|
||||
"containers":[{
|
||||
"name":"a",
|
||||
"image":"my-container-image"
|
||||
}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
expectedKeys: map[string]string{
|
||||
"spec.hostNetwork": "true",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "hostNetwork = false",
|
||||
input: `
|
||||
{
|
||||
"kind":"Pod",
|
||||
"apiVersion":"v1",
|
||||
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
|
||||
"spec": {
|
||||
"hostNetwork": false,
|
||||
"containers":[{
|
||||
"name":"a",
|
||||
"image":"my-container-image"
|
||||
}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
absentKeys: []string{
|
||||
"spec.hostNetwork",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "hostIPC = true",
|
||||
input: `
|
||||
{
|
||||
"kind":"Pod",
|
||||
"apiVersion":"v1",
|
||||
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
|
||||
"spec": {
|
||||
"hostIPC": true,
|
||||
"containers":[{
|
||||
"name":"a",
|
||||
"image":"my-container-image"
|
||||
}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
expectedKeys: map[string]string{
|
||||
"spec.hostIPC": "true",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "hostIPC = false",
|
||||
input: `
|
||||
{
|
||||
"kind":"Pod",
|
||||
"apiVersion":"v1",
|
||||
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
|
||||
"spec": {
|
||||
"hostIPC": false,
|
||||
"containers":[{
|
||||
"name":"a",
|
||||
"image":"my-container-image"
|
||||
}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
absentKeys: []string{
|
||||
"spec.hostIPC",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "hostPID = true",
|
||||
input: `
|
||||
{
|
||||
"kind":"Pod",
|
||||
"apiVersion":"v1",
|
||||
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
|
||||
"spec": {
|
||||
"hostPID": true,
|
||||
"containers":[{
|
||||
"name":"a",
|
||||
"image":"my-container-image"
|
||||
}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
expectedKeys: map[string]string{
|
||||
"spec.hostPID": "true",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "hostPID = false",
|
||||
input: `
|
||||
{
|
||||
"kind":"Pod",
|
||||
"apiVersion":"v1",
|
||||
"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
|
||||
"spec": {
|
||||
"hostPID": false,
|
||||
"containers":[{
|
||||
"name":"a",
|
||||
"image":"my-container-image"
|
||||
}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
absentKeys: []string{
|
||||
"spec.hostPID",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "reseting defaults for pre-v1.1 mirror pods",
|
||||
input: `
|
||||
{
|
||||
"kind":"Pod",
|
||||
"apiVersion":"v1",
|
||||
"metadata":{
|
||||
"name":"my-pod-name",
|
||||
"namespace":"my-pod-namespace",
|
||||
"annotations": {
|
||||
"kubernetes.io/config.mirror": "mirror"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers":[{
|
||||
"name":"a",
|
||||
"image":"my-container-image",
|
||||
"resources": {
|
||||
"limits": {
|
||||
"cpu": "100m"
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
absentKeys: []string{
|
||||
"spec.terminationGracePeriodSeconds",
|
||||
"spec.containers[0].resources.requests",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "preserving defaults for v1.1+ mirror pods",
|
||||
input: `
|
||||
{
|
||||
"kind":"Pod",
|
||||
"apiVersion":"v1",
|
||||
"metadata":{
|
||||
"name":"my-pod-name",
|
||||
"namespace":"my-pod-namespace",
|
||||
"annotations": {
|
||||
"kubernetes.io/config.mirror": "cbe924f710c7e26f7693d6a341bcfad0"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers":[{
|
||||
"name":"a",
|
||||
"image":"my-container-image",
|
||||
"resources": {
|
||||
"limits": {
|
||||
"cpu": "100m"
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
expectedKeys: map[string]string{
|
||||
"spec.terminationGracePeriodSeconds": "30",
|
||||
"spec.containers[0].resources.requests": "map[cpu:100m]",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
validator := func(obj runtime.Object) field.ErrorList {
|
||||
return validation.ValidatePodSpec(&(obj.(*api.Pod).Spec), field.NewPath("spec"))
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Logf("Testing 1.0.0 backward compatibility for %v", tc.name)
|
||||
compat.TestCompatibility(t, v1.SchemeGroupVersion, []byte(tc.input), validator, tc.expectedKeys, tc.absentKeys)
|
||||
}
|
||||
}
|
vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go (generated, vendored, 23 changes)
@@ -29,14 +29,6 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
)
|
||||
|
||||
const (
|
||||
// Annotation key used to identify mirror pods.
|
||||
mirrorAnnotationKey = "kubernetes.io/config.mirror"
|
||||
|
||||
// Value used to identify mirror pods from pre-v1.1 kubelet.
|
||||
mirrorAnnotationValue_1_0 = "mirror"
|
||||
)
|
||||
|
||||
// This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are
|
||||
// converted the most in the cluster.
|
||||
// TODO: generate one of these for every external API group - this is to prove the impact
@@ -279,7 +271,7 @@ func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *ReplicationCo
func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error {
|
||||
out.Replicas = *in.Replicas
|
||||
if in.Selector != nil {
|
||||
api.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s)
|
||||
metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s)
|
||||
}
|
||||
if in.Template != nil {
|
||||
if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, &out.Template, s); err != nil {
@@ -322,7 +314,7 @@ func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *exten
out.MinReadySeconds = in.MinReadySeconds
|
||||
var invalidErr error
|
||||
if in.Selector != nil {
|
||||
invalidErr = api.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s)
|
||||
invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s)
|
||||
}
|
||||
out.Template = new(PodTemplateSpec)
|
||||
if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil {
@@ -587,17 +579,6 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error
out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value)
|
||||
}
|
||||
|
||||
// We need to reset certain fields for mirror pods from pre-v1.1 kubelet
|
||||
// (#15960).
|
||||
// TODO: Remove this code after we drop support for v1.0 kubelets.
|
||||
if value, ok := in.Annotations[mirrorAnnotationKey]; ok && value == mirrorAnnotationValue_1_0 {
|
||||
// Reset the TerminationGracePeriodSeconds.
|
||||
out.Spec.TerminationGracePeriodSeconds = nil
|
||||
// Reset the resource requests.
|
||||
for i := range out.Spec.Containers {
|
||||
out.Spec.Containers[i].Resources.Requests = nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
vendor/k8s.io/kubernetes/pkg/api/v1/conversion_test.go (generated, vendored, 217 changes)
@@ -1,217 +0,0 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1_test
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
func TestPodLogOptions(t *testing.T) {
|
||||
sinceSeconds := int64(1)
|
||||
sinceTime := metav1.NewTime(time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC).Local())
|
||||
tailLines := int64(2)
|
||||
limitBytes := int64(3)
|
||||
|
||||
versionedLogOptions := &v1.PodLogOptions{
|
||||
Container: "mycontainer",
|
||||
Follow: true,
|
||||
Previous: true,
|
||||
SinceSeconds: &sinceSeconds,
|
||||
SinceTime: &sinceTime,
|
||||
Timestamps: true,
|
||||
TailLines: &tailLines,
|
||||
LimitBytes: &limitBytes,
|
||||
}
|
||||
unversionedLogOptions := &api.PodLogOptions{
|
||||
Container: "mycontainer",
|
||||
Follow: true,
|
||||
Previous: true,
|
||||
SinceSeconds: &sinceSeconds,
|
||||
SinceTime: &sinceTime,
|
||||
Timestamps: true,
|
||||
TailLines: &tailLines,
|
||||
LimitBytes: &limitBytes,
|
||||
}
|
||||
expectedParameters := url.Values{
|
||||
"container": {"mycontainer"},
|
||||
"follow": {"true"},
|
||||
"previous": {"true"},
|
||||
"sinceSeconds": {"1"},
|
||||
"sinceTime": {"2000-01-01T12:34:56Z"},
|
||||
"timestamps": {"true"},
|
||||
"tailLines": {"2"},
|
||||
"limitBytes": {"3"},
|
||||
}
|
||||
|
||||
codec := runtime.NewParameterCodec(api.Scheme)
|
||||
|
||||
// unversioned -> query params
|
||||
{
|
||||
actualParameters, err := codec.EncodeParameters(unversionedLogOptions, v1.SchemeGroupVersion)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(actualParameters, expectedParameters) {
|
||||
t.Fatalf("Expected\n%#v\ngot\n%#v", expectedParameters, actualParameters)
|
||||
}
|
||||
}
|
||||
|
||||
// versioned -> query params
|
||||
{
|
||||
actualParameters, err := codec.EncodeParameters(versionedLogOptions, v1.SchemeGroupVersion)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(actualParameters, expectedParameters) {
|
||||
t.Fatalf("Expected\n%#v\ngot\n%#v", expectedParameters, actualParameters)
|
||||
}
|
||||
}
|
||||
|
||||
// query params -> versioned
|
||||
{
|
||||
convertedLogOptions := &v1.PodLogOptions{}
|
||||
err := codec.DecodeParameters(expectedParameters, v1.SchemeGroupVersion, convertedLogOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(convertedLogOptions, versionedLogOptions) {
|
||||
t.Fatalf("Unexpected deserialization:\n%s", diff.ObjectGoPrintSideBySide(versionedLogOptions, convertedLogOptions))
|
||||
}
|
||||
}
|
||||
|
||||
// query params -> unversioned
|
||||
{
|
||||
convertedLogOptions := &api.PodLogOptions{}
|
||||
err := codec.DecodeParameters(expectedParameters, v1.SchemeGroupVersion, convertedLogOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(convertedLogOptions, unversionedLogOptions) {
|
||||
t.Fatalf("Unexpected deserialization:\n%s", diff.ObjectGoPrintSideBySide(unversionedLogOptions, convertedLogOptions))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestPodSpecConversion tests that ServiceAccount is an alias for
|
||||
// ServiceAccountName.
|
||||
func TestPodSpecConversion(t *testing.T) {
|
||||
name, other := "foo", "bar"
|
||||
|
||||
// Test internal -> v1. Should have both alias (DeprecatedServiceAccount)
|
||||
// and new field (ServiceAccountName).
|
||||
i := &api.PodSpec{
|
||||
ServiceAccountName: name,
|
||||
}
|
||||
v := v1.PodSpec{}
|
||||
if err := api.Scheme.Convert(i, &v, nil); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if v.ServiceAccountName != name {
|
||||
t.Fatalf("want v1.ServiceAccountName %q, got %q", name, v.ServiceAccountName)
|
||||
}
|
||||
if v.DeprecatedServiceAccount != name {
|
||||
t.Fatalf("want v1.DeprecatedServiceAccount %q, got %q", name, v.DeprecatedServiceAccount)
|
||||
}
|
||||
|
||||
// Test v1 -> internal. Either DeprecatedServiceAccount, ServiceAccountName,
|
||||
// or both should translate to ServiceAccountName. ServiceAccountName wins
|
||||
// if both are set.
|
||||
testCases := []*v1.PodSpec{
|
||||
// New
|
||||
{ServiceAccountName: name},
|
||||
// Alias
|
||||
{DeprecatedServiceAccount: name},
|
||||
// Both: same
|
||||
{ServiceAccountName: name, DeprecatedServiceAccount: name},
|
||||
// Both: different
|
||||
{ServiceAccountName: name, DeprecatedServiceAccount: other},
|
||||
}
|
||||
for k, v := range testCases {
|
||||
got := api.PodSpec{}
|
||||
err := api.Scheme.Convert(v, &got, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error for case %d: %v", k, err)
|
||||
}
|
||||
if got.ServiceAccountName != name {
|
||||
t.Fatalf("want api.ServiceAccountName %q, got %q", name, got.ServiceAccountName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceListConversion(t *testing.T) {
|
||||
bigMilliQuantity := resource.NewQuantity(resource.MaxMilliValue, resource.DecimalSI)
|
||||
bigMilliQuantity.Add(resource.MustParse("12345m"))
|
||||
|
||||
tests := []struct {
|
||||
input v1.ResourceList
|
||||
expected api.ResourceList
|
||||
}{
|
||||
{ // No changes necessary.
|
||||
input: v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("30M"),
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceStorage: resource.MustParse("1G"),
|
||||
},
|
||||
expected: api.ResourceList{
|
||||
api.ResourceMemory: resource.MustParse("30M"),
|
||||
api.ResourceCPU: resource.MustParse("100m"),
|
||||
api.ResourceStorage: resource.MustParse("1G"),
|
||||
},
|
||||
},
|
||||
{ // Nano-scale values should be rounded up to milli-scale.
|
||||
input: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("3.000023m"),
|
||||
v1.ResourceMemory: resource.MustParse("500.000050m"),
|
||||
},
|
||||
expected: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("4m"),
|
||||
api.ResourceMemory: resource.MustParse("501m"),
|
||||
},
|
||||
},
|
||||
{ // Large values should still be accurate.
|
||||
input: v1.ResourceList{
|
||||
v1.ResourceCPU: *bigMilliQuantity.Copy(),
|
||||
v1.ResourceStorage: *bigMilliQuantity.Copy(),
|
||||
},
|
||||
expected: api.ResourceList{
|
||||
api.ResourceCPU: *bigMilliQuantity.Copy(),
|
||||
api.ResourceStorage: *bigMilliQuantity.Copy(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
output := api.ResourceList{}
|
||||
err := api.Scheme.Convert(&test.input, &output, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error for case %d: %v", i, err)
|
||||
}
|
||||
if !api.Semantic.DeepEqual(test.expected, output) {
|
||||
t.Errorf("unexpected conversion for case %d: Expected %+v; Got %+v", i, test.expected, output)
|
||||
}
|
||||
}
|
||||
}
|
vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go (generated, vendored, 37 changes)
@@ -18,8 +18,8 @@ package v1
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/pkg/util/parsers"
|
||||
)
@@ -39,6 +39,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {
SetDefaults_SecretVolumeSource,
|
||||
SetDefaults_ConfigMapVolumeSource,
|
||||
SetDefaults_DownwardAPIVolumeSource,
|
||||
SetDefaults_ProjectedVolumeSource,
|
||||
SetDefaults_Secret,
|
||||
SetDefaults_PersistentVolume,
|
||||
SetDefaults_PersistentVolumeClaim,
@@ -159,6 +160,18 @@ func SetDefaults_Pod(obj *Pod) {
}
|
||||
}
|
||||
}
|
||||
for i := range obj.Spec.InitContainers {
|
||||
if obj.Spec.InitContainers[i].Resources.Limits != nil {
|
||||
if obj.Spec.InitContainers[i].Resources.Requests == nil {
|
||||
obj.Spec.InitContainers[i].Resources.Requests = make(ResourceList)
|
||||
}
|
||||
for key, value := range obj.Spec.InitContainers[i].Resources.Limits {
|
||||
if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists {
|
||||
obj.Spec.InitContainers[i].Resources.Requests[key] = *(value.Copy())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func SetDefaults_PodSpec(obj *PodSpec) {
|
||||
if obj.DNSPolicy == "" {
@@ -169,6 +182,7 @@ func SetDefaults_PodSpec(obj *PodSpec) {
}
|
||||
if obj.HostNetwork {
|
||||
defaultHostNetworkPorts(&obj.Containers)
|
||||
defaultHostNetworkPorts(&obj.InitContainers)
|
||||
}
|
||||
if obj.SecurityContext == nil {
|
||||
obj.SecurityContext = &PodSecurityContext{}
@@ -218,6 +232,12 @@ func SetDefaults_Secret(obj *Secret) {
obj.Type = SecretTypeOpaque
|
||||
}
|
||||
}
|
||||
func SetDefaults_ProjectedVolumeSource(obj *ProjectedVolumeSource) {
|
||||
if obj.DefaultMode == nil {
|
||||
perm := int32(ProjectedVolumeSourceDefaultMode)
|
||||
obj.DefaultMode = &perm
|
||||
}
|
||||
}
|
||||
func SetDefaults_PersistentVolume(obj *PersistentVolume) {
|
||||
if obj.Status.Phase == "" {
|
||||
obj.Status.Phase = VolumePending
@@ -352,3 +372,18 @@ func SetDefaults_RBDVolumeSource(obj *RBDVolumeSource) {
obj.Keyring = "/etc/ceph/keyring"
|
||||
}
|
||||
}
|
||||
|
||||
func SetDefaults_ScaleIOVolumeSource(obj *ScaleIOVolumeSource) {
|
||||
if obj.ProtectionDomain == "" {
|
||||
obj.ProtectionDomain = "default"
|
||||
}
|
||||
if obj.StoragePool == "" {
|
||||
obj.StoragePool = "default"
|
||||
}
|
||||
if obj.StorageMode == "" {
|
||||
obj.StorageMode = "ThinProvisioned"
|
||||
}
|
||||
if obj.FSType == "" {
|
||||
obj.FSType = "xfs"
|
||||
}
|
||||
}
vendor/k8s.io/kubernetes/pkg/api/v1/defaults_test.go (generated, vendored, 818 changes)
@@ -1,818 +0,0 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
)
|
||||
|
||||
func roundTrip(t *testing.T, obj runtime.Object) runtime.Object {
|
||||
codec := api.Codecs.LegacyCodec(v1.SchemeGroupVersion)
|
||||
data, err := runtime.Encode(codec, obj)
|
||||
if err != nil {
|
||||
t.Errorf("%v\n %#v", err, obj)
|
||||
return nil
|
||||
}
|
||||
obj2, err := runtime.Decode(codec, data)
|
||||
if err != nil {
|
||||
t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj)
|
||||
return nil
|
||||
}
|
||||
obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object)
|
||||
err = api.Scheme.Convert(obj2, obj3, nil)
|
||||
if err != nil {
|
||||
t.Errorf("%v\nSource: %#v", err, obj2)
|
||||
return nil
|
||||
}
|
||||
return obj3
|
||||
}
|
||||
|
||||
func TestSetDefaultReplicationController(t *testing.T) {
|
||||
tests := []struct {
|
||||
rc *v1.ReplicationController
|
||||
expectLabels bool
|
||||
expectSelector bool
|
||||
}{
|
||||
{
|
||||
rc: &v1.ReplicationController{
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectLabels: true,
|
||||
expectSelector: true,
|
||||
},
|
||||
{
|
||||
rc: &v1.ReplicationController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"bar": "foo",
|
||||
},
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectLabels: false,
|
||||
expectSelector: true,
|
||||
},
|
||||
{
|
||||
rc: &v1.ReplicationController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"bar": "foo",
|
||||
},
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Selector: map[string]string{
|
||||
"some": "other",
|
||||
},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectLabels: false,
|
||||
expectSelector: false,
|
||||
},
|
||||
{
|
||||
rc: &v1.ReplicationController{
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Selector: map[string]string{
|
||||
"some": "other",
|
||||
},
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectLabels: true,
|
||||
expectSelector: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
rc := test.rc
|
||||
obj2 := roundTrip(t, runtime.Object(rc))
|
||||
rc2, ok := obj2.(*v1.ReplicationController)
|
||||
if !ok {
|
||||
t.Errorf("unexpected object: %v", rc2)
|
||||
t.FailNow()
|
||||
}
|
||||
if test.expectSelector != reflect.DeepEqual(rc2.Spec.Selector, rc2.Spec.Template.Labels) {
|
||||
if test.expectSelector {
|
||||
t.Errorf("expected: %v, got: %v", rc2.Spec.Template.Labels, rc2.Spec.Selector)
|
||||
} else {
|
||||
t.Errorf("unexpected equality: %v", rc.Spec.Selector)
|
||||
}
|
||||
}
|
||||
if test.expectLabels != reflect.DeepEqual(rc2.Labels, rc2.Spec.Template.Labels) {
|
||||
if test.expectLabels {
|
||||
t.Errorf("expected: %v, got: %v", rc2.Spec.Template.Labels, rc2.Labels)
|
||||
} else {
|
||||
t.Errorf("unexpected equality: %v", rc.Labels)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newInt(val int32) *int32 {
|
||||
p := new(int32)
|
||||
*p = val
|
||||
return p
|
||||
}
|
||||
|
||||
func TestSetDefaultReplicationControllerReplicas(t *testing.T) {
|
||||
tests := []struct {
|
||||
rc v1.ReplicationController
|
||||
expectReplicas int32
|
||||
}{
|
||||
{
|
||||
rc: v1.ReplicationController{
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectReplicas: 1,
|
||||
},
|
||||
{
|
||||
rc: v1.ReplicationController{
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: newInt(0),
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectReplicas: 0,
|
||||
},
|
||||
{
|
||||
rc: v1.ReplicationController{
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: newInt(3),
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectReplicas: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
rc := &test.rc
|
||||
obj2 := roundTrip(t, runtime.Object(rc))
|
||||
rc2, ok := obj2.(*v1.ReplicationController)
|
||||
if !ok {
|
||||
t.Errorf("unexpected object: %v", rc2)
|
||||
t.FailNow()
|
||||
}
|
||||
if rc2.Spec.Replicas == nil {
|
||||
t.Errorf("unexpected nil Replicas")
|
||||
} else if test.expectReplicas != *rc2.Spec.Replicas {
|
||||
t.Errorf("expected: %d replicas, got: %d", test.expectReplicas, *rc2.Spec.Replicas)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultReplicationControllerImagePullPolicy(t *testing.T) {
|
||||
containersWithoutPullPolicy, _ := json.Marshal([]map[string]interface{}{
|
||||
{
|
||||
"name": "install",
|
||||
"image": "busybox:latest",
|
||||
},
|
||||
})
|
||||
|
||||
containersWithPullPolicy, _ := json.Marshal([]map[string]interface{}{
|
||||
{
|
||||
"name": "install",
|
||||
"imagePullPolicy": "IfNotPresent",
|
||||
},
|
||||
})
|
||||
|
||||
tests := []struct {
|
||||
rc v1.ReplicationController
|
||||
expectPullPolicy v1.PullPolicy
|
||||
}{
|
||||
{
|
||||
rc: v1.ReplicationController{
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
"pod.beta.kubernetes.io/init-containers": string(containersWithoutPullPolicy),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectPullPolicy: v1.PullAlways,
|
||||
},
|
||||
{
|
||||
rc: v1.ReplicationController{
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
"pod.beta.kubernetes.io/init-containers": string(containersWithPullPolicy),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectPullPolicy: v1.PullIfNotPresent,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
rc := &test.rc
|
||||
obj2 := roundTrip(t, runtime.Object(rc))
|
||||
rc2, ok := obj2.(*v1.ReplicationController)
|
||||
if !ok {
|
||||
t.Errorf("unexpected object: %v", rc2)
|
||||
t.FailNow()
|
||||
}
|
||||
if test.expectPullPolicy != rc2.Spec.Template.Spec.InitContainers[0].ImagePullPolicy {
|
||||
t.Errorf("expected ImagePullPolicy: %s, got: %s",
|
||||
test.expectPullPolicy,
|
||||
rc2.Spec.Template.Spec.InitContainers[0].ImagePullPolicy,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultService(t *testing.T) {
|
||||
svc := &v1.Service{}
|
||||
obj2 := roundTrip(t, runtime.Object(svc))
|
||||
svc2 := obj2.(*v1.Service)
|
||||
if svc2.Spec.SessionAffinity != v1.ServiceAffinityNone {
|
||||
t.Errorf("Expected default session affinity type:%s, got: %s", v1.ServiceAffinityNone, svc2.Spec.SessionAffinity)
|
||||
}
|
||||
if svc2.Spec.Type != v1.ServiceTypeClusterIP {
|
||||
t.Errorf("Expected default type:%s, got: %s", v1.ServiceTypeClusterIP, svc2.Spec.Type)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultSecretVolumeSource(t *testing.T) {
|
||||
s := v1.PodSpec{}
|
||||
s.Volumes = []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
Spec: s,
|
||||
}
|
||||
output := roundTrip(t, runtime.Object(pod))
|
||||
pod2 := output.(*v1.Pod)
|
||||
defaultMode := pod2.Spec.Volumes[0].VolumeSource.Secret.DefaultMode
|
||||
expectedMode := v1.SecretVolumeSourceDefaultMode
|
||||
|
||||
if defaultMode == nil || *defaultMode != expectedMode {
|
||||
t.Errorf("Expected secret DefaultMode %v, got %v", expectedMode, defaultMode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultConfigMapVolumeSource(t *testing.T) {
|
||||
s := v1.PodSpec{}
|
||||
s.Volumes = []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
Spec: s,
|
||||
}
|
||||
output := roundTrip(t, runtime.Object(pod))
|
||||
pod2 := output.(*v1.Pod)
|
||||
defaultMode := pod2.Spec.Volumes[0].VolumeSource.ConfigMap.DefaultMode
|
||||
expectedMode := v1.ConfigMapVolumeSourceDefaultMode
|
||||
|
||||
if defaultMode == nil || *defaultMode != expectedMode {
|
||||
t.Errorf("Expected ConfigMap DefaultMode %v, got %v", expectedMode, defaultMode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultDownwardAPIVolumeSource(t *testing.T) {
|
||||
s := v1.PodSpec{}
|
||||
s.Volumes = []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
DownwardAPI: &v1.DownwardAPIVolumeSource{},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
Spec: s,
|
||||
}
|
||||
output := roundTrip(t, runtime.Object(pod))
|
||||
pod2 := output.(*v1.Pod)
|
||||
defaultMode := pod2.Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode
|
||||
expectedMode := v1.DownwardAPIVolumeSourceDefaultMode
|
||||
|
||||
if defaultMode == nil || *defaultMode != expectedMode {
|
||||
t.Errorf("Expected DownwardAPI DefaultMode %v, got %v", expectedMode, defaultMode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultSecret(t *testing.T) {
|
||||
s := &v1.Secret{}
|
||||
obj2 := roundTrip(t, runtime.Object(s))
|
||||
s2 := obj2.(*v1.Secret)
|
||||
|
||||
if s2.Type != v1.SecretTypeOpaque {
|
||||
t.Errorf("Expected secret type %v, got %v", v1.SecretTypeOpaque, s2.Type)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultPersistentVolume(t *testing.T) {
|
||||
pv := &v1.PersistentVolume{}
|
||||
obj2 := roundTrip(t, runtime.Object(pv))
|
||||
pv2 := obj2.(*v1.PersistentVolume)
|
||||
|
||||
if pv2.Status.Phase != v1.VolumePending {
|
||||
t.Errorf("Expected volume phase %v, got %v", v1.VolumePending, pv2.Status.Phase)
|
||||
}
|
||||
if pv2.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain {
|
||||
t.Errorf("Expected pv reclaim policy %v, got %v", v1.PersistentVolumeReclaimRetain, pv2.Spec.PersistentVolumeReclaimPolicy)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultPersistentVolumeClaim(t *testing.T) {
|
||||
pvc := &v1.PersistentVolumeClaim{}
|
||||
obj2 := roundTrip(t, runtime.Object(pvc))
|
||||
pvc2 := obj2.(*v1.PersistentVolumeClaim)
|
||||
|
||||
if pvc2.Status.Phase != v1.ClaimPending {
|
||||
t.Errorf("Expected claim phase %v, got %v", v1.ClaimPending, pvc2.Status.Phase)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaulEndpointsProtocol(t *testing.T) {
|
||||
in := &v1.Endpoints{Subsets: []v1.EndpointSubset{
|
||||
{Ports: []v1.EndpointPort{{}, {Protocol: "UDP"}, {}}},
|
||||
}}
|
||||
obj := roundTrip(t, runtime.Object(in))
|
||||
out := obj.(*v1.Endpoints)
|
||||
|
||||
for i := range out.Subsets {
|
||||
for j := range out.Subsets[i].Ports {
|
||||
if in.Subsets[i].Ports[j].Protocol == "" {
|
||||
if out.Subsets[i].Ports[j].Protocol != v1.ProtocolTCP {
|
||||
t.Errorf("Expected protocol %s, got %s", v1.ProtocolTCP, out.Subsets[i].Ports[j].Protocol)
|
||||
}
|
||||
} else {
|
||||
if out.Subsets[i].Ports[j].Protocol != in.Subsets[i].Ports[j].Protocol {
|
||||
t.Errorf("Expected protocol %s, got %s", in.Subsets[i].Ports[j].Protocol, out.Subsets[i].Ports[j].Protocol)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaulServiceTargetPort(t *testing.T) {
|
||||
in := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1234}}}}
|
||||
obj := roundTrip(t, runtime.Object(in))
|
||||
out := obj.(*v1.Service)
|
||||
if out.Spec.Ports[0].TargetPort != intstr.FromInt(1234) {
|
||||
t.Errorf("Expected TargetPort to be defaulted, got %v", out.Spec.Ports[0].TargetPort)
|
||||
}
|
||||
|
||||
in = &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1234, TargetPort: intstr.FromInt(5678)}}}}
|
||||
obj = roundTrip(t, runtime.Object(in))
|
||||
out = obj.(*v1.Service)
|
||||
if out.Spec.Ports[0].TargetPort != intstr.FromInt(5678) {
|
||||
t.Errorf("Expected TargetPort to be unchanged, got %v", out.Spec.Ports[0].TargetPort)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultServicePort(t *testing.T) {
|
||||
// Unchanged if set.
|
||||
in := &v1.Service{Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{
|
||||
{Protocol: "UDP", Port: 9376, TargetPort: intstr.FromString("p")},
|
||||
{Protocol: "UDP", Port: 8675, TargetPort: intstr.FromInt(309)},
|
||||
},
|
||||
}}
|
||||
out := roundTrip(t, runtime.Object(in)).(*v1.Service)
|
||||
if out.Spec.Ports[0].Protocol != v1.ProtocolUDP {
|
||||
t.Errorf("Expected protocol %s, got %s", v1.ProtocolUDP, out.Spec.Ports[0].Protocol)
|
||||
}
|
||||
if out.Spec.Ports[0].TargetPort != intstr.FromString("p") {
|
||||
t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
|
||||
}
|
||||
if out.Spec.Ports[1].Protocol != v1.ProtocolUDP {
|
||||
t.Errorf("Expected protocol %s, got %s", v1.ProtocolUDP, out.Spec.Ports[1].Protocol)
|
||||
}
|
||||
if out.Spec.Ports[1].TargetPort != intstr.FromInt(309) {
|
||||
t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
|
||||
}
|
||||
|
||||
// Defaulted.
|
||||
in = &v1.Service{Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{
|
||||
{Protocol: "", Port: 9376, TargetPort: intstr.FromString("")},
|
||||
{Protocol: "", Port: 8675, TargetPort: intstr.FromInt(0)},
|
||||
},
|
||||
}}
|
||||
out = roundTrip(t, runtime.Object(in)).(*v1.Service)
|
||||
if out.Spec.Ports[0].Protocol != v1.ProtocolTCP {
|
||||
t.Errorf("Expected protocol %s, got %s", v1.ProtocolTCP, out.Spec.Ports[0].Protocol)
|
||||
}
|
||||
if out.Spec.Ports[0].TargetPort != intstr.FromInt(int(in.Spec.Ports[0].Port)) {
|
||||
t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
|
||||
}
|
||||
if out.Spec.Ports[1].Protocol != v1.ProtocolTCP {
|
||||
t.Errorf("Expected protocol %s, got %s", v1.ProtocolTCP, out.Spec.Ports[1].Protocol)
|
||||
}
|
||||
if out.Spec.Ports[1].TargetPort != intstr.FromInt(int(in.Spec.Ports[1].Port)) {
|
||||
t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultNamespace(t *testing.T) {
|
||||
s := &v1.Namespace{}
|
||||
obj2 := roundTrip(t, runtime.Object(s))
|
||||
s2 := obj2.(*v1.Namespace)
|
||||
|
||||
if s2.Status.Phase != v1.NamespaceActive {
|
||||
t.Errorf("Expected phase %v, got %v", v1.NamespaceActive, s2.Status.Phase)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultPodSpecHostNetwork(t *testing.T) {
|
||||
portNum := int32(8080)
|
||||
s := v1.PodSpec{}
|
||||
s.HostNetwork = true
|
||||
s.Containers = []v1.Container{
|
||||
{
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
ContainerPort: portNum,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
Spec: s,
|
||||
}
|
||||
obj2 := roundTrip(t, runtime.Object(pod))
|
||||
pod2 := obj2.(*v1.Pod)
|
||||
s2 := pod2.Spec
|
||||
|
||||
hostPortNum := s2.Containers[0].Ports[0].HostPort
|
||||
if hostPortNum != portNum {
|
||||
t.Errorf("Expected container port to be defaulted, was made %d instead of %d", hostPortNum, portNum)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultNodeExternalID(t *testing.T) {
|
||||
name := "node0"
|
||||
n := &v1.Node{}
|
||||
n.Name = name
|
||||
obj2 := roundTrip(t, runtime.Object(n))
|
||||
n2 := obj2.(*v1.Node)
|
||||
if n2.Spec.ExternalID != name {
|
||||
t.Errorf("Expected default External ID: %s, got: %s", name, n2.Spec.ExternalID)
|
||||
}
|
||||
if n2.Spec.ProviderID != "" {
|
||||
t.Errorf("Expected empty default Cloud Provider ID, got: %s", n2.Spec.ProviderID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultNodeStatusAllocatable(t *testing.T) {
|
||||
capacity := v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("1000m"),
|
||||
v1.ResourceMemory: resource.MustParse("10G"),
|
||||
}
|
||||
allocatable := v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("500m"),
|
||||
v1.ResourceMemory: resource.MustParse("5G"),
|
||||
}
|
||||
tests := []struct {
|
||||
capacity v1.ResourceList
|
||||
allocatable v1.ResourceList
|
||||
expectedAllocatable v1.ResourceList
|
||||
}{{ // Everything set, no defaulting.
|
||||
capacity: capacity,
|
||||
allocatable: allocatable,
|
||||
expectedAllocatable: allocatable,
|
||||
}, { // Allocatable set, no defaulting.
|
||||
capacity: nil,
|
||||
allocatable: allocatable,
|
||||
expectedAllocatable: allocatable,
|
||||
}, { // Capacity set, allocatable defaults to capacity.
|
||||
capacity: capacity,
|
||||
allocatable: nil,
|
||||
expectedAllocatable: capacity,
|
||||
}, { // Nothing set, allocatable "defaults" to capacity.
|
||||
capacity: nil,
|
||||
allocatable: nil,
|
||||
expectedAllocatable: nil,
|
||||
}}
|
||||
|
||||
copyResourceList := func(rl v1.ResourceList) v1.ResourceList {
|
||||
if rl == nil {
|
||||
return nil
|
||||
}
|
||||
copy := make(v1.ResourceList, len(rl))
|
||||
for k, v := range rl {
|
||||
copy[k] = *v.Copy()
|
||||
}
|
||||
return copy
|
||||
}
|
||||
|
||||
resourceListsEqual := func(a v1.ResourceList, b v1.ResourceList) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for k, v := range a {
|
||||
vb, found := b[k]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
if v.Cmp(vb) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
for i, testcase := range tests {
|
||||
node := v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: copyResourceList(testcase.capacity),
|
||||
Allocatable: copyResourceList(testcase.allocatable),
|
||||
},
|
||||
}
|
||||
node2 := roundTrip(t, runtime.Object(&node)).(*v1.Node)
|
||||
actual := node2.Status.Allocatable
|
||||
expected := testcase.expectedAllocatable
|
||||
if !resourceListsEqual(expected, actual) {
|
||||
t.Errorf("[%d] Expected NodeStatus.Allocatable: %+v; Got: %+v", i, expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultObjectFieldSelectorAPIVersion(t *testing.T) {
|
||||
s := v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
Spec: s,
|
||||
}
|
||||
obj2 := roundTrip(t, runtime.Object(pod))
|
||||
pod2 := obj2.(*v1.Pod)
|
||||
s2 := pod2.Spec
|
||||
|
||||
apiVersion := s2.Containers[0].Env[0].ValueFrom.FieldRef.APIVersion
|
||||
if apiVersion != "v1" {
|
||||
t.Errorf("Expected default APIVersion v1, got: %v", apiVersion)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetMinimumScalePod(t *testing.T) {
|
||||
// verify we default if limits are specified (and that request=0 is preserved)
|
||||
s := v1.PodSpec{}
|
||||
s.Containers = []v1.Container{
|
||||
{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("1n"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("2n"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
Spec: s,
|
||||
}
|
||||
v1.SetObjectDefaults_Pod(pod)
|
||||
|
||||
if expect := resource.MustParse("1m"); expect.Cmp(pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]) != 0 {
|
||||
t.Errorf("did not round resources: %#v", pod.Spec.Containers[0].Resources)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultRequestsPod(t *testing.T) {
|
||||
// verify we default if limits are specified (and that request=0 is preserved)
|
||||
s := v1.PodSpec{}
|
||||
s.Containers = []v1.Container{
|
||||
{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("0"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
Spec: s,
|
||||
}
|
||||
output := roundTrip(t, runtime.Object(pod))
|
||||
pod2 := output.(*v1.Pod)
|
||||
defaultRequest := pod2.Spec.Containers[0].Resources.Requests
|
||||
if requestValue := defaultRequest[v1.ResourceCPU]; requestValue.String() != "100m" {
|
||||
t.Errorf("Expected request cpu: %s, got: %s", "100m", requestValue.String())
|
||||
}
|
||||
if requestValue := defaultRequest[v1.ResourceMemory]; requestValue.String() != "0" {
|
||||
t.Errorf("Expected request memory: %s, got: %s", "0", requestValue.String())
|
||||
}
|
||||
|
||||
// verify we do nothing if no limits are specified
|
||||
s = v1.PodSpec{}
|
||||
s.Containers = []v1.Container{{}}
|
||||
pod = &v1.Pod{
|
||||
Spec: s,
|
||||
}
|
||||
output = roundTrip(t, runtime.Object(pod))
|
||||
pod2 = output.(*v1.Pod)
|
||||
defaultRequest = pod2.Spec.Containers[0].Resources.Requests
|
||||
if requestValue := defaultRequest[v1.ResourceCPU]; requestValue.String() != "0" {
|
||||
t.Errorf("Expected 0 request value, got: %s", requestValue.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultRequestIsNotSetForReplicationController(t *testing.T) {
|
||||
s := v1.PodSpec{}
|
||||
s.Containers = []v1.Container{
|
||||
{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
rc := &v1.ReplicationController{
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: newInt(3),
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: s,
|
||||
},
|
||||
},
|
||||
}
|
||||
output := roundTrip(t, runtime.Object(rc))
|
||||
rc2 := output.(*v1.ReplicationController)
|
||||
defaultRequest := rc2.Spec.Template.Spec.Containers[0].Resources.Requests
|
||||
requestValue := defaultRequest[v1.ResourceCPU]
|
||||
if requestValue.String() != "0" {
|
||||
t.Errorf("Expected 0 request value, got: %s", requestValue.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultLimitRangeItem(t *testing.T) {
|
||||
limitRange := &v1.LimitRange{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-defaults",
|
||||
},
|
||||
Spec: v1.LimitRangeSpec{
|
||||
Limits: []v1.LimitRangeItem{{
|
||||
Type: v1.LimitTypeContainer,
|
||||
Max: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
},
|
||||
Min: v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
},
|
||||
Default: v1.ResourceList{},
|
||||
DefaultRequest: v1.ResourceList{},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
output := roundTrip(t, runtime.Object(limitRange))
|
||||
limitRange2 := output.(*v1.LimitRange)
|
||||
defaultLimit := limitRange2.Spec.Limits[0].Default
|
||||
defaultRequest := limitRange2.Spec.Limits[0].DefaultRequest
|
||||
|
||||
// verify that default cpu was set to the max
|
||||
defaultValue := defaultLimit[v1.ResourceCPU]
|
||||
if defaultValue.String() != "100m" {
|
||||
t.Errorf("Expected default cpu: %s, got: %s", "100m", defaultValue.String())
|
||||
}
|
||||
// verify that default request was set to the limit
|
||||
requestValue := defaultRequest[v1.ResourceCPU]
|
||||
if requestValue.String() != "100m" {
|
||||
t.Errorf("Expected request cpu: %s, got: %s", "100m", requestValue.String())
|
||||
}
|
||||
// verify that if a min is provided, it will be the default if no limit is specified
|
||||
requestMinValue := defaultRequest[v1.ResourceMemory]
|
||||
if requestMinValue.String() != "100Mi" {
|
||||
t.Errorf("Expected request memory: %s, got: %s", "100Mi", requestMinValue.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultProbe(t *testing.T) {
|
||||
originalProbe := v1.Probe{}
|
||||
expectedProbe := v1.Probe{
|
||||
InitialDelaySeconds: 0,
|
||||
TimeoutSeconds: 1,
|
||||
PeriodSeconds: 10,
|
||||
SuccessThreshold: 1,
|
||||
FailureThreshold: 3,
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{LivenessProbe: &originalProbe}},
|
||||
},
|
||||
}
|
||||
|
||||
output := roundTrip(t, runtime.Object(pod)).(*v1.Pod)
|
||||
actualProbe := *output.Spec.Containers[0].LivenessProbe
|
||||
if actualProbe != expectedProbe {
|
||||
t.Errorf("Expected probe: %+v\ngot: %+v\n", expectedProbe, actualProbe)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDefaultSchedulerName(t *testing.T) {
|
||||
pod := &v1.Pod{}
|
||||
|
||||
output := roundTrip(t, runtime.Object(pod)).(*v1.Pod)
|
||||
if output.Spec.SchedulerName != v1.DefaultSchedulerName {
|
||||
t.Errorf("Expected scheduler name: %+v\ngot: %+v\n", v1.DefaultSchedulerName, output.Spec.SchedulerName)
|
||||
}
|
||||
}
|
vendor/k8s.io/kubernetes/pkg/api/v1/endpoints/BUILD (generated, vendored, 45 changes)
@@ -1,45 +0,0 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["util.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/util/hash:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/types",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["util_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//vendor:github.com/davecgh/go-spew/spew",
        "//vendor:k8s.io/apimachinery/pkg/types",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/api/v1/endpoints/util.go (generated, vendored, 226 changes)
@@ -1,226 +0,0 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"sort"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
hashutil "k8s.io/kubernetes/pkg/util/hash"
|
||||
)
|
||||
|
||||
// RepackSubsets takes a slice of EndpointSubset objects, expands it to the full
|
||||
// representation, and then repacks that into the canonical layout. This
|
||||
// ensures that code which operates on these objects can rely on the common
|
||||
// form for things like comparison. The result is a newly allocated slice.
|
||||
func RepackSubsets(subsets []v1.EndpointSubset) []v1.EndpointSubset {
|
||||
// First map each unique port definition to the sets of hosts that
|
||||
// offer it.
|
||||
allAddrs := map[addressKey]*v1.EndpointAddress{}
|
||||
portToAddrReadyMap := map[v1.EndpointPort]addressSet{}
|
||||
for i := range subsets {
|
||||
for _, port := range subsets[i].Ports {
|
||||
for k := range subsets[i].Addresses {
|
||||
mapAddressByPort(&subsets[i].Addresses[k], port, true, allAddrs, portToAddrReadyMap)
|
||||
}
|
||||
for k := range subsets[i].NotReadyAddresses {
|
||||
mapAddressByPort(&subsets[i].NotReadyAddresses[k], port, false, allAddrs, portToAddrReadyMap)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Next, map the sets of hosts to the sets of ports they offer.
|
||||
// Go does not allow maps or slices as keys to maps, so we have
|
||||
// to synthesize an artificial key and do a sort of 2-part
|
||||
// associative entity.
|
||||
type keyString string
|
||||
keyToAddrReadyMap := map[keyString]addressSet{}
|
||||
addrReadyMapKeyToPorts := map[keyString][]v1.EndpointPort{}
|
||||
for port, addrs := range portToAddrReadyMap {
|
||||
key := keyString(hashAddresses(addrs))
|
||||
keyToAddrReadyMap[key] = addrs
|
||||
addrReadyMapKeyToPorts[key] = append(addrReadyMapKeyToPorts[key], port)
|
||||
}
|
||||
|
||||
// Next, build the N-to-M association the API wants.
|
||||
final := []v1.EndpointSubset{}
|
||||
for key, ports := range addrReadyMapKeyToPorts {
|
||||
var readyAddrs, notReadyAddrs []v1.EndpointAddress
|
||||
for addr, ready := range keyToAddrReadyMap[key] {
|
||||
if ready {
|
||||
readyAddrs = append(readyAddrs, *addr)
|
||||
} else {
|
||||
notReadyAddrs = append(notReadyAddrs, *addr)
|
||||
}
|
||||
}
|
||||
final = append(final, v1.EndpointSubset{Addresses: readyAddrs, NotReadyAddresses: notReadyAddrs, Ports: ports})
|
||||
}
|
||||
|
||||
// Finally, sort it.
|
||||
return SortSubsets(final)
|
||||
}
|
||||
|
||||
// The sets of hosts must be de-duped, using IP+UID as the key.
type addressKey struct {
	ip  string
	uid types.UID
}

// mapAddressByPort adds an address into a map by its ports, registering the address with a unique pointer, and preserving
// any existing ready state.
func mapAddressByPort(addr *v1.EndpointAddress, port v1.EndpointPort, ready bool, allAddrs map[addressKey]*v1.EndpointAddress, portToAddrReadyMap map[v1.EndpointPort]addressSet) *v1.EndpointAddress {
	// use addressKey to distinguish between two endpoints that are identical addresses
	// but may have come from different hosts, for attribution. For instance, Mesos
	// assigns pods the node IP, but the pods are distinct.
	key := addressKey{ip: addr.IP}
	if addr.TargetRef != nil {
		key.uid = addr.TargetRef.UID
	}

	// Accumulate the address. The full EndpointAddress structure is preserved for use when
	// we rebuild the subsets so that the final TargetRef has all of the necessary data.
	existingAddress := allAddrs[key]
	if existingAddress == nil {
		// Make a copy so we don't write to the
		// input args of this function.
		existingAddress = &v1.EndpointAddress{}
		*existingAddress = *addr
		allAddrs[key] = existingAddress
	}

	// Remember that this port maps to this address.
	if _, found := portToAddrReadyMap[port]; !found {
		portToAddrReadyMap[port] = addressSet{}
	}
	// if we have not yet recorded this port for this address, or if the previous
	// state was ready, write the current ready state. not ready always trumps
	// ready.
	if wasReady, found := portToAddrReadyMap[port][existingAddress]; !found || wasReady {
		portToAddrReadyMap[port][existingAddress] = ready
	}
	return existingAddress
}
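To illustrate the merge rule above, here is a minimal sketch (not part of the vendored file) that would sit in this same package; the helper name exampleReadinessMerge, the address, and the port are made up.

// Hypothetical sketch: the second registration of the same IP flips the
// recorded state to false, because "not ready" always trumps "ready".
func exampleReadinessMerge() bool {
	allAddrs := map[addressKey]*v1.EndpointAddress{}
	portToAddrReadyMap := map[v1.EndpointPort]addressSet{}

	addr := v1.EndpointAddress{IP: "10.0.0.1"}
	port := v1.EndpointPort{Port: 80}

	registered := mapAddressByPort(&addr, port, true, allAddrs, portToAddrReadyMap) // first seen ready
	mapAddressByPort(&addr, port, false, allAddrs, portToAddrReadyMap)              // later seen not ready

	return portToAddrReadyMap[port][registered] // false: the not-ready observation wins
}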
type addressSet map[*v1.EndpointAddress]bool

type addrReady struct {
	addr  *v1.EndpointAddress
	ready bool
}

func hashAddresses(addrs addressSet) string {
	// Flatten the list of addresses into a string so it can be used as a
	// map key. Unfortunately, DeepHashObject is implemented in terms of
	// spew, and spew does not handle non-primitive map keys well. So
	// first we collapse it into a slice, sort the slice, then hash that.
	slice := make([]addrReady, 0, len(addrs))
	for k, ready := range addrs {
		slice = append(slice, addrReady{k, ready})
	}
	sort.Sort(addrsReady(slice))
	hasher := md5.New()
	hashutil.DeepHashObject(hasher, slice)
	return hex.EncodeToString(hasher.Sum(nil)[0:])
}

func lessAddrReady(a, b addrReady) bool {
	// ready is not significant to hashing since we can't have duplicate addresses
	return LessEndpointAddress(a.addr, b.addr)
}

type addrsReady []addrReady

func (sl addrsReady) Len() int      { return len(sl) }
func (sl addrsReady) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl addrsReady) Less(i, j int) bool {
	return lessAddrReady(sl[i], sl[j])
}
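A small sketch of why the slice is sorted before hashing: the synthesized key stays stable across Go's randomized map iteration. This would also live in the same package; the function name and addresses are invented for illustration.

// Hypothetical sketch: hashing the same address set twice always yields the
// same key, regardless of map iteration order, because the slice is sorted
// before it is hashed.
func exampleStableKey() bool {
	a := &v1.EndpointAddress{IP: "10.0.0.1"}
	b := &v1.EndpointAddress{IP: "10.0.0.2"}
	set := addressSet{a: true, b: false}
	return hashAddresses(set) == hashAddresses(set) // always true
}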
func LessEndpointAddress(a, b *v1.EndpointAddress) bool {
	ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP))
	if ipComparison != 0 {
		return ipComparison < 0
	}
	if b.TargetRef == nil {
		return false
	}
	if a.TargetRef == nil {
		return true
	}
	return a.TargetRef.UID < b.TargetRef.UID
}

type addrPtrsByIpAndUID []*v1.EndpointAddress

func (sl addrPtrsByIpAndUID) Len() int      { return len(sl) }
func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl addrPtrsByIpAndUID) Less(i, j int) bool {
	return LessEndpointAddress(sl[i], sl[j])
}

// SortSubsets sorts an array of EndpointSubset objects in place. For ease of
// use it returns the input slice.
func SortSubsets(subsets []v1.EndpointSubset) []v1.EndpointSubset {
	for i := range subsets {
		ss := &subsets[i]
		sort.Sort(addrsByIpAndUID(ss.Addresses))
		sort.Sort(addrsByIpAndUID(ss.NotReadyAddresses))
		sort.Sort(portsByHash(ss.Ports))
	}
	sort.Sort(subsetsByHash(subsets))
	return subsets
}

func hashObject(hasher hash.Hash, obj interface{}) []byte {
	hashutil.DeepHashObject(hasher, obj)
	return hasher.Sum(nil)
}

type subsetsByHash []v1.EndpointSubset

func (sl subsetsByHash) Len() int      { return len(sl) }
func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl subsetsByHash) Less(i, j int) bool {
	hasher := md5.New()
	h1 := hashObject(hasher, sl[i])
	h2 := hashObject(hasher, sl[j])
	return bytes.Compare(h1, h2) < 0
}

type addrsByIpAndUID []v1.EndpointAddress

func (sl addrsByIpAndUID) Len() int      { return len(sl) }
func (sl addrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl addrsByIpAndUID) Less(i, j int) bool {
	return LessEndpointAddress(&sl[i], &sl[j])
}

type portsByHash []v1.EndpointPort

func (sl portsByHash) Len() int      { return len(sl) }
func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl portsByHash) Less(i, j int) bool {
	hasher := md5.New()
	h1 := hashObject(hasher, sl[i])
	h2 := hashObject(hasher, sl[j])
	return bytes.Compare(h1, h2) < 0
}
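For orientation, a minimal standalone sketch of how a caller would use RepackSubsets, assuming the vendored import paths shown in this commit; the input mirrors the "dup ip, dup port" style cases exercised by the test file below.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/endpoints"
)

func main() {
	given := []v1.EndpointSubset{
		{Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, Ports: []v1.EndpointPort{{Port: 111}}},
		{Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}, {IP: "1.2.3.4"}}, Ports: []v1.EndpointPort{{Port: 111}}},
	}
	// Duplicate addresses collapse, and both IPs end up in a single sorted subset on port 111.
	fmt.Printf("%+v\n", endpoints.RepackSubsets(given))
}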
464
vendor/k8s.io/kubernetes/pkg/api/v1/endpoints/util_test.go
generated
vendored
|
@ -1,464 +0,0 @@
|
|||
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoints

import (
	"reflect"
	"testing"

	"github.com/davecgh/go-spew/spew"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/api/v1"
)

func podRef(uid string) *v1.ObjectReference {
	ref := v1.ObjectReference{UID: types.UID(uid)}
	return &ref
}

func TestPackSubsets(t *testing.T) {
	// The downside of table-driven tests is that some things have to live outside the table.
	fooObjRef := v1.ObjectReference{Name: "foo"}
	barObjRef := v1.ObjectReference{Name: "bar"}

	testCases := []struct {
		name   string
		given  []v1.EndpointSubset
		expect []v1.EndpointSubset
	}{
|
||||
{
|
||||
name: "empty everything",
|
||||
given: []v1.EndpointSubset{{Addresses: []v1.EndpointAddress{}, Ports: []v1.EndpointPort{}}},
|
||||
expect: []v1.EndpointSubset{},
|
||||
}, {
|
||||
name: "empty addresses",
|
||||
given: []v1.EndpointSubset{{Addresses: []v1.EndpointAddress{}, Ports: []v1.EndpointPort{{Port: 111}}}},
|
||||
expect: []v1.EndpointSubset{},
|
||||
}, {
|
||||
name: "empty ports",
|
||||
given: []v1.EndpointSubset{{Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, Ports: []v1.EndpointPort{}}},
|
||||
expect: []v1.EndpointSubset{},
|
||||
}, {
|
||||
name: "empty ports",
|
||||
given: []v1.EndpointSubset{{NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}}, Ports: []v1.EndpointPort{}}},
|
||||
expect: []v1.EndpointSubset{},
|
||||
}, {
|
||||
name: "one set, one ip, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, one ip, one port (IPv6)",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "beef::1:2:3:4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "beef::1:2:3:4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, one notReady ip, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, one ip, one UID, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, one notReady ip, one UID, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, one ip, empty UID, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, one notReady ip, empty UID, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, two ips, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, two mixed ips, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, two duplicate ips, one port, notReady is covered by ready",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, one ip, two ports",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}, {Port: 222}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}, {Port: 222}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, dup ips, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, dup ips, one port (IPv6)",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "beef::1"}, {IP: "beef::1"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "beef::1"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, dup ips with target-refs, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{
|
||||
{IP: "1.2.3.4", TargetRef: &fooObjRef},
|
||||
{IP: "1.2.3.4", TargetRef: &barObjRef},
|
||||
},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: &fooObjRef}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, dup mixed ips with target-refs, one port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{
|
||||
{IP: "1.2.3.4", TargetRef: &fooObjRef},
|
||||
},
|
||||
NotReadyAddresses: []v1.EndpointAddress{
|
||||
{IP: "1.2.3.4", TargetRef: &barObjRef},
|
||||
},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
// finding the same address twice is considered an error on input, only the first address+port
|
||||
// reference is preserved
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: &fooObjRef}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "one set, one ip, dup ports",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}, {Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "two sets, dup ip, dup port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "two sets, dup mixed ip, dup port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "two sets, dup ip, two ports",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}, {Port: 222}},
|
||||
}},
|
||||
}, {
|
||||
name: "two sets, dup ip, dup uids, two ports",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}, {Port: 222}},
|
||||
}},
|
||||
}, {
|
||||
name: "two sets, dup mixed ip, dup uids, two ports",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}},
|
||||
}},
|
||||
}, {
|
||||
name: "two sets, two ips, dup port",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "two set, dup ip, two uids, dup ports",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-2")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{
|
||||
{IP: "1.2.3.4", TargetRef: podRef("uid-1")},
|
||||
{IP: "1.2.3.4", TargetRef: podRef("uid-2")},
|
||||
},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "two set, dup ip, with and without uid, dup ports",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-2")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{
|
||||
{IP: "1.2.3.4"},
|
||||
{IP: "1.2.3.4", TargetRef: podRef("uid-2")},
|
||||
},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "two sets, two ips, two dup ip with uid, dup port, wrong order",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "5.6.7.8", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{
|
||||
{IP: "1.2.3.4"},
|
||||
{IP: "1.2.3.4", TargetRef: podRef("uid-1")},
|
||||
{IP: "5.6.7.8"},
|
||||
{IP: "5.6.7.8", TargetRef: podRef("uid-1")},
|
||||
},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "two sets, two mixed ips, two dup ip with uid, dup port, wrong order, ends up with split addresses",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "5.6.7.8", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{
|
||||
{IP: "5.6.7.8"},
|
||||
},
|
||||
NotReadyAddresses: []v1.EndpointAddress{
|
||||
{IP: "1.2.3.4"},
|
||||
{IP: "1.2.3.4", TargetRef: podRef("uid-1")},
|
||||
{IP: "5.6.7.8", TargetRef: podRef("uid-1")},
|
||||
},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}},
|
||||
}, {
|
||||
name: "two sets, two ips, two ports",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "5.6.7.8"}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}},
|
||||
}},
|
||||
}, {
|
||||
name: "four sets, three ips, three ports, jumbled",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.5"}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.6"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.5"}},
|
||||
Ports: []v1.EndpointPort{{Port: 333}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.6"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.5"}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}, {Port: 333}},
|
||||
}},
|
||||
}, {
|
||||
name: "four sets, three mixed ips, three ports, jumbled",
|
||||
given: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.5"}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}},
|
||||
}, {
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.6"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.5"}},
|
||||
Ports: []v1.EndpointPort{{Port: 333}},
|
||||
}},
|
||||
expect: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.6"}},
|
||||
Ports: []v1.EndpointPort{{Port: 111}},
|
||||
}, {
|
||||
NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.5"}},
|
||||
Ports: []v1.EndpointPort{{Port: 222}, {Port: 333}},
|
||||
}},
|
||||
},
|
||||
	}

	for _, tc := range testCases {
		result := RepackSubsets(tc.given)
		if !reflect.DeepEqual(result, SortSubsets(tc.expect)) {
			t.Errorf("case %q: expected %s, got %s", tc.name, spew.Sprintf("%#v", SortSubsets(tc.expect)), spew.Sprintf("%#v", result))
		}
	}
}
5190
vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go
generated
vendored
File diff suppressed because it is too large
302
vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto
generated
vendored
|
@ -21,11 +21,12 @@ syntax = 'proto2';
|
|||
|
||||
package k8s.io.kubernetes.pkg.api.v1;
|
||||
|
||||
import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
|
||||
import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
|
||||
import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto";
|
||||
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "v1";
|
||||
|
@ -308,6 +309,31 @@ message ConfigMapList {
|
|||
repeated ConfigMap items = 2;
|
||||
}
|
||||
|
||||
// Adapts a ConfigMap into a projected volume.
|
||||
//
|
||||
// The contents of the target ConfigMap's Data field will be presented in a
|
||||
// projected volume as files using the keys in the Data field as the file names,
|
||||
// unless the items element is populated with specific mappings of keys to paths.
|
||||
// Note that this is identical to a configmap volume source without the default
|
||||
// mode.
|
||||
message ConfigMapProjection {
|
||||
optional LocalObjectReference localObjectReference = 1;
|
||||
|
||||
// If unspecified, each key-value pair in the Data field of the referenced
|
||||
// ConfigMap will be projected into the volume as a file whose name is the
|
||||
// key and content is the value. If specified, the listed keys will be
|
||||
// projected into the specified paths, and unlisted keys will not be
|
||||
// present. If a key is specified which is not present in the ConfigMap,
|
||||
// the volume setup will error unless it is marked optional. Paths must be
|
||||
// relative and may not contain the '..' path or start with '..'.
|
||||
// +optional
|
||||
repeated KeyToPath items = 2;
|
||||
|
||||
// Specify whether the ConfigMap or its keys must be defined
|
||||
// +optional
|
||||
optional bool optional = 4;
|
||||
}
|
||||
|
||||
// Adapts a ConfigMap into a volume.
|
||||
//
|
||||
// The contents of the target ConfigMap's Data field will be presented in a
|
||||
|
@ -392,8 +418,8 @@ message Container {
|
|||
repeated ContainerPort ports = 6;
|
||||
|
||||
// List of sources to populate environment variables in the container.
|
||||
// The keys defined within a source must be a C_IDENTIFIER. An invalid key
|
||||
// will prevent the container from starting. When a key exists in multiple
|
||||
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
// will be reported as an event when the container is starting. When a key exists in multiple
|
||||
// sources, the value associated with the last source will take precedence.
|
||||
// Values defined by an Env with a duplicate key will take precedence.
|
||||
// Cannot be updated.
|
||||
|
@ -654,10 +680,28 @@ message DeleteOptions {
|
|||
// +optional
|
||||
optional Preconditions preconditions = 2;
|
||||
|
||||
// Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
|
||||
// Should the dependent objects be orphaned. If true/false, the "orphan"
|
||||
// finalizer will be added to/removed from the object's finalizers list.
|
||||
// Either this field or PropagationPolicy may be set, but not both.
|
||||
// +optional
|
||||
optional bool orphanDependents = 3;
|
||||
|
||||
// Whether and how garbage collection will be performed.
|
||||
// Either this field or OrphanDependents may be set, but not both.
|
||||
// The default policy is decided by the existing finalizer set in the
|
||||
// metadata.finalizers and the resource-specific default policy.
|
||||
// +optional
|
||||
optional string propagationPolicy = 4;
|
||||
}
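A hedged Go sketch of the either/or relationship between these two fields; it assumes the v1.DeletePropagationForeground constant defined alongside DeleteOptions in this package's types, and is illustrative only.

package main

import "k8s.io/kubernetes/pkg/api/v1"

func main() {
	// Prefer PropagationPolicy; OrphanDependents is the older, mutually exclusive knob.
	policy := v1.DeletePropagationForeground // assumed constant for "Foreground"
	opts := &v1.DeleteOptions{PropagationPolicy: &policy}
	_ = opts
}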
|
||||
|
||||
// Represents downward API info for projecting into a projected volume.
|
||||
// Note that this is identical to a downwardAPI volume source without the default
|
||||
// mode.
|
||||
message DownwardAPIProjection {
|
||||
// Items is a list of DownwardAPIVolume file
|
||||
// +optional
|
||||
repeated DownwardAPIVolumeFile items = 1;
|
||||
}
|
||||
|
||||
// DownwardAPIVolumeFile represents information to create the file containing the pod field
|
||||
|
@ -1095,7 +1139,7 @@ message HTTPGetAction {
|
|||
// Name or number of the port to access on the container.
|
||||
// Number must be in the range 1 to 65535.
|
||||
// Name must be an IANA_SVC_NAME.
|
||||
optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2;
|
||||
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
|
||||
|
||||
// Host name to connect to, defaults to the pod IP. You probably want to set
|
||||
// "Host" in httpHeaders instead.
|
||||
|
@ -1178,6 +1222,11 @@ message ISCSIVolumeSource {
|
|||
// Defaults to false.
|
||||
// +optional
|
||||
optional bool readOnly = 6;
|
||||
|
||||
// iSCSI target portal List. The portal is either an IP or ip_addr:port if the port
|
||||
// is other than default (typically TCP ports 860 and 3260).
|
||||
// +optional
|
||||
repeated string portals = 7;
|
||||
}
|
||||
|
||||
// Maps a string key to a path within a volume.
|
||||
|
@ -1241,23 +1290,23 @@ message LimitRangeItem {
|
|||
|
||||
// Max usage constraints on this kind by resource name.
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> max = 2;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2;
|
||||
|
||||
// Min usage constraints on this kind by resource name.
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> min = 3;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3;
|
||||
|
||||
// Default resource requirement limit value by resource name if resource limit is omitted.
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> default = 4;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4;
|
||||
|
||||
// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> defaultRequest = 5;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5;
|
||||
|
||||
// MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
|
||||
}
|
||||
|
||||
// LimitRangeList is a list of LimitRange items.
|
||||
|
@ -1527,7 +1576,7 @@ message NodeProxyOptions {
|
|||
// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
|
||||
message NodeResources {
|
||||
// Capacity represents the available resources of a node
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 1;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
|
||||
}
|
||||
|
||||
// A node selector represents the union of the results of one or more label queries
|
||||
|
@ -1582,6 +1631,10 @@ message NodeSpec {
|
|||
// More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration
|
||||
// +optional
|
||||
optional bool unschedulable = 4;
|
||||
|
||||
// If specified, the node's taints.
|
||||
// +optional
|
||||
repeated Taint taints = 5;
|
||||
}
|
||||
|
||||
// NodeStatus is information about the current status of a node.
|
||||
|
@ -1589,12 +1642,12 @@ message NodeStatus {
|
|||
// Capacity represents the total resources of a node.
|
||||
// More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details.
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 1;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
|
||||
|
||||
// Allocatable represents the resources of a node that are available for scheduling.
|
||||
// Defaults to Capacity.
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> allocatable = 2;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2;
|
||||
|
||||
// NodePhase is the recently observed lifecycle phase of the node.
|
||||
// More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase
|
||||
|
@ -1949,6 +2002,11 @@ message PersistentVolumeClaimSpec {
|
|||
// VolumeName is the binding reference to the PersistentVolume backing this claim.
|
||||
// +optional
|
||||
optional string volumeName = 3;
|
||||
|
||||
// Name of the StorageClass required by the claim.
|
||||
// More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1
|
||||
// +optional
|
||||
optional string storageClassName = 5;
|
||||
}
|
||||
|
||||
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
|
||||
|
@ -1964,7 +2022,7 @@ message PersistentVolumeClaimStatus {
|
|||
|
||||
// Represents the actual resources of the underlying volume.
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 3;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3;
|
||||
}
|
||||
|
||||
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
|
||||
|
@ -2079,6 +2137,14 @@ message PersistentVolumeSource {
|
|||
|
||||
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
|
||||
optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17;
|
||||
|
||||
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
|
||||
// +optional
|
||||
optional PortworxVolumeSource portworxVolume = 18;
|
||||
|
||||
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
|
||||
// +optional
|
||||
optional ScaleIOVolumeSource scaleIO = 19;
|
||||
}
|
||||
|
||||
// PersistentVolumeSpec is the specification of a persistent volume.
|
||||
|
@ -2086,7 +2152,7 @@ message PersistentVolumeSpec {
|
|||
// A description of the persistent volume's resources and capacity.
|
||||
// More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 1;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
|
||||
|
||||
// The actual volume backing the persistent volume.
|
||||
optional PersistentVolumeSource persistentVolumeSource = 2;
|
||||
|
@ -2109,6 +2175,11 @@ message PersistentVolumeSpec {
|
|||
// More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy
|
||||
// +optional
|
||||
optional string persistentVolumeReclaimPolicy = 5;
|
||||
|
||||
// Name of StorageClass to which this persistent volume belongs. Empty value
|
||||
// means that this volume does not belong to any StorageClass.
|
||||
// +optional
|
||||
optional string storageClassName = 6;
|
||||
}
|
||||
|
||||
// PersistentVolumeStatus is the current status of a persistent volume.
|
||||
|
@ -2208,9 +2279,7 @@ message PodAffinityTerm {
|
|||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
|
||||
|
||||
// namespaces specifies which namespaces the labelSelector applies to (matches against);
|
||||
// nil list means "this pod's namespace," empty list means "all namespaces"
|
||||
// The json tag here is not "omitempty" since we need to distinguish nil and empty.
|
||||
// See https://golang.org/pkg/encoding/json/#Marshal for more details.
|
||||
// null or empty list means "this pod's namespace"
|
||||
repeated string namespaces = 2;
|
||||
|
||||
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
|
||||
|
@ -2495,6 +2564,21 @@ message PodSpec {
|
|||
// +optional
|
||||
repeated Volume volumes = 1;
|
||||
|
||||
// List of initialization containers belonging to the pod.
|
||||
// Init containers are executed in order prior to containers being started. If any
|
||||
// init container fails, the pod is considered to have failed and is handled according
|
||||
// to its restartPolicy. The name for an init container or normal container must be
|
||||
// unique among all containers.
|
||||
// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
|
||||
// The resourceRequirements of an init container are taken into account during scheduling
|
||||
// by finding the highest request/limit for each resource type, and then using the max of
|
||||
// that value or the sum of the normal containers. Limits are applied to init containers
|
||||
// in a similar fashion.
|
||||
// Init containers cannot currently be added or removed.
|
||||
// Cannot be updated.
|
||||
// More info: http://kubernetes.io/docs/user-guide/containers
|
||||
repeated Container initContainers = 20;
|
||||
|
||||
// List of containers belonging to the pod.
|
||||
// Containers cannot currently be added or removed.
|
||||
// There must be at least one container in a Pod.
|
||||
|
@ -2526,8 +2610,9 @@ message PodSpec {
|
|||
optional int64 activeDeadlineSeconds = 5;
|
||||
|
||||
// Set DNS policy for containers within the pod.
|
||||
// One of 'ClusterFirst' or 'Default'.
|
||||
// One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'.
|
||||
// Defaults to "ClusterFirst".
|
||||
// To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
|
||||
// +optional
|
||||
optional string dnsPolicy = 6;
|
||||
|
||||
|
@ -2548,6 +2633,10 @@ message PodSpec {
|
|||
// +optional
|
||||
optional string serviceAccount = 9;
|
||||
|
||||
// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
|
||||
// +optional
|
||||
optional bool automountServiceAccountToken = 21;
|
||||
|
||||
// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
|
||||
// the scheduler simply schedules this pod onto that node, assuming that it fits resource
|
||||
// requirements.
|
||||
|
@ -2602,7 +2691,11 @@ message PodSpec {
|
|||
// If specified, the pod will be dispatched by specified scheduler.
|
||||
// If not specified, the pod will be dispatched by default scheduler.
|
||||
// +optional
|
||||
optional string schedulername = 19;
|
||||
optional string schedulerName = 19;
|
||||
|
||||
// If specified, the pod's tolerations.
|
||||
// +optional
|
||||
repeated Toleration tolerations = 22;
|
||||
}
|
||||
|
||||
// PodStatus represents information about the status of a pod. Status may trail the actual
|
||||
|
@ -2641,6 +2734,12 @@ message PodStatus {
|
|||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7;
|
||||
|
||||
// The list has one entry per init container in the manifest. The most recent successful
|
||||
// init container will have ready = true, the most recently started container will have
|
||||
// startTime set.
|
||||
// More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses
|
||||
repeated ContainerStatus initContainerStatuses = 10;
|
||||
|
||||
// The list has one entry per container in the manifest. Each entry is currently the output
|
||||
// of `docker inspect`.
|
||||
// More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses
|
||||
|
@ -2707,6 +2806,22 @@ message PodTemplateSpec {
|
|||
optional PodSpec spec = 2;
|
||||
}
|
||||
|
||||
// PortworxVolumeSource represents a Portworx volume resource.
|
||||
message PortworxVolumeSource {
|
||||
// VolumeID uniquely identifies a Portworx volume
|
||||
optional string volumeID = 1;
|
||||
|
||||
// FSType represents the filesystem type to mount
|
||||
// Must be a filesystem type supported by the host operating system.
|
||||
// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
|
||||
optional string fsType = 2;
|
||||
|
||||
// Defaults to false (read/write). ReadOnly here will force
|
||||
// the ReadOnly setting in VolumeMounts.
|
||||
// +optional
|
||||
optional bool readOnly = 3;
|
||||
}
|
||||
|
||||
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
|
||||
// +k8s:openapi-gen=false
|
||||
message Preconditions {
|
||||
|
@ -2776,6 +2891,20 @@ message Probe {
|
|||
optional int32 failureThreshold = 6;
|
||||
}
|
||||
|
||||
// Represents a projected volume source
|
||||
message ProjectedVolumeSource {
|
||||
// list of volume projections
|
||||
repeated VolumeProjection sources = 1;
|
||||
|
||||
// Mode bits to use on created files by default. Must be a value between
|
||||
// 0 and 0777.
|
||||
// Directories within the path are not affected by this setting.
|
||||
// This might be in conflict with other options that affect the file
|
||||
// mode, like fsGroup, and the result can be other mode bits set.
|
||||
// +optional
|
||||
optional int32 defaultMode = 2;
|
||||
}
|
||||
|
||||
// Represents a Quobyte mount that lasts the lifetime of a pod.
|
||||
// Quobyte volumes do not support ownership management or SELinux relabeling.
|
||||
message QuobyteVolumeSource {
|
||||
|
@ -2992,7 +3121,7 @@ message ResourceFieldSelector {
|
|||
|
||||
// Specifies the output format of the exposed resources, defaults to "1"
|
||||
// +optional
|
||||
optional k8s.io.kubernetes.pkg.api.resource.Quantity divisor = 3;
|
||||
optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3;
|
||||
}
|
||||
|
||||
// ResourceQuota sets aggregate quota restrictions enforced per namespace
|
||||
|
@ -3030,7 +3159,7 @@ message ResourceQuotaSpec {
|
|||
// Hard is the set of desired hard limits for each named resource.
|
||||
// More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> hard = 1;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
|
||||
|
||||
// A collection of filters that must match each object tracked by a quota.
|
||||
// If not specified, the quota matches all objects.
|
||||
|
@ -3043,11 +3172,11 @@ message ResourceQuotaStatus {
|
|||
// Hard is the set of enforced hard limits for each named resource.
|
||||
// More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> hard = 1;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
|
||||
|
||||
// Used is the current observed total usage of the resource in the namespace.
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> used = 2;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> used = 2;
|
||||
}
|
||||
|
||||
// ResourceRequirements describes the compute resource requirements.
|
||||
|
@ -3055,14 +3184,14 @@ message ResourceRequirements {
|
|||
// Limits describes the maximum amount of compute resources allowed.
|
||||
// More info: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> limits = 1;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1;
|
||||
|
||||
// Requests describes the minimum amount of compute resources required.
|
||||
// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
|
||||
// otherwise to an implementation-defined value.
|
||||
// More info: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
// +optional
|
||||
map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> requests = 2;
|
||||
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2;
|
||||
}
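A short sketch of how these maps look from Go, assuming the apimachinery resource package referenced above provides Quantity and MustParse; the values are arbitrary.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	req := v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("250m"),
			v1.ResourceMemory: resource.MustParse("64Mi"),
		},
		Limits: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("500m"),
			v1.ResourceMemory: resource.MustParse("128Mi"),
		},
	}
	cpu := req.Limits[v1.ResourceCPU]
	fmt.Println(cpu.String()) // "500m"
}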
|
||||
|
||||
// SELinuxOptions are the labels to be applied to the container
|
||||
|
@ -3084,6 +3213,50 @@ message SELinuxOptions {
|
|||
optional string level = 4;
|
||||
}
|
||||
|
||||
// ScaleIOVolumeSource represents a persistent ScaleIO volume
|
||||
message ScaleIOVolumeSource {
|
||||
// The host address of the ScaleIO API Gateway.
|
||||
optional string gateway = 1;
|
||||
|
||||
// The name of the storage system as configured in ScaleIO.
|
||||
optional string system = 2;
|
||||
|
||||
// SecretRef references to the secret for ScaleIO user and other
|
||||
// sensitive information. If this is not provided, Login operation will fail.
|
||||
optional LocalObjectReference secretRef = 3;
|
||||
|
||||
// Flag to enable/disable SSL communication with Gateway, default false
|
||||
// +optional
|
||||
optional bool sslEnabled = 4;
|
||||
|
||||
// The name of the Protection Domain for the configured storage (defaults to "default").
|
||||
// +optional
|
||||
optional string protectionDomain = 5;
|
||||
|
||||
// The Storage Pool associated with the protection domain (defaults to "default").
|
||||
// +optional
|
||||
optional string storagePool = 6;
|
||||
|
||||
// Indicates whether the storage for a volume should be thick or thin (defaults to "thin").
|
||||
// +optional
|
||||
optional string storageMode = 7;
|
||||
|
||||
// The name of a volume already created in the ScaleIO system
|
||||
// that is associated with this volume source.
|
||||
optional string volumeName = 8;
|
||||
|
||||
// Filesystem type to mount.
|
||||
// Must be a filesystem type supported by the host operating system.
|
||||
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
|
||||
// +optional
|
||||
optional string fsType = 9;
|
||||
|
||||
// Defaults to false (read/write). ReadOnly here will force
|
||||
// the ReadOnly setting in VolumeMounts.
|
||||
// +optional
|
||||
optional bool readOnly = 10;
|
||||
}
|
||||
|
||||
// Secret holds secret data of a certain type. The total bytes of the values in
|
||||
// the Data field must be less than MaxSecretSize bytes.
|
||||
message Secret {
|
||||
|
@ -3152,6 +3325,30 @@ message SecretList {
|
|||
repeated Secret items = 2;
|
||||
}
|
||||
|
||||
// Adapts a secret into a projected volume.
|
||||
//
|
||||
// The contents of the target Secret's Data field will be presented in a
|
||||
// projected volume as files using the keys in the Data field as the file names.
|
||||
// Note that this is identical to a secret volume source without the default
|
||||
// mode.
|
||||
message SecretProjection {
|
||||
optional LocalObjectReference localObjectReference = 1;
|
||||
|
||||
// If unspecified, each key-value pair in the Data field of the referenced
|
||||
// Secret will be projected into the volume as a file whose name is the
|
||||
// key and content is the value. If specified, the listed keys will be
|
||||
// projected into the specified paths, and unlisted keys will not be
|
||||
// present. If a key is specified which is not present in the Secret,
|
||||
// the volume setup will error unless it is marked optional. Paths must be
|
||||
// relative and may not contain the '..' path or start with '..'.
|
||||
// +optional
|
||||
repeated KeyToPath items = 2;
|
||||
|
||||
// Specify whether the Secret or its key must be defined
|
||||
// +optional
|
||||
optional bool optional = 4;
|
||||
}
|
||||
|
||||
// Adapts a Secret into a volume.
|
||||
//
|
||||
// The contents of the target Secret's Data field will be presented in a volume
|
||||
|
@ -3280,6 +3477,11 @@ message ServiceAccount {
|
|||
// More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret
|
||||
// +optional
|
||||
repeated LocalObjectReference imagePullSecrets = 3;
|
||||
|
||||
// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
|
||||
// Can be overridden at the pod level.
|
||||
// +optional
|
||||
optional bool automountServiceAccountToken = 4;
|
||||
}
|
||||
|
||||
// ServiceAccountList is a list of ServiceAccount objects
|
||||
|
@ -3331,7 +3533,7 @@ message ServicePort {
|
|||
// omitted or set equal to the 'port' field.
|
||||
// More info: http://kubernetes.io/docs/user-guide/services#defining-a-service
|
||||
// +optional
|
||||
optional k8s.io.kubernetes.pkg.util.intstr.IntOrString targetPort = 4;
|
||||
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4;
|
||||
|
||||
// The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
|
||||
// Usually assigned by the system. If specified, it will be allocated to the service
|
||||
|
@ -3465,7 +3667,7 @@ message TCPSocketAction {
|
|||
// Number or name of the port to access on the container.
|
||||
// Number must be in the range 1 to 65535.
|
||||
// Name must be an IANA_SVC_NAME.
|
||||
optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 1;
|
||||
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1;
|
||||
}
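A small sketch of the IntOrString port field, using the apimachinery intstr helpers referenced above; the port number and the service name are arbitrary.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	byNumber := v1.TCPSocketAction{Port: intstr.FromInt(8080)}                       // numeric port
	byName := v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromString("metrics")} // named (IANA_SVC_NAME) port
	fmt.Println(byNumber.Port.String(), byName.Port.String())
}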
|
||||
|
||||
// The node this Taint is attached to has the effect "effect" on
|
||||
|
@ -3480,18 +3682,24 @@ message Taint {
|
|||
|
||||
// Required. The effect of the taint on pods
|
||||
// that do not tolerate the taint.
|
||||
// Valid effects are NoSchedule and PreferNoSchedule.
|
||||
// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
|
||||
optional string effect = 3;
|
||||
|
||||
// TimeAdded represents the time at which the taint was added.
|
||||
// It is only written for NoExecute taints.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
|
||||
}
|
||||
|
||||
// The pod this Toleration is attached to tolerates any taint that matches
|
||||
// the triple <key,value,effect> using the matching operator <operator>.
|
||||
message Toleration {
|
||||
// Required. Key is the taint key that the toleration applies to.
|
||||
// Key is the taint key that the toleration applies to. Empty means match all taint keys.
|
||||
// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
|
||||
// +optional
|
||||
optional string key = 1;
|
||||
|
||||
// operator represents a key's relationship to the value.
|
||||
// Operator represents a key's relationship to the value.
|
||||
// Valid operators are Exists and Equal. Defaults to Equal.
|
||||
// Exists is equivalent to wildcard for value, so that a pod can
|
||||
// tolerate all taints of a particular category.
|
||||
|
@ -3504,9 +3712,16 @@ message Toleration {
|
|||
optional string value = 3;
|
||||
|
||||
// Effect indicates the taint effect to match. Empty means match all taint effects.
|
||||
// When specified, allowed values are NoSchedule and PreferNoSchedule.
|
||||
// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
|
||||
// +optional
|
||||
optional string effect = 4;
|
||||
|
||||
// TolerationSeconds represents the period of time the toleration (which must be
|
||||
// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
|
||||
// it is not set, which means tolerate the taint forever (do not evict). Zero and
|
||||
// negative values will be treated as 0 (evict immediately) by the system.
|
||||
// +optional
|
||||
optional int64 tolerationSeconds = 5;
|
||||
}
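A hedged sketch of a toleration that uses these fields; the taint key is hypothetical and the five-minute TolerationSeconds value is arbitrary.

package main

import "k8s.io/kubernetes/pkg/api/v1"

func main() {
	seconds := int64(300) // arbitrary five-minute grace period before eviction
	tol := v1.Toleration{
		Key:               "example.com/unreachable", // hypothetical taint key
		Operator:          v1.TolerationOpExists,
		Effect:            v1.TaintEffectNoExecute,
		TolerationSeconds: &seconds,
	}
	_ = tol
}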
|
||||
|
||||
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
|
||||
|
@ -3542,6 +3757,18 @@ message VolumeMount {
|
|||
optional string subPath = 4;
|
||||
}
|
||||
|
||||
// Projection that may be projected along with other supported volume types
|
||||
message VolumeProjection {
|
||||
// information about the secret data to project
|
||||
optional SecretProjection secret = 1;
|
||||
|
||||
// information about the downwardAPI data to project
|
||||
optional DownwardAPIProjection downwardAPI = 2;
|
||||
|
||||
// information about the configMap data to project
|
||||
optional ConfigMapProjection configMap = 3;
|
||||
}
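A sketch of wiring these messages together from Go: a projected volume that exposes one ConfigMap key as a file. The ConfigMap name, key, and path are made up.

package main

import "k8s.io/kubernetes/pkg/api/v1"

func main() {
	mode := int32(0444) // read-only files by default
	vol := v1.Volume{
		Name: "config",
		VolumeSource: v1.VolumeSource{
			Projected: &v1.ProjectedVolumeSource{
				DefaultMode: &mode,
				Sources: []v1.VolumeProjection{{
					ConfigMap: &v1.ConfigMapProjection{
						LocalObjectReference: v1.LocalObjectReference{Name: "app-config"},
						Items:                []v1.KeyToPath{{Key: "settings", Path: "settings.json"}},
					},
				}},
			},
		},
	}
	_ = vol
}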
|
||||
|
||||
// Represents the source of a volume to mount.
|
||||
// Only one of its members may be specified.
|
||||
message VolumeSource {
|
||||
|
@ -3658,6 +3885,17 @@ message VolumeSource {
|
|||
|
||||
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
|
||||
optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23;
|
||||
|
||||
// Items for all in one resources secrets, configmaps, and downward API
|
||||
optional ProjectedVolumeSource projected = 26;
|
||||
|
||||
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
|
||||
// +optional
|
||||
optional PortworxVolumeSource portworxVolume = 24;
|
||||
|
||||
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
|
||||
// +optional
|
||||
optional ScaleIOVolumeSource scaleIO = 25;
|
||||
}
|
||||
|
||||
// Represents a vSphere volume resource.
|
||||
|
|
327
vendor/k8s.io/kubernetes/pkg/api/v1/helpers.go
generated
vendored
|
@ -21,10 +21,12 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
)
|
||||
|
||||
|
@ -82,7 +84,7 @@ func IsServiceIPRequested(service *Service) bool {
|
|||
|
||||
var standardFinalizers = sets.NewString(
|
||||
string(FinalizerKubernetes),
|
||||
FinalizerOrphan,
|
||||
metav1.FinalizerOrphanDependents,
|
||||
)
|
||||
|
||||
func IsStandardFinalizerName(str string) bool {
|
||||
|
@ -233,18 +235,6 @@ func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.S
|
|||
}
|
||||
|
||||
const (
|
||||
// AffinityAnnotationKey represents the key of affinity data (json serialized)
|
||||
// in the Annotations of a Pod.
|
||||
AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
|
||||
|
||||
// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
|
||||
// in the Annotations of a Pod.
|
||||
TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
|
||||
|
||||
// TaintsAnnotationKey represents the key of taints data (json serialized)
|
||||
// in the Annotations of a Node.
|
||||
TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
|
||||
|
||||
// SeccompPodAnnotationKey represents the key of a seccomp profile applied
|
||||
// to all containers of a pod.
|
||||
SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
|
||||
|
@ -274,82 +264,179 @@ const (
|
|||
// is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet
|
||||
// will fail to launch.
|
||||
UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls"
|
||||
|
||||
// ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache
|
||||
// an object (e.g. secret, config map) before fetching it again from apiserver.
|
||||
// This annotation can be attached to node.
|
||||
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
|
||||
|
||||
// AffinityAnnotationKey represents the key of affinity data (json serialized)
|
||||
// in the Annotations of a Pod.
|
||||
// TODO: remove when alpha support for affinity is removed
|
||||
AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
|
||||
)
|
||||
|
||||
// GetAffinityFromPod gets the json serialized affinity data from Pod.Annotations
|
||||
// and converts it to the Affinity type in api.
|
||||
func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) {
|
||||
if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" {
|
||||
var affinity Affinity
|
||||
err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec.
|
||||
// Returns true if something was updated, false otherwise.
|
||||
func AddOrUpdateTolerationInPodSpec(spec *PodSpec, toleration *Toleration) (bool, error) {
|
||||
podTolerations := spec.Tolerations
|
||||
|
||||
var newTolerations []Toleration
|
||||
updated := false
|
||||
for i := range podTolerations {
|
||||
if toleration.MatchToleration(&podTolerations[i]) {
|
||||
if api.Semantic.DeepEqual(toleration, podTolerations[i]) {
|
||||
return false, nil
|
||||
}
|
||||
newTolerations = append(newTolerations, *toleration)
|
||||
updated = true
|
||||
continue
|
||||
}
|
||||
return &affinity, nil
|
||||
|
||||
newTolerations = append(newTolerations, podTolerations[i])
|
||||
}
|
||||
return nil, nil
|
||||
|
||||
if !updated {
|
||||
newTolerations = append(newTolerations, *toleration)
|
||||
}
|
||||
|
||||
spec.Tolerations = newTolerations
|
||||
return true, nil
|
||||
}
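A usage sketch for the helper above: a second call with a toleration that MatchToleration considers equal does not grow the list. Names and values are illustrative.

package main

import "k8s.io/kubernetes/pkg/api/v1"

func main() {
	spec := &v1.PodSpec{}
	tol := &v1.Toleration{Key: "dedicated", Operator: v1.TolerationOpEqual, Value: "gpu"}

	v1.AddOrUpdateTolerationInPodSpec(spec, tol) // first call appends the toleration
	v1.AddOrUpdateTolerationInPodSpec(spec, tol) // a matching entry already exists, so the list keeps one element

	println(len(spec.Tolerations)) // 1
}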
|
||||
|
||||
// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations
|
||||
// and converts it to the []Toleration type in api.
|
||||
func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) {
|
||||
var tolerations []Toleration
|
||||
if len(annotations) > 0 && annotations[TolerationsAnnotationKey] != "" {
|
||||
err := json.Unmarshal([]byte(annotations[TolerationsAnnotationKey]), &tolerations)
|
||||
if err != nil {
|
||||
return tolerations, err
|
||||
}
|
||||
}
|
||||
return tolerations, nil
|
||||
// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
|
||||
// Returns true if something was updated, false otherwise.
|
||||
func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) {
|
||||
return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration)
|
||||
}
|
||||
|
||||
// GetTaintsFromNodeAnnotations gets the json serialized taints data from Node.Annotations
|
||||
// and converts it to the []Taint type in api.
|
||||
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) {
|
||||
var taints []Taint
|
||||
if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" {
|
||||
err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints)
|
||||
if err != nil {
|
||||
return []Taint{}, err
|
||||
}
|
||||
}
|
||||
return taints, nil
|
||||
// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
|
||||
// if two tolerations have the same <key,effect,operator,value> combination, they are regarded as matching.
|
||||
// TODO: uniqueness check for tolerations in api validations.
|
||||
func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
|
||||
return t.Key == tolerationToMatch.Key &&
|
||||
t.Effect == tolerationToMatch.Effect &&
|
||||
t.Operator == tolerationToMatch.Operator &&
|
||||
t.Value == tolerationToMatch.Value
|
||||
}
|
||||
|
||||
// TolerationToleratesTaint checks if the toleration tolerates the taint.
|
||||
func TolerationToleratesTaint(toleration *Toleration, taint *Taint) bool {
|
||||
if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
|
||||
// ToleratesTaint checks if the toleration tolerates the taint.
|
||||
// The matching follows the rules below:
|
||||
// (1) Empty toleration.effect means to match all taint effects,
|
||||
// otherwise taint effect must equal to toleration.effect.
|
||||
// (2) If toleration.operator is 'Exists', it means to match all taint values.
|
||||
// (3) Empty toleration.key means to match all taint keys.
|
||||
// If toleration.key is empty, toleration.operator must be 'Exists';
|
||||
// this combination means to match all taint values and all taint keys.
|
||||
func (t *Toleration) ToleratesTaint(taint *Taint) bool {
|
||||
if len(t.Effect) > 0 && t.Effect != taint.Effect {
|
||||
return false
|
||||
}
|
||||
|
||||
if toleration.Key != taint.Key {
|
||||
if len(t.Key) > 0 && t.Key != taint.Key {
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO: Use proper defaulting when Toleration becomes a field of PodSpec
|
||||
if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value {
|
||||
switch t.Operator {
|
||||
// empty operator means Equal
|
||||
case "", TolerationOpEqual:
|
||||
return t.Value == taint.Value
|
||||
case TolerationOpExists:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
if toleration.Operator == TolerationOpExists {
|
||||
return true
|
||||
}
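A small sketch of the matching rules above (assumed import path from this vendor tree; the taint key and value are illustrative only):
package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	taint := v1.Taint{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}

	// Equal operator: key, value and effect must all line up.
	equal := v1.Toleration{Key: "dedicated", Operator: v1.TolerationOpEqual, Value: "gpu", Effect: v1.TaintEffectNoSchedule}
	// Exists operator with an empty effect: matches any value and any effect for the key.
	exists := v1.Toleration{Key: "dedicated", Operator: v1.TolerationOpExists}

	fmt.Println(equal.ToleratesTaint(&taint))  // true
	fmt.Println(exists.ToleratesTaint(&taint)) // true
}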
|
||||
|
||||
// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations.
|
||||
func TolerationsTolerateTaint(tolerations []Toleration, taint *Taint) bool {
|
||||
for i := range tolerations {
|
||||
if tolerations[i].ToleratesTaint(taint) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
|
||||
func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool {
|
||||
tolerated := false
|
||||
for i := range tolerations {
|
||||
if TolerationToleratesTaint(&tolerations[i], taint) {
|
||||
tolerated = true
|
||||
break
|
||||
type taintsFilterFunc func(*Taint) bool
|
||||
|
||||
// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates
|
||||
// all the taints that apply to the filter in given taint list.
|
||||
func TolerationsTolerateTaintsWithFilter(tolerations []Toleration, taints []Taint, applyFilter taintsFilterFunc) bool {
|
||||
if len(taints) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for i := range taints {
|
||||
if applyFilter != nil && !applyFilter(&taints[i]) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !TolerationsTolerateTaint(tolerations, &taints[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return tolerated
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// DeleteTaintsByKey removes all the taints that have the same key as the given taintKey.
|
||||
func DeleteTaintsByKey(taints []Taint, taintKey string) ([]Taint, bool) {
|
||||
newTaints := []Taint{}
|
||||
deleted := false
|
||||
for i := range taints {
|
||||
if taintKey == taints[i].Key {
|
||||
deleted = true
|
||||
continue
|
||||
}
|
||||
newTaints = append(newTaints, taints[i])
|
||||
}
|
||||
return newTaints, deleted
|
||||
}
|
||||
|
||||
// DeleteTaint removes all the taints that have the same key and effect as the given taintToDelete.
|
||||
func DeleteTaint(taints []Taint, taintToDelete *Taint) ([]Taint, bool) {
|
||||
newTaints := []Taint{}
|
||||
deleted := false
|
||||
for i := range taints {
|
||||
if taintToDelete.MatchTaint(&taints[i]) {
|
||||
deleted = true
|
||||
continue
|
||||
}
|
||||
newTaints = append(newTaints, taints[i])
|
||||
}
|
||||
return newTaints, deleted
|
||||
}
|
||||
|
||||
// GetMatchingTolerations returns true and a list of Tolerations matching all Taints if all taints are tolerated, and false otherwise.
|
||||
func GetMatchingTolerations(taints []Taint, tolerations []Toleration) (bool, []Toleration) {
|
||||
if len(taints) == 0 {
|
||||
return true, []Toleration{}
|
||||
}
|
||||
if len(tolerations) == 0 && len(taints) > 0 {
|
||||
return false, []Toleration{}
|
||||
}
|
||||
result := []Toleration{}
|
||||
for i := range taints {
|
||||
tolerated := false
|
||||
for j := range tolerations {
|
||||
if tolerations[j].ToleratesTaint(&taints[i]) {
|
||||
result = append(result, tolerations[j])
|
||||
tolerated = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !tolerated {
|
||||
return false, []Toleration{}
|
||||
}
|
||||
}
|
||||
return true, result
|
||||
}
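A brief sketch of GetMatchingTolerations under the same assumptions as the earlier snippets (illustrative taint and toleration values):
package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	taints := []v1.Taint{{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}}
	tolerations := []v1.Toleration{{Key: "dedicated", Operator: v1.TolerationOpExists}}

	// The Exists toleration covers the single taint, so ok is true and
	// matching holds that one toleration.
	ok, matching := v1.GetMatchingTolerations(taints, tolerations)
	fmt.Println(ok, len(matching)) // true 1
}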
|
||||
|
||||
// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect,
|
||||
// if two taints have the same key:effect, they are regarded as matching.
|
||||
func (t *Taint) MatchTaint(taintToMatch Taint) bool {
|
||||
func (t *Taint) MatchTaint(taintToMatch *Taint) bool {
|
||||
return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
|
||||
}
|
||||
|
||||
|
@ -431,3 +518,121 @@ type NodeResources struct {
|
|||
// Capacity represents the available resources of a node
|
||||
Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
|
||||
}
|
||||
|
||||
// AddOrUpdateTaint tries to add a taint to the node's taint list (Spec.Taints). Returns a new copy of the updated Node and true if something was updated,
|
||||
// false otherwise.
|
||||
func AddOrUpdateTaint(node *Node, taint *Taint) (*Node, bool, error) {
|
||||
objCopy, err := api.Scheme.DeepCopy(node)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
newNode := objCopy.(*Node)
|
||||
nodeTaints := newNode.Spec.Taints
|
||||
|
||||
var newTaints []Taint
|
||||
updated := false
|
||||
for i := range nodeTaints {
|
||||
if taint.MatchTaint(&nodeTaints[i]) {
|
||||
if api.Semantic.DeepEqual(taint, nodeTaints[i]) {
|
||||
return newNode, false, nil
|
||||
}
|
||||
newTaints = append(newTaints, *taint)
|
||||
updated = true
|
||||
continue
|
||||
}
|
||||
|
||||
newTaints = append(newTaints, nodeTaints[i])
|
||||
}
|
||||
|
||||
if !updated {
|
||||
newTaints = append(newTaints, *taint)
|
||||
}
|
||||
|
||||
newNode.Spec.Taints = newTaints
|
||||
return newNode, true, nil
|
||||
}
|
||||
|
||||
func TaintExists(taints []Taint, taintToFind *Taint) bool {
|
||||
for _, taint := range taints {
|
||||
if taint.MatchTaint(taintToFind) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
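A hedged sketch of AddOrUpdateTaint together with TaintExists; it assumes api.Scheme can deep-copy a v1.Node in this vendored tree, and the taint values are illustrative:
package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	node := &v1.Node{}
	taint := &v1.Taint{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}

	newNode, updated, err := v1.AddOrUpdateTaint(node, taint)
	if err != nil {
		fmt.Println("deep copy failed:", err)
		return
	}
	fmt.Println(updated)                                    // true: the taint was appended
	fmt.Println(v1.TaintExists(newNode.Spec.Taints, taint)) // true
}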
|
||||
|
||||
// RemoveTaint tries to remove a taint from the node's taint list (Spec.Taints). Returns a new copy of the updated Node and true if something was updated,
|
||||
// false otherwise.
|
||||
func RemoveTaint(node *Node, taint *Taint) (*Node, bool, error) {
|
||||
objCopy, err := api.Scheme.DeepCopy(node)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
newNode := objCopy.(*Node)
|
||||
nodeTaints := newNode.Spec.Taints
|
||||
if len(nodeTaints) == 0 {
|
||||
return newNode, false, nil
|
||||
}
|
||||
|
||||
if !TaintExists(nodeTaints, taint) {
|
||||
return newNode, false, nil
|
||||
}
|
||||
|
||||
newTaints, _ := DeleteTaint(nodeTaints, taint)
|
||||
newNode.Spec.Taints = newTaints
|
||||
return newNode, true, nil
|
||||
}
|
||||
|
||||
// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations
|
||||
// and converts it to the Affinity type in api.
|
||||
// TODO: remove when alpha support for affinity is removed
|
||||
func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) {
|
||||
if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" {
|
||||
var affinity Affinity
|
||||
err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &affinity, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
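The affinity annotation holds JSON in the shape of the Affinity type; a minimal, assumed example of parsing it (the "zone" key and value are placeholders):
package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	annotations := map[string]string{
		v1.AffinityAnnotationKey: `{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"zone","operator":"In","values":["us-east-1a"]}]}]}}}`,
	}
	affinity, err := v1.GetAffinityFromPodAnnotations(annotations)
	if err != nil {
		fmt.Println("malformed affinity annotation:", err)
		return
	}
	fmt.Println(affinity.NodeAffinity != nil) // true
}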
|
||||
|
||||
// GetPersistentVolumeClass returns StorageClassName.
|
||||
func GetPersistentVolumeClass(volume *PersistentVolume) string {
|
||||
// Use beta annotation first
|
||||
if class, found := volume.Annotations[BetaStorageClassAnnotation]; found {
|
||||
return class
|
||||
}
|
||||
|
||||
return volume.Spec.StorageClassName
|
||||
}
|
||||
|
||||
// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was
|
||||
// requested, it returns "".
|
||||
func GetPersistentVolumeClaimClass(claim *PersistentVolumeClaim) string {
|
||||
// Use beta annotation first
|
||||
if class, found := claim.Annotations[BetaStorageClassAnnotation]; found {
|
||||
return class
|
||||
}
|
||||
|
||||
if claim.Spec.StorageClassName != nil {
|
||||
return *claim.Spec.StorageClassName
|
||||
}
|
||||
|
||||
return ""
|
||||
}
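A short sketch of the precedence between the beta storage-class annotation and Spec.StorageClassName; the constant name BetaStorageClassAnnotation is taken from the references in the surrounding file, everything else is illustrative:
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	gold := "gold"
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			// The beta annotation takes precedence over Spec.StorageClassName.
			Annotations: map[string]string{v1.BetaStorageClassAnnotation: "fast"},
		},
		Spec: v1.PersistentVolumeClaimSpec{StorageClassName: &gold},
	}
	fmt.Println(v1.GetPersistentVolumeClaimClass(claim)) // fast
}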
|
||||
|
||||
// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field.
|
||||
func PersistentVolumeClaimHasClass(claim *PersistentVolumeClaim) bool {
|
||||
// Use beta annotation first
|
||||
if _, found := claim.Annotations[BetaStorageClassAnnotation]; found {
|
||||
return true
|
||||
}
|
||||
|
||||
if claim.Spec.StorageClassName != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
|
480
vendor/k8s.io/kubernetes/pkg/api/v1/helpers_test.go
generated
vendored
|
@ -1,480 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
)
|
||||
|
||||
func TestAddToNodeAddresses(t *testing.T) {
|
||||
testCases := []struct {
|
||||
existing []NodeAddress
|
||||
toAdd []NodeAddress
|
||||
expected []NodeAddress
|
||||
}{
|
||||
{
|
||||
existing: []NodeAddress{},
|
||||
toAdd: []NodeAddress{},
|
||||
expected: []NodeAddress{},
|
||||
},
|
||||
{
|
||||
existing: []NodeAddress{},
|
||||
toAdd: []NodeAddress{
|
||||
{Type: NodeExternalIP, Address: "1.1.1.1"},
|
||||
{Type: NodeHostName, Address: "localhost"},
|
||||
},
|
||||
expected: []NodeAddress{
|
||||
{Type: NodeExternalIP, Address: "1.1.1.1"},
|
||||
{Type: NodeHostName, Address: "localhost"},
|
||||
},
|
||||
},
|
||||
{
|
||||
existing: []NodeAddress{},
|
||||
toAdd: []NodeAddress{
|
||||
{Type: NodeExternalIP, Address: "1.1.1.1"},
|
||||
{Type: NodeExternalIP, Address: "1.1.1.1"},
|
||||
},
|
||||
expected: []NodeAddress{
|
||||
{Type: NodeExternalIP, Address: "1.1.1.1"},
|
||||
},
|
||||
},
|
||||
{
|
||||
existing: []NodeAddress{
|
||||
{Type: NodeExternalIP, Address: "1.1.1.1"},
|
||||
{Type: NodeInternalIP, Address: "10.1.1.1"},
|
||||
},
|
||||
toAdd: []NodeAddress{
|
||||
{Type: NodeExternalIP, Address: "1.1.1.1"},
|
||||
{Type: NodeHostName, Address: "localhost"},
|
||||
},
|
||||
expected: []NodeAddress{
|
||||
{Type: NodeExternalIP, Address: "1.1.1.1"},
|
||||
{Type: NodeInternalIP, Address: "10.1.1.1"},
|
||||
{Type: NodeHostName, Address: "localhost"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
AddToNodeAddresses(&tc.existing, tc.toAdd...)
|
||||
if !api.Semantic.DeepEqual(tc.expected, tc.existing) {
|
||||
t.Errorf("case[%d], expected: %v, got: %v", i, tc.expected, tc.existing)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAccessModesFromString(t *testing.T) {
|
||||
modes := GetAccessModesFromString("ROX")
|
||||
if !containsAccessMode(modes, ReadOnlyMany) {
|
||||
t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes)
|
||||
}
|
||||
|
||||
modes = GetAccessModesFromString("ROX,RWX")
|
||||
if !containsAccessMode(modes, ReadOnlyMany) {
|
||||
t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes)
|
||||
}
|
||||
if !containsAccessMode(modes, ReadWriteMany) {
|
||||
t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes)
|
||||
}
|
||||
|
||||
modes = GetAccessModesFromString("RWO,ROX,RWX")
|
||||
if !containsAccessMode(modes, ReadOnlyMany) {
|
||||
t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes)
|
||||
}
|
||||
if !containsAccessMode(modes, ReadWriteMany) {
|
||||
t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveDuplicateAccessModes(t *testing.T) {
|
||||
modes := []PersistentVolumeAccessMode{
|
||||
ReadWriteOnce, ReadOnlyMany, ReadOnlyMany, ReadOnlyMany,
|
||||
}
|
||||
modes = removeDuplicateAccessModes(modes)
|
||||
if len(modes) != 2 {
|
||||
t.Errorf("Expected 2 distinct modes in set but found %v", len(modes))
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeSelectorRequirementsAsSelector(t *testing.T) {
|
||||
matchExpressions := []NodeSelectorRequirement{{
|
||||
Key: "foo",
|
||||
Operator: NodeSelectorOpIn,
|
||||
Values: []string{"bar", "baz"},
|
||||
}}
|
||||
mustParse := func(s string) labels.Selector {
|
||||
out, e := labels.Parse(s)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
return out
|
||||
}
|
||||
tc := []struct {
|
||||
in []NodeSelectorRequirement
|
||||
out labels.Selector
|
||||
expectErr bool
|
||||
}{
|
||||
{in: nil, out: labels.Nothing()},
|
||||
{in: []NodeSelectorRequirement{}, out: labels.Nothing()},
|
||||
{
|
||||
in: matchExpressions,
|
||||
out: mustParse("foo in (baz,bar)"),
|
||||
},
|
||||
{
|
||||
in: []NodeSelectorRequirement{{
|
||||
Key: "foo",
|
||||
Operator: NodeSelectorOpExists,
|
||||
Values: []string{"bar", "baz"},
|
||||
}},
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
in: []NodeSelectorRequirement{{
|
||||
Key: "foo",
|
||||
Operator: NodeSelectorOpGt,
|
||||
Values: []string{"1"},
|
||||
}},
|
||||
out: mustParse("foo>1"),
|
||||
},
|
||||
{
|
||||
in: []NodeSelectorRequirement{{
|
||||
Key: "bar",
|
||||
Operator: NodeSelectorOpLt,
|
||||
Values: []string{"7"},
|
||||
}},
|
||||
out: mustParse("bar<7"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range tc {
|
||||
out, err := NodeSelectorRequirementsAsSelector(tc.in)
|
||||
if err == nil && tc.expectErr {
|
||||
t.Errorf("[%v]expected error but got none.", i)
|
||||
}
|
||||
if err != nil && !tc.expectErr {
|
||||
t.Errorf("[%v]did not expect error but got: %v", i, err)
|
||||
}
|
||||
if !reflect.DeepEqual(out, tc.out) {
|
||||
t.Errorf("[%v]expected:\n\t%+v\nbut got:\n\t%+v", i, tc.out, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAffinityFromPod(t *testing.T) {
|
||||
testCases := []struct {
|
||||
pod *Pod
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
pod: &Pod{},
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
pod: &Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
AffinityAnnotationKey: `
|
||||
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [{
|
||||
"matchExpressions": [{
|
||||
"key": "foo",
|
||||
"operator": "In",
|
||||
"values": ["value1", "value2"]
|
||||
}]
|
||||
}]
|
||||
}}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
pod: &Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
AffinityAnnotationKey: `
|
||||
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [{
|
||||
"matchExpressions": [{
|
||||
"key": "foo",
|
||||
`,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
_, err := GetAffinityFromPodAnnotations(tc.pod.Annotations)
|
||||
if err == nil && tc.expectErr {
|
||||
t.Errorf("[%v]expected error but got none.", i)
|
||||
}
|
||||
if err != nil && !tc.expectErr {
|
||||
t.Errorf("[%v]did not expect error but got: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTaintToString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
taint *Taint
|
||||
expectedString string
|
||||
}{
|
||||
{
|
||||
taint: &Taint{
|
||||
Key: "foo",
|
||||
Value: "bar",
|
||||
Effect: TaintEffectNoSchedule,
|
||||
},
|
||||
expectedString: "foo=bar:NoSchedule",
|
||||
},
|
||||
{
|
||||
taint: &Taint{
|
||||
Key: "foo",
|
||||
Effect: TaintEffectNoSchedule,
|
||||
},
|
||||
expectedString: "foo:NoSchedule",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
if tc.expectedString != tc.taint.ToString() {
|
||||
t.Errorf("[%v] expected taint %v converted to %s, got %s", i, tc.taint, tc.expectedString, tc.taint.ToString())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchTaint(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
taint *Taint
|
||||
taintToMatch Taint
|
||||
expectMatch bool
|
||||
}{
|
||||
{
|
||||
description: "two taints with the same key,value,effect should match",
|
||||
taint: &Taint{
|
||||
Key: "foo",
|
||||
Value: "bar",
|
||||
Effect: TaintEffectNoSchedule,
|
||||
},
|
||||
taintToMatch: Taint{
|
||||
Key: "foo",
|
||||
Value: "bar",
|
||||
Effect: TaintEffectNoSchedule,
|
||||
},
|
||||
expectMatch: true,
|
||||
},
|
||||
{
|
||||
description: "two taints with the same key,effect but different value should match",
|
||||
taint: &Taint{
|
||||
Key: "foo",
|
||||
Value: "bar",
|
||||
Effect: TaintEffectNoSchedule,
|
||||
},
|
||||
taintToMatch: Taint{
|
||||
Key: "foo",
|
||||
Value: "different-value",
|
||||
Effect: TaintEffectNoSchedule,
|
||||
},
|
||||
expectMatch: true,
|
||||
},
|
||||
{
|
||||
description: "two taints with the different key cannot match",
|
||||
taint: &Taint{
|
||||
Key: "foo",
|
||||
Value: "bar",
|
||||
Effect: TaintEffectNoSchedule,
|
||||
},
|
||||
taintToMatch: Taint{
|
||||
Key: "different-key",
|
||||
Value: "bar",
|
||||
Effect: TaintEffectNoSchedule,
|
||||
},
|
||||
expectMatch: false,
|
||||
},
|
||||
{
|
||||
description: "two taints with the different effect cannot match",
|
||||
taint: &Taint{
|
||||
Key: "foo",
|
||||
Value: "bar",
|
||||
Effect: TaintEffectNoSchedule,
|
||||
},
|
||||
taintToMatch: Taint{
|
||||
Key: "foo",
|
||||
Value: "bar",
|
||||
Effect: TaintEffectPreferNoSchedule,
|
||||
},
|
||||
expectMatch: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
if tc.expectMatch != tc.taint.MatchTaint(tc.taintToMatch) {
|
||||
t.Errorf("[%s] expect taint %s match taint %s", tc.description, tc.taint.ToString(), tc.taintToMatch.ToString())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAvoidPodsFromNode(t *testing.T) {
|
||||
controllerFlag := true
|
||||
testCases := []struct {
|
||||
node *Node
|
||||
expectValue AvoidPods
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
node: &Node{},
|
||||
expectValue: AvoidPods{},
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
node: &Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
PreferAvoidPodsAnnotationKey: `
|
||||
{
|
||||
"preferAvoidPods": [
|
||||
{
|
||||
"podSignature": {
|
||||
"podController": {
|
||||
"apiVersion": "v1",
|
||||
"kind": "ReplicationController",
|
||||
"name": "foo",
|
||||
"uid": "abcdef123456",
|
||||
"controller": true
|
||||
}
|
||||
},
|
||||
"reason": "some reason",
|
||||
"message": "some message"
|
||||
}
|
||||
]
|
||||
}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectValue: AvoidPods{
|
||||
PreferAvoidPods: []PreferAvoidPodsEntry{
|
||||
{
|
||||
PodSignature: PodSignature{
|
||||
PodController: &metav1.OwnerReference{
|
||||
APIVersion: "v1",
|
||||
Kind: "ReplicationController",
|
||||
Name: "foo",
|
||||
UID: "abcdef123456",
|
||||
Controller: &controllerFlag,
|
||||
},
|
||||
},
|
||||
Reason: "some reason",
|
||||
Message: "some message",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
node: &Node{
|
||||
// Missing end symbol of "podController" and "podSignature"
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
PreferAvoidPodsAnnotationKey: `
|
||||
{
|
||||
"preferAvoidPods": [
|
||||
{
|
||||
"podSignature": {
|
||||
"podController": {
|
||||
"kind": "ReplicationController",
|
||||
"apiVersion": "v1"
|
||||
"reason": "some reason",
|
||||
"message": "some message"
|
||||
}
|
||||
]
|
||||
}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectValue: AvoidPods{},
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
v, err := GetAvoidPodsFromNodeAnnotations(tc.node.Annotations)
|
||||
if err == nil && tc.expectErr {
|
||||
t.Errorf("[%v]expected error but got none.", i)
|
||||
}
|
||||
if err != nil && !tc.expectErr {
|
||||
t.Errorf("[%v]did not expect error but got: %v", i, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tc.expectValue, v) {
|
||||
t.Errorf("[%v]expect value %v but got %v with %v", i, tc.expectValue, v, v.PreferAvoidPods[0].PodSignature.PodController.Controller)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSysctlsFromPodAnnotation(t *testing.T) {
|
||||
type Test struct {
|
||||
annotation string
|
||||
expectValue []Sysctl
|
||||
expectErr bool
|
||||
}
|
||||
for i, test := range []Test{
|
||||
{
|
||||
annotation: "",
|
||||
expectValue: nil,
|
||||
},
|
||||
{
|
||||
annotation: "foo.bar",
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
annotation: "=123",
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
annotation: "foo.bar=",
|
||||
expectValue: []Sysctl{{Name: "foo.bar", Value: ""}},
|
||||
},
|
||||
{
|
||||
annotation: "foo.bar=42",
|
||||
expectValue: []Sysctl{{Name: "foo.bar", Value: "42"}},
|
||||
},
|
||||
{
|
||||
annotation: "foo.bar=42,",
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
annotation: "foo.bar=42,abc.def=1",
|
||||
expectValue: []Sysctl{{Name: "foo.bar", Value: "42"}, {Name: "abc.def", Value: "1"}},
|
||||
},
|
||||
} {
|
||||
sysctls, err := SysctlsFromPodAnnotation(test.annotation)
|
||||
if test.expectErr && err == nil {
|
||||
t.Errorf("[%v]expected error but got none", i)
|
||||
} else if !test.expectErr && err != nil {
|
||||
t.Errorf("[%v]did not expect error but got: %v", i, err)
|
||||
} else if !reflect.DeepEqual(sysctls, test.expectValue) {
|
||||
t.Errorf("[%v]expect value %v but got %v", i, test.expectValue, sysctls)
|
||||
}
|
||||
}
|
||||
}
|
8
vendor/k8s.io/kubernetes/pkg/api/v1/meta.go
generated
vendored
|
@ -63,6 +63,10 @@ func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference {
|
|||
value := *meta.OwnerReferences[i].Controller
|
||||
ret[i].Controller = &value
|
||||
}
|
||||
if meta.OwnerReferences[i].BlockOwnerDeletion != nil {
|
||||
value := *meta.OwnerReferences[i].BlockOwnerDeletion
|
||||
ret[i].BlockOwnerDeletion = &value
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
@ -78,6 +82,10 @@ func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) {
|
|||
value := *references[i].Controller
|
||||
newReferences[i].Controller = &value
|
||||
}
|
||||
if references[i].BlockOwnerDeletion != nil {
|
||||
value := *references[i].BlockOwnerDeletion
|
||||
newReferences[i].BlockOwnerDeletion = &value
|
||||
}
|
||||
}
|
||||
meta.OwnerReferences = newReferences
|
||||
}
|
||||
|
|
43
vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD
generated
vendored
|
@ -1,43 +0,0 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["util.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/util/intstr:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["util_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/util/intstr:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
120
vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go
generated
vendored
|
@ -1,120 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pod
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO: to be deleted after v1.3 is released. PodSpec has a dedicated Hostname field.
|
||||
// The annotation value is a string specifying the hostname to be used for the pod e.g 'my-webserver-1'
|
||||
PodHostnameAnnotation = "pod.beta.kubernetes.io/hostname"
|
||||
|
||||
// TODO: to be deleted after v1.3 is released. PodSpec has a dedicated Subdomain field.
|
||||
// The annotation value is a string specifying the subdomain e.g. "my-web-service"
|
||||
// If specified, on the pod itself, "<hostname>.my-web-service.<namespace>.svc.<cluster domain>" would resolve to
|
||||
// the pod's IP.
|
||||
// If there is a headless service named "my-web-service" in the same namespace as the pod, then,
|
||||
// "<hostname>.my-web-service.<namespace>.svc.<cluster domain>" would be resolved by the cluster DNS Server.
|
||||
PodSubdomainAnnotation = "pod.beta.kubernetes.io/subdomain"
|
||||
)
|
||||
|
||||
// FindPort locates the container port for the given pod and portName. If the
|
||||
// targetPort is a number, use that. If the targetPort is a string, look that
|
||||
// string up in all named ports in all containers in the target pod. If no
|
||||
// match is found, fail.
|
||||
func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) {
|
||||
portName := svcPort.TargetPort
|
||||
switch portName.Type {
|
||||
case intstr.String:
|
||||
name := portName.StrVal
|
||||
for _, container := range pod.Spec.Containers {
|
||||
for _, port := range container.Ports {
|
||||
if port.Name == name && port.Protocol == svcPort.Protocol {
|
||||
return int(port.ContainerPort), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
case intstr.Int:
|
||||
return portName.IntValue(), nil
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID)
|
||||
}
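A minimal sketch of FindPort resolving a named target port (import paths follow this vendored tree; the port name and numbers are illustrative):
package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 8080, Protocol: "TCP"}},
	}}}}
	// The service targets the named port "http", which resolves to 8080.
	svcPort := &v1.ServicePort{Protocol: "TCP", TargetPort: intstr.FromString("http")}
	port, err := podutil.FindPort(pod, svcPort)
	fmt.Println(port, err) // 8080 <nil>
}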
|
||||
|
||||
// TODO: remove this function when init containers becomes a stable feature
|
||||
func SetInitContainersAndStatuses(pod *v1.Pod) error {
|
||||
var initContainersAnnotation string
|
||||
initContainersAnnotation = pod.Annotations[v1.PodInitContainersAnnotationKey]
|
||||
initContainersAnnotation = pod.Annotations[v1.PodInitContainersBetaAnnotationKey]
|
||||
if len(initContainersAnnotation) > 0 {
|
||||
var values []v1.Container
|
||||
if err := json.Unmarshal([]byte(initContainersAnnotation), &values); err != nil {
|
||||
return err
|
||||
}
|
||||
pod.Spec.InitContainers = values
|
||||
}
|
||||
|
||||
var initContainerStatusesAnnotation string
|
||||
initContainerStatusesAnnotation = pod.Annotations[v1.PodInitContainerStatusesAnnotationKey]
|
||||
initContainerStatusesAnnotation = pod.Annotations[v1.PodInitContainerStatusesBetaAnnotationKey]
|
||||
if len(initContainerStatusesAnnotation) > 0 {
|
||||
var values []v1.ContainerStatus
|
||||
if err := json.Unmarshal([]byte(initContainerStatusesAnnotation), &values); err != nil {
|
||||
return err
|
||||
}
|
||||
pod.Status.InitContainerStatuses = values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: remove this function when init containers becomes a stable feature
|
||||
func SetInitContainersAnnotations(pod *v1.Pod) error {
|
||||
if len(pod.Spec.InitContainers) > 0 {
|
||||
value, err := json.Marshal(pod.Spec.InitContainers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = make(map[string]string)
|
||||
}
|
||||
pod.Annotations[v1.PodInitContainersAnnotationKey] = string(value)
|
||||
pod.Annotations[v1.PodInitContainersBetaAnnotationKey] = string(value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: remove this function when init containers becomes a stable feature
|
||||
func SetInitContainersStatusesAnnotations(pod *v1.Pod) error {
|
||||
if len(pod.Status.InitContainerStatuses) > 0 {
|
||||
value, err := json.Marshal(pod.Status.InitContainerStatuses)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = make(map[string]string)
|
||||
}
|
||||
pod.Annotations[v1.PodInitContainerStatusesAnnotationKey] = string(value)
|
||||
pod.Annotations[v1.PodInitContainerStatusesBetaAnnotationKey] = string(value)
|
||||
}
|
||||
return nil
|
||||
}
|
192
vendor/k8s.io/kubernetes/pkg/api/v1/pod/util_test.go
generated
vendored
|
@ -1,192 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pod
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
)
|
||||
|
||||
func TestFindPort(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
containers []v1.Container
|
||||
port intstr.IntOrString
|
||||
expected int
|
||||
pass bool
|
||||
}{{
|
||||
name: "valid int, no ports",
|
||||
containers: []v1.Container{{}},
|
||||
port: intstr.FromInt(93),
|
||||
expected: 93,
|
||||
pass: true,
|
||||
}, {
|
||||
name: "valid int, with ports",
|
||||
containers: []v1.Container{{Ports: []v1.ContainerPort{{
|
||||
Name: "",
|
||||
ContainerPort: 11,
|
||||
Protocol: "TCP",
|
||||
}, {
|
||||
Name: "p",
|
||||
ContainerPort: 22,
|
||||
Protocol: "TCP",
|
||||
}}}},
|
||||
port: intstr.FromInt(93),
|
||||
expected: 93,
|
||||
pass: true,
|
||||
}, {
|
||||
name: "valid str, no ports",
|
||||
containers: []v1.Container{{}},
|
||||
port: intstr.FromString("p"),
|
||||
expected: 0,
|
||||
pass: false,
|
||||
}, {
|
||||
name: "valid str, one ctr with ports",
|
||||
containers: []v1.Container{{Ports: []v1.ContainerPort{{
|
||||
Name: "",
|
||||
ContainerPort: 11,
|
||||
Protocol: "UDP",
|
||||
}, {
|
||||
Name: "p",
|
||||
ContainerPort: 22,
|
||||
Protocol: "TCP",
|
||||
}, {
|
||||
Name: "q",
|
||||
ContainerPort: 33,
|
||||
Protocol: "TCP",
|
||||
}}}},
|
||||
port: intstr.FromString("q"),
|
||||
expected: 33,
|
||||
pass: true,
|
||||
}, {
|
||||
name: "valid str, two ctr with ports",
|
||||
containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{
|
||||
Name: "",
|
||||
ContainerPort: 11,
|
||||
Protocol: "UDP",
|
||||
}, {
|
||||
Name: "p",
|
||||
ContainerPort: 22,
|
||||
Protocol: "TCP",
|
||||
}, {
|
||||
Name: "q",
|
||||
ContainerPort: 33,
|
||||
Protocol: "TCP",
|
||||
}}}},
|
||||
port: intstr.FromString("q"),
|
||||
expected: 33,
|
||||
pass: true,
|
||||
}, {
|
||||
name: "valid str, two ctr with same port",
|
||||
containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{
|
||||
Name: "",
|
||||
ContainerPort: 11,
|
||||
Protocol: "UDP",
|
||||
}, {
|
||||
Name: "p",
|
||||
ContainerPort: 22,
|
||||
Protocol: "TCP",
|
||||
}, {
|
||||
Name: "q",
|
||||
ContainerPort: 22,
|
||||
Protocol: "TCP",
|
||||
}}}},
|
||||
port: intstr.FromString("q"),
|
||||
expected: 22,
|
||||
pass: true,
|
||||
}, {
|
||||
name: "valid str, invalid protocol",
|
||||
containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{
|
||||
Name: "a",
|
||||
ContainerPort: 11,
|
||||
Protocol: "snmp",
|
||||
},
|
||||
}}},
|
||||
port: intstr.FromString("a"),
|
||||
expected: 0,
|
||||
pass: false,
|
||||
}, {
|
||||
name: "valid hostPort",
|
||||
containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{
|
||||
Name: "a",
|
||||
ContainerPort: 11,
|
||||
HostPort: 81,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
}}},
|
||||
port: intstr.FromString("a"),
|
||||
expected: 11,
|
||||
pass: true,
|
||||
},
|
||||
{
|
||||
name: "invalid hostPort",
|
||||
containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{
|
||||
Name: "a",
|
||||
ContainerPort: 11,
|
||||
HostPort: -1,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
}}},
|
||||
port: intstr.FromString("a"),
|
||||
expected: 11,
|
||||
pass: true,
|
||||
//this should fail but passes.
|
||||
},
|
||||
{
|
||||
name: "invalid ContainerPort",
|
||||
containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{
|
||||
Name: "a",
|
||||
ContainerPort: -1,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
}}},
|
||||
port: intstr.FromString("a"),
|
||||
expected: -1,
|
||||
pass: true,
|
||||
//this should fail but passes
|
||||
},
|
||||
{
|
||||
name: "HostIP Address",
|
||||
containers: []v1.Container{{}, {Ports: []v1.ContainerPort{{
|
||||
Name: "a",
|
||||
ContainerPort: 11,
|
||||
HostIP: "192.168.1.1",
|
||||
Protocol: "TCP",
|
||||
},
|
||||
}}},
|
||||
port: intstr.FromString("a"),
|
||||
expected: 11,
|
||||
pass: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
port, err := FindPort(&v1.Pod{Spec: v1.PodSpec{Containers: tc.containers}},
|
||||
&v1.ServicePort{Protocol: "TCP", TargetPort: tc.port})
|
||||
if err != nil && tc.pass {
|
||||
t.Errorf("unexpected error for %s: %v", tc.name, err)
|
||||
}
|
||||
if err == nil && !tc.pass {
|
||||
t.Errorf("unexpected non-error for %s: %d", tc.name, port)
|
||||
}
|
||||
if port != tc.expected {
|
||||
t.Errorf("wrong result for %s: expected %d, got %d", tc.name, tc.expected, port)
|
||||
}
|
||||
}
|
||||
}
|
9
vendor/k8s.io/kubernetes/pkg/api/v1/ref.go
generated
vendored
|
@ -23,7 +23,6 @@ import (
|
|||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
@ -39,7 +38,7 @@ var (
|
|||
// object, or an error if the object doesn't follow the conventions
|
||||
// that would allow this.
|
||||
// TODO: should take a meta.Interface see http://issue.k8s.io/7127
|
||||
func GetReference(obj runtime.Object) (*ObjectReference, error) {
|
||||
func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) {
|
||||
if obj == nil {
|
||||
return nil, ErrNilObject
|
||||
}
|
||||
|
@ -55,7 +54,7 @@ func GetReference(obj runtime.Object) (*ObjectReference, error) {
|
|||
kind := gvk.Kind
|
||||
if len(kind) == 0 {
|
||||
// TODO: this is wrong
|
||||
gvks, _, err := api.Scheme.ObjectKinds(obj)
|
||||
gvks, _, err := scheme.ObjectKinds(obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -113,8 +112,8 @@ func GetReference(obj runtime.Object) (*ObjectReference, error) {
|
|||
}
|
||||
|
||||
// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath.
|
||||
func GetPartialReference(obj runtime.Object, fieldPath string) (*ObjectReference, error) {
|
||||
ref, err := GetReference(obj)
|
||||
func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) {
|
||||
ref, err := GetReference(scheme, obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
30
vendor/k8s.io/kubernetes/pkg/api/v1/resource_helpers.go
generated
vendored
|
@ -19,8 +19,8 @@ package v1
|
|||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
)
|
||||
|
||||
// Returns string version of ResourceName.
|
||||
|
@ -227,3 +227,31 @@ func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, li
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetResourceRequest finds and returns the request for a specific resource.
|
||||
func GetResourceRequest(pod *Pod, resource ResourceName) int64 {
|
||||
if resource == ResourcePods {
|
||||
return 1
|
||||
}
|
||||
totalResources := int64(0)
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if rQuantity, ok := container.Resources.Requests[resource]; ok {
|
||||
if resource == ResourceCPU {
|
||||
totalResources += rQuantity.MilliValue()
|
||||
} else {
|
||||
totalResources += rQuantity.Value()
|
||||
}
|
||||
}
|
||||
}
|
||||
// take max_resource(sum_pod, any_init_container)
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if rQuantity, ok := container.Resources.Requests[resource]; ok {
|
||||
if resource == ResourceCPU && rQuantity.MilliValue() > totalResources {
|
||||
totalResources = rQuantity.MilliValue()
|
||||
} else if rQuantity.Value() > totalResources {
|
||||
totalResources = rQuantity.Value()
|
||||
}
|
||||
}
|
||||
}
|
||||
return totalResources
|
||||
}
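A small sketch of GetResourceRequest; note that CPU requests come back in millicores. The resource import path is assumed here (the diff shows both the apimachinery path and the older pkg/api/resource path), and the quantities are illustrative:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	v1 "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m")},
		},
	}}}}
	// CPU requests are reported in millicores; other resources use plain units.
	fmt.Println(v1.GetResourceRequest(pod, v1.ResourceCPU)) // 250
}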
|
||||
|
|
120
vendor/k8s.io/kubernetes/pkg/api/v1/resource_helpers_test.go
generated
vendored
|
@ -1,120 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
)
|
||||
|
||||
func TestResourceHelpers(t *testing.T) {
|
||||
cpuLimit := resource.MustParse("10")
|
||||
memoryLimit := resource.MustParse("10G")
|
||||
resourceSpec := ResourceRequirements{
|
||||
Limits: ResourceList{
|
||||
"cpu": cpuLimit,
|
||||
"memory": memoryLimit,
|
||||
"kube.io/storage": memoryLimit,
|
||||
},
|
||||
}
|
||||
if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 {
|
||||
t.Errorf("expected cpulimit %v, got %v", cpuLimit, res)
|
||||
}
|
||||
if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 {
|
||||
t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
|
||||
}
|
||||
resourceSpec = ResourceRequirements{
|
||||
Limits: ResourceList{
|
||||
"memory": memoryLimit,
|
||||
"kube.io/storage": memoryLimit,
|
||||
},
|
||||
}
|
||||
if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {
|
||||
t.Errorf("expected cpulimit %v, got %v", 0, res)
|
||||
}
|
||||
if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 {
|
||||
t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultResourceHelpers(t *testing.T) {
|
||||
resourceList := ResourceList{}
|
||||
if resourceList.Cpu().Format != resource.DecimalSI {
|
||||
t.Errorf("expected %v, actual %v", resource.DecimalSI, resourceList.Cpu().Format)
|
||||
}
|
||||
if resourceList.Memory().Format != resource.BinarySI {
|
||||
t.Errorf("expected %v, actual %v", resource.BinarySI, resourceList.Memory().Format)
|
||||
}
|
||||
}
|
||||
|
||||
func newPod(now metav1.Time, ready bool, beforeSec int) *Pod {
|
||||
conditionStatus := ConditionFalse
|
||||
if ready {
|
||||
conditionStatus = ConditionTrue
|
||||
}
|
||||
return &Pod{
|
||||
Status: PodStatus{
|
||||
Conditions: []PodCondition{
|
||||
{
|
||||
Type: PodReady,
|
||||
LastTransitionTime: metav1.NewTime(now.Time.Add(-1 * time.Duration(beforeSec) * time.Second)),
|
||||
Status: conditionStatus,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsPodAvailable(t *testing.T) {
|
||||
now := metav1.Now()
|
||||
tests := []struct {
|
||||
pod *Pod
|
||||
minReadySeconds int32
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
pod: newPod(now, false, 0),
|
||||
minReadySeconds: 0,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
pod: newPod(now, true, 0),
|
||||
minReadySeconds: 1,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
pod: newPod(now, true, 0),
|
||||
minReadySeconds: 0,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
pod: newPod(now, true, 51),
|
||||
minReadySeconds: 50,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
isAvailable := IsPodAvailable(test.pod, test.minReadySeconds, now)
|
||||
if isAvailable != test.expected {
|
||||
t.Errorf("[tc #%d] expected available pod: %t, got: %t", i, test.expected, isAvailable)
|
||||
}
|
||||
}
|
||||
}
|
47
vendor/k8s.io/kubernetes/pkg/api/v1/service/BUILD
generated
vendored
|
@ -1,47 +0,0 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"annotations.go",
|
||||
"util.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/util/net/sets:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["util_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/util/net/sets:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
111
vendor/k8s.io/kubernetes/pkg/api/v1/service/annotations.go
generated
vendored
|
@ -1,111 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
// AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
|
||||
//
|
||||
// It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
|
||||
// allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
|
||||
// access only from the CIDRs currently allocated to MIT & the USPS.
|
||||
//
|
||||
// Not all cloud providers support this annotation, though AWS & GCE do.
|
||||
AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
|
||||
|
||||
// AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behaviour
|
||||
AnnotationValueExternalTrafficLocal = "OnlyLocal"
|
||||
// AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behaviour
|
||||
AnnotationValueExternalTrafficGlobal = "Global"
|
||||
|
||||
// TODO: The alpha annotations have been deprecated, remove them when we move this feature to GA.
|
||||
|
||||
// AlphaAnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service
|
||||
// If not specified, annotation is created by the service api backend with the allocated nodePort
|
||||
// Will use user-specified nodePort value if specified by the client
|
||||
AlphaAnnotationHealthCheckNodePort = "service.alpha.kubernetes.io/healthcheck-nodeport"
|
||||
|
||||
// AlphaAnnotationExternalTraffic An annotation that denotes if this Service desires to route external traffic to local
|
||||
// endpoints only. This preserves Source IP and avoids a second hop.
|
||||
AlphaAnnotationExternalTraffic = "service.alpha.kubernetes.io/external-traffic"
|
||||
|
||||
// BetaAnnotationHealthCheckNodePort is the beta version of AlphaAnnotationHealthCheckNodePort.
|
||||
BetaAnnotationHealthCheckNodePort = "service.beta.kubernetes.io/healthcheck-nodeport"
|
||||
|
||||
// BetaAnnotationExternalTraffic is the beta version of AlphaAnnotationExternalTraffic.
|
||||
BetaAnnotationExternalTraffic = "service.beta.kubernetes.io/external-traffic"
|
||||
)
|
||||
|
||||
// NeedsHealthCheck checks the service for health check annotations.
|
||||
func NeedsHealthCheck(service *v1.Service) bool {
|
||||
// First check the alpha annotation and then the beta. This is so existing
|
||||
// Services continue to work till the user decides to transition to beta.
|
||||
// If they transition to beta, there's no way to go back to alpha without
|
||||
// rolling back the cluster.
|
||||
for _, annotation := range []string{AlphaAnnotationExternalTraffic, BetaAnnotationExternalTraffic} {
|
||||
if l, ok := service.Annotations[annotation]; ok {
|
||||
if l == AnnotationValueExternalTrafficLocal {
|
||||
return true
|
||||
} else if l == AnnotationValueExternalTrafficGlobal {
|
||||
return false
|
||||
} else {
|
||||
glog.Errorf("Invalid value for annotation %v: %v", annotation, l)
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetServiceHealthCheckNodePort returns the health check node port annotation for the service, if one exists.
|
||||
func GetServiceHealthCheckNodePort(service *v1.Service) int32 {
|
||||
if !NeedsHealthCheck(service) {
|
||||
return 0
|
||||
}
|
||||
// First check the alpha annotation and then the beta. This is so existing
|
||||
// Services continue to work till the user decides to transition to beta.
|
||||
// If they transition to beta, there's no way to go back to alpha without
|
||||
// rolling back the cluster.
|
||||
for _, annotation := range []string{AlphaAnnotationHealthCheckNodePort, BetaAnnotationHealthCheckNodePort} {
|
||||
if l, ok := service.Annotations[annotation]; ok {
|
||||
p, err := strconv.Atoi(l)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to parse annotation %v: %v", annotation, err)
|
||||
continue
|
||||
}
|
||||
return int32(p)
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
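A sketch of how the external-traffic and healthcheck-nodeport annotations drive these helpers (annotation values are illustrative; import paths follow this vendored tree):
package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/service"
)

func main() {
	svc := &v1.Service{}
	svc.Annotations = map[string]string{
		service.BetaAnnotationExternalTraffic:     service.AnnotationValueExternalTrafficLocal,
		service.BetaAnnotationHealthCheckNodePort: "32201", // illustrative node port
	}
	fmt.Println(service.NeedsHealthCheck(svc))              // true
	fmt.Println(service.GetServiceHealthCheckNodePort(svc)) // 32201
}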
|
||||
|
||||
// GetServiceHealthCheckPathPort returns the path and nodePort programmed into the cloud LB health check.
|
||||
func GetServiceHealthCheckPathPort(service *v1.Service) (string, int32) {
|
||||
if !NeedsHealthCheck(service) {
|
||||
return "", 0
|
||||
}
|
||||
port := GetServiceHealthCheckNodePort(service)
|
||||
if port == 0 {
|
||||
return "", 0
|
||||
}
|
||||
return "/healthz", port
|
||||
}
|
68
vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go
generated
vendored
|
@ -1,68 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
netsets "k8s.io/kubernetes/pkg/util/net/sets"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultLoadBalancerSourceRanges = "0.0.0.0/0"
|
||||
)
|
||||
|
||||
// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0
|
||||
func IsAllowAll(ipnets netsets.IPNet) bool {
|
||||
for _, s := range ipnets.StringSlice() {
|
||||
if s == "0.0.0.0/0" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field from a service.
|
||||
// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey annotation from the service,
|
||||
// extracting the source ranges to allow, and if not present returns a default (allow-all) value.
|
||||
func GetLoadBalancerSourceRanges(service *v1.Service) (netsets.IPNet, error) {
|
||||
var ipnets netsets.IPNet
|
||||
var err error
|
||||
// if SourceRange field is specified, ignore sourceRange annotation
|
||||
if len(service.Spec.LoadBalancerSourceRanges) > 0 {
|
||||
specs := service.Spec.LoadBalancerSourceRanges
|
||||
ipnets, err = netsets.ParseIPNets(specs...)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err)
|
||||
}
|
||||
} else {
|
||||
val := service.Annotations[AnnotationLoadBalancerSourceRangesKey]
|
||||
val = strings.TrimSpace(val)
|
||||
if val == "" {
|
||||
val = defaultLoadBalancerSourceRanges
|
||||
}
|
||||
specs := strings.Split(val, ",")
|
||||
ipnets, err = netsets.ParseIPNets(specs...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val)
|
||||
}
|
||||
}
|
||||
return ipnets, nil
|
||||
}
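A minimal sketch of GetLoadBalancerSourceRanges with the annotation form (the CIDRs are illustrative; import paths follow this vendored tree):
package main

import (
	"fmt"

	v1 "k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/service"
)

func main() {
	svc := &v1.Service{}
	svc.Annotations = map[string]string{
		service.AnnotationLoadBalancerSourceRangesKey: "10.0.0.0/24,192.168.2.0/24",
	}
	// The comma-separated annotation parses into two CIDR entries.
	cidrs, err := service.GetLoadBalancerSourceRanges(svc)
	fmt.Println(len(cidrs), err) // 2 <nil>
}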
|
131
vendor/k8s.io/kubernetes/pkg/api/v1/service/util_test.go
generated
vendored
|
@ -1,131 +0,0 @@
|
|||
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package service

import (
	"testing"

	"strings"

	"k8s.io/kubernetes/pkg/api/v1"
	netsets "k8s.io/kubernetes/pkg/util/net/sets"
)

func TestGetLoadBalancerSourceRanges(t *testing.T) {
	checkError := func(v string) {
		annotations := make(map[string]string)
		annotations[AnnotationLoadBalancerSourceRangesKey] = v
		svc := v1.Service{}
		svc.Annotations = annotations
		_, err := GetLoadBalancerSourceRanges(&svc)
		if err == nil {
			t.Errorf("Expected error parsing: %q", v)
		}
		svc = v1.Service{}
		svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",")
		_, err = GetLoadBalancerSourceRanges(&svc)
		if err == nil {
			t.Errorf("Expected error parsing: %q", v)
		}
	}
	checkError("10.0.0.1/33")
	checkError("foo.bar")
	checkError("10.0.0.1/32,*")
	checkError("10.0.0.1/32,")
	checkError("10.0.0.1/32, ")
	checkError("10.0.0.1")

	checkOK := func(v string) netsets.IPNet {
		annotations := make(map[string]string)
		annotations[AnnotationLoadBalancerSourceRangesKey] = v
		svc := v1.Service{}
		svc.Annotations = annotations
		cidrs, err := GetLoadBalancerSourceRanges(&svc)
		if err != nil {
			t.Errorf("Unexpected error parsing: %q", v)
		}
		svc = v1.Service{}
		svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",")
		cidrs, err = GetLoadBalancerSourceRanges(&svc)
		if err != nil {
			t.Errorf("Unexpected error parsing: %q", v)
		}
		return cidrs
	}
	cidrs := checkOK("192.168.0.1/32")
	if len(cidrs) != 1 {
		t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice())
	}
	cidrs = checkOK("192.168.0.1/32,192.168.0.1/32")
	if len(cidrs) != 1 {
		t.Errorf("Expected exactly one CIDR (after de-dup): %v", cidrs.StringSlice())
	}
	cidrs = checkOK("192.168.0.1/32,192.168.0.2/32")
	if len(cidrs) != 2 {
		t.Errorf("Expected two CIDRs: %v", cidrs.StringSlice())
	}
	cidrs = checkOK(" 192.168.0.1/32 , 192.168.0.2/32 ")
	if len(cidrs) != 2 {
		t.Errorf("Expected two CIDRs: %v", cidrs.StringSlice())
	}
	// check LoadBalancerSourceRanges not specified
	svc := v1.Service{}
	cidrs, err := GetLoadBalancerSourceRanges(&svc)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if len(cidrs) != 1 {
		t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice())
	}
	if !IsAllowAll(cidrs) {
		t.Errorf("Expected default to be allow-all: %v", cidrs.StringSlice())
	}
	// check SourceRanges annotation is empty
	annotations := make(map[string]string)
	annotations[AnnotationLoadBalancerSourceRangesKey] = ""
	svc = v1.Service{}
	svc.Annotations = annotations
	cidrs, err = GetLoadBalancerSourceRanges(&svc)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if len(cidrs) != 1 {
		t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice())
	}
	if !IsAllowAll(cidrs) {
		t.Errorf("Expected default to be allow-all: %v", cidrs.StringSlice())
	}
}

func TestAllowAll(t *testing.T) {
	checkAllowAll := func(allowAll bool, cidrs ...string) {
		ipnets, err := netsets.ParseIPNets(cidrs...)
		if err != nil {
			t.Errorf("Unexpected error parsing cidrs: %v", cidrs)
		}
		if allowAll != IsAllowAll(ipnets) {
			t.Errorf("IsAllowAll did not return expected value for %v", cidrs)
		}
	}
	checkAllowAll(false, "10.0.0.1/32")
	checkAllowAll(false, "10.0.0.1/32", "10.0.0.2/32")
	checkAllowAll(false, "10.0.0.1/32", "10.0.0.1/32")

	checkAllowAll(true, "0.0.0.0/0")
	checkAllowAll(true, "192.168.0.0/0")
	checkAllowAll(true, "192.168.0.1/32", "0.0.0.0/0")
}
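For orientation, a minimal usage sketch (not part of this diff) of the helpers the deleted test exercises; it assumes only what the test itself shows: the service package path, GetLoadBalancerSourceRanges, IsAllowAll, and the AnnotationLoadBalancerSourceRangesKey annotation key.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1/service"
)

func main() {
	// Ranges may come from the legacy annotation or from
	// Spec.LoadBalancerSourceRanges; the helper parses and validates either.
	svc := v1.Service{}
	svc.Annotations = map[string]string{
		service.AnnotationLoadBalancerSourceRangesKey: "192.168.0.0/24,10.0.0.0/8",
	}

	cidrs, err := service.GetLoadBalancerSourceRanges(&svc)
	if err != nil {
		panic(err)
	}
	// When no ranges are configured at all, the helper falls back to
	// 0.0.0.0/0, which IsAllowAll reports as an allow-all set.
	fmt.Println(cidrs.StringSlice(), service.IsAllowAll(cidrs))
}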
7147 vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go generated vendored
File diff suppressed because it is too large.
284 vendor/k8s.io/kubernetes/pkg/api/v1/types.go generated vendored
@@ -17,11 +17,11 @@ limitations under the License.
package v1

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// The comments for the structs and fields can be used from go-restful to
@@ -326,6 +326,14 @@ type VolumeSource struct {
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
	// Items for all in one resources secrets, configmaps, and downward API
	Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
}

// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@ -411,8 +419,25 @@ type PersistentVolumeSource struct {
|
|||
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
|
||||
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
|
||||
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
|
||||
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
|
||||
// +optional
|
||||
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
|
||||
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
|
||||
// +optional
|
||||
ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
|
||||
}
|
||||
|
||||
const (
|
||||
// AlphaStorageClassAnnotation represents the previous alpha storage class
|
||||
// annotation. It's currently still used and will be held for backwards
|
||||
// compatibility
|
||||
AlphaStorageClassAnnotation = "volume.alpha.kubernetes.io/storage-class"
|
||||
|
||||
// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
|
||||
// It's currently still used and will be held for backwards compatibility
|
||||
BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
|
||||
)
|
||||
|
||||
// +genclient=true
|
||||
// +nonNamespaced=true
|
||||
|
||||
|
@ -464,6 +489,10 @@ type PersistentVolumeSpec struct {
|
|||
// More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy
|
||||
// +optional
|
||||
PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
|
||||
// Name of StorageClass to which this persistent volume belongs. Empty value
|
||||
// means that this volume does not belong to any StorageClass.
|
||||
// +optional
|
||||
StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
|
||||
}
|
||||
|
||||
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
|
||||
|
@ -559,6 +588,10 @@ type PersistentVolumeClaimSpec struct {
|
|||
// VolumeName is the binding reference to the PersistentVolume backing this claim.
|
||||
// +optional
|
||||
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
|
||||
// Name of the StorageClass required by the claim.
|
||||
// More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1
|
||||
// +optional
|
||||
StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
|
||||
}
|
||||
|
||||
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
|
||||
|
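As a quick illustration of the StorageClassName pointer added in the hunk above, a hypothetical claim spec (the class name "fast" is made up; the access-mode constant is assumed to come from the same v1 package).

package main

import "k8s.io/kubernetes/pkg/api/v1"

func main() {
	// StorageClassName is a *string so that an unset class and an
	// explicitly empty class ("") can be told apart.
	class := "fast"
	claim := v1.PersistentVolumeClaimSpec{
		AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		StorageClassName: &class,
	}
	_ = claim
}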
@ -944,6 +977,28 @@ const (
|
|||
SecretVolumeSourceDefaultMode int32 = 0644
|
||||
)
|
||||
|
||||
// Adapts a secret into a projected volume.
|
||||
//
|
||||
// The contents of the target Secret's Data field will be presented in a
|
||||
// projected volume as files using the keys in the Data field as the file names.
|
||||
// Note that this is identical to a secret volume source without the default
|
||||
// mode.
|
||||
type SecretProjection struct {
|
||||
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
|
||||
// If unspecified, each key-value pair in the Data field of the referenced
|
||||
// Secret will be projected into the volume as a file whose name is the
|
||||
// key and content is the value. If specified, the listed keys will be
|
||||
// projected into the specified paths, and unlisted keys will not be
|
||||
// present. If a key is specified which is not present in the Secret,
|
||||
// the volume setup will error unless it is marked optional. Paths must be
|
||||
// relative and may not contain the '..' path or start with '..'.
|
||||
// +optional
|
||||
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
|
||||
// Specify whether the Secret or its key must be defined
|
||||
// +optional
|
||||
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
|
||||
}
|
||||
|
||||
// Represents an NFS mount that lasts the lifetime of a pod.
|
||||
// NFS volumes do not support ownership management or SELinux relabeling.
|
||||
type NFSVolumeSource struct {
|
||||
|
@ -988,6 +1043,10 @@ type ISCSIVolumeSource struct {
|
|||
// Defaults to false.
|
||||
// +optional
|
||||
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
|
||||
// iSCSI target portal List. The portal is either an IP or ip_addr:port if the port
|
||||
// is other than default (typically TCP ports 860 and 3260).
|
||||
// +optional
|
||||
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
|
||||
}
|
||||
|
||||
// Represents a Fibre Channel volume.
|
||||
|
@ -1071,6 +1130,55 @@ type AzureDiskVolumeSource struct {
|
|||
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
|
||||
}
|
||||
|
||||
// PortworxVolumeSource represents a Portworx volume resource.
|
||||
type PortworxVolumeSource struct {
|
||||
// VolumeID uniquely identifies a Portworx volume
|
||||
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
|
||||
// FSType represents the filesystem type to mount
|
||||
// Must be a filesystem type supported by the host operating system.
|
||||
// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
|
||||
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
|
||||
// Defaults to false (read/write). ReadOnly here will force
|
||||
// the ReadOnly setting in VolumeMounts.
|
||||
// +optional
|
||||
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
|
||||
}
|
||||
|
||||
// ScaleIOVolumeSource represents a persistent ScaleIO volume
|
||||
type ScaleIOVolumeSource struct {
|
||||
// The host address of the ScaleIO API Gateway.
|
||||
Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
|
||||
// The name of the storage system as configured in ScaleIO.
|
||||
System string `json:"system" protobuf:"bytes,2,opt,name=system"`
|
||||
// SecretRef references to the secret for ScaleIO user and other
|
||||
// sensitive information. If this is not provided, Login operation will fail.
|
||||
SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
|
||||
// Flag to enable/disable SSL communication with Gateway, default false
|
||||
// +optional
|
||||
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
|
||||
// The name of the Protection Domain for the configured storage (defaults to "default").
|
||||
// +optional
|
||||
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
|
||||
// The Storage Pool associated with the protection domain (defaults to "default").
|
||||
// +optional
|
||||
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
|
||||
// Indicates whether the storage for a volume should be thick or thin (defaults to "thin").
|
||||
// +optional
|
||||
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
|
||||
// The name of a volume already created in the ScaleIO system
|
||||
// that is associated with this volume source.
|
||||
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
|
||||
// Filesystem type to mount.
|
||||
// Must be a filesystem type supported by the host operating system.
|
||||
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
|
||||
// +optional
|
||||
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
|
||||
// Defaults to false (read/write). ReadOnly here will force
|
||||
// the ReadOnly setting in VolumeMounts.
|
||||
// +optional
|
||||
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
|
||||
}
|
||||
|
||||
// Adapts a ConfigMap into a volume.
|
||||
//
|
||||
// The contents of the target ConfigMap's Data field will be presented in a
|
||||
|
@@ -1104,6 +1212,58 @@ const (
	ConfigMapVolumeSourceDefaultMode int32 = 0644
)

// Adapts a ConfigMap into a projected volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
// Note that this is identical to a configmap volume source without the default
// mode.
type ConfigMapProjection struct {
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// If unspecified, each key-value pair in the Data field of the referenced
	// ConfigMap will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the ConfigMap,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// Specify whether the ConfigMap or its keys must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}

// Represents a projected volume source
type ProjectedVolumeSource struct {
	// list of volume projections
	Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
	// Mode bits to use on created files by default. Must be a value between
	// 0 and 0777.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}

// Projection that may be projected along with other supported volume types
type VolumeProjection struct {
	// all types below are the supported types for projection into the same volume

	// information about the secret data to project
	Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
	// information about the downwardAPI data to project
	DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
	// information about the configMap data to project
	ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
}

const (
	ProjectedVolumeSourceDefaultMode int32 = 0644
)

// Maps a string key to a path within a volume.
type KeyToPath struct {
	// The key to project.
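A small sketch (not part of the diff) of how the projected-volume types introduced above compose; the secret and configmap names are placeholders.

package main

import "k8s.io/kubernetes/pkg/api/v1"

func int32Ptr(i int32) *int32 { return &i }

// A projected volume that merges one key of a Secret and a whole ConfigMap
// into a single mounted directory, using only the types from the hunk above.
var projected = v1.ProjectedVolumeSource{
	DefaultMode: int32Ptr(0644),
	Sources: []v1.VolumeProjection{
		{Secret: &v1.SecretProjection{
			LocalObjectReference: v1.LocalObjectReference{Name: "my-secret"},
			Items:                []v1.KeyToPath{{Key: "password", Path: "secret/password"}},
		}},
		{ConfigMap: &v1.ConfigMapProjection{
			LocalObjectReference: v1.LocalObjectReference{Name: "my-config"},
		}},
	},
}

func main() { _ = projected }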
@ -1476,8 +1636,8 @@ type Container struct {
|
|||
// +optional
|
||||
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
|
||||
// List of sources to populate environment variables in the container.
|
||||
// The keys defined within a source must be a C_IDENTIFIER. An invalid key
|
||||
// will prevent the container from starting. When a key exists in multiple
|
||||
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
// will be reported as an event when the container is starting. When a key exists in multiple
|
||||
// sources, the value associated with the last source will take precedence.
|
||||
// Values defined by an Env with a duplicate key will take precedence.
|
||||
// Cannot be updated.
|
||||
|
@ -1780,9 +1940,14 @@ const (
|
|||
type DNSPolicy string
|
||||
|
||||
const (
|
||||
// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
|
||||
// first, if it is available, then fall back on the default
|
||||
// (as determined by kubelet) DNS settings.
|
||||
DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
|
||||
|
||||
// DNSClusterFirst indicates that the pod should use cluster DNS
|
||||
// first, if it is available, then fall back on the default (as
|
||||
// determined by kubelet) DNS settings.
|
||||
// first unless hostNetwork is true, if it is available, then
|
||||
// fall back on the default (as determined by kubelet) DNS settings.
|
||||
DNSClusterFirst DNSPolicy = "ClusterFirst"
|
||||
|
||||
// DNSDefault indicates that the pod should use the default (as
|
||||
|
@ -1937,10 +2102,8 @@ type PodAffinityTerm struct {
|
|||
// +optional
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
|
||||
// namespaces specifies which namespaces the labelSelector applies to (matches against);
|
||||
// nil list means "this pod's namespace," empty list means "all namespaces"
|
||||
// The json tag here is not "omitempty" since we need to distinguish nil and empty.
|
||||
// See https://golang.org/pkg/encoding/json/#Marshal for more details.
|
||||
Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
|
||||
// null or empty list means "this pod's namespace"
|
||||
Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
|
||||
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
|
||||
// the labelSelector in the specified namespaces, where co-located is defined as running on a node
|
||||
// whose value of the label with key topologyKey matches that of any node on which any of the
|
||||
|
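To make the namespaces semantics above concrete, a hypothetical affinity term that leaves Namespaces unset (meaning "this pod's namespace"); the label and topology key are placeholders.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// With Namespaces nil or empty, the selector is evaluated only in the
	// pod's own namespace, per the comment in the hunk above.
	term := v1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
		TopologyKey: "kubernetes.io/hostname",
	}
	_ = term
}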
@@ -2002,8 +2165,12 @@ type Taint struct {
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Required. The effect of the taint on pods
	// that do not tolerate the taint.
	// Valid effects are NoSchedule and PreferNoSchedule.
	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
	// TimeAdded represents the time at which the taint was added.
	// It is only written for NoExecute taints.
	// +optional
	TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}

type TaintEffect string
@ -2019,26 +2186,23 @@ const (
|
|||
// onto the node entirely. Enforced by the scheduler.
|
||||
TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
|
||||
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
|
||||
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
|
||||
// do not allow pods to start on Kubelet unless they tolerate the taint,
|
||||
// but allow all already-running pods to continue running.
|
||||
// Enforced by the scheduler and Kubelet.
|
||||
// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
|
||||
// Kubelet without going through the scheduler to start.
|
||||
// Enforced by Kubelet and the scheduler.
|
||||
// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
|
||||
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
|
||||
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
|
||||
// do not allow pods to start on Kubelet unless they tolerate the taint,
|
||||
// and evict any already-running pods that do not tolerate the taint.
|
||||
// Enforced by the scheduler and Kubelet.
|
||||
// TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute"
|
||||
// Evict any already-running pods that do not tolerate the taint.
|
||||
// Currently enforced by NodeController.
|
||||
TaintEffectNoExecute TaintEffect = "NoExecute"
|
||||
)
|
||||
|
||||
// The pod this Toleration is attached to tolerates any taint that matches
|
||||
// the triple <key,value,effect> using the matching operator <operator>.
|
||||
type Toleration struct {
|
||||
// Required. Key is the taint key that the toleration applies to.
|
||||
// Key is the taint key that the toleration applies to. Empty means match all taint keys.
|
||||
// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
|
||||
// +optional
|
||||
Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
|
||||
// operator represents a key's relationship to the value.
|
||||
// Operator represents a key's relationship to the value.
|
||||
// Valid operators are Exists and Equal. Defaults to Equal.
|
||||
// Exists is equivalent to wildcard for value, so that a pod can
|
||||
// tolerate all taints of a particular category.
|
||||
|
@ -2049,11 +2213,15 @@ type Toleration struct {
|
|||
// +optional
|
||||
Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
|
||||
// Effect indicates the taint effect to match. Empty means match all taint effects.
|
||||
// When specified, allowed values are NoSchedule and PreferNoSchedule.
|
||||
// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
|
||||
// +optional
|
||||
Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
|
||||
// TODO: For forgiveness (#1574), we'd eventually add at least a grace period
|
||||
// here, and possibly an occurrence threshold and period.
|
||||
// TolerationSeconds represents the period of time the toleration (which must be
|
||||
// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
|
||||
// it is not set, which means tolerate the taint forever (do not evict). Zero and
|
||||
// negative values will be treated as 0 (evict immediately) by the system.
|
||||
// +optional
|
||||
TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
|
||||
}
|
||||
|
||||
// A toleration operator is the set of operators that can be used in a toleration.
|
||||
|
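A brief sketch tying the Taint and Toleration fields above together; the taint key is a placeholder, and the TolerationOpExists constant name is assumed from the same package rather than shown in this hunk.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// A NoExecute taint and a toleration that matches it for a limited time.
	taint := v1.Taint{Key: "node.example.com/unreachable", Effect: v1.TaintEffectNoExecute}

	seconds := int64(300)
	toleration := v1.Toleration{
		Key:               taint.Key,
		Operator:          v1.TolerationOpExists,
		Effect:            v1.TaintEffectNoExecute,
		TolerationSeconds: &seconds, // evict 5 minutes after the taint appears
	}
	fmt.Println(taint.Key, toleration.Effect)
}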
@ -2102,10 +2270,9 @@ type PodSpec struct {
|
|||
// of that value or the sum of the normal containers. Limits are applied to init containers
|
||||
// in a similar fashion.
|
||||
// Init containers cannot currently be added or removed.
|
||||
// Init containers are in alpha state and may change without notice.
|
||||
// Cannot be updated.
|
||||
// More info: http://kubernetes.io/docs/user-guide/containers
|
||||
InitContainers []Container `json:"-" patchStrategy:"merge" patchMergeKey:"name"`
|
||||
InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
|
||||
// List of containers belonging to the pod.
|
||||
// Containers cannot currently be added or removed.
|
||||
// There must be at least one container in a Pod.
|
||||
|
@ -2133,8 +2300,9 @@ type PodSpec struct {
|
|||
// +optional
|
||||
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
|
||||
// Set DNS policy for containers within the pod.
|
||||
// One of 'ClusterFirst' or 'Default'.
|
||||
// One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'.
|
||||
// Defaults to "ClusterFirst".
|
||||
// To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
|
||||
// +optional
|
||||
DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
|
||||
// NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
|
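A minimal pod-spec fragment illustrating the dnsPolicy note above: with hostNetwork enabled, cluster DNS is only used when the policy is explicitly set to ClusterFirstWithHostNet (container name and image are placeholders).

package main

import "k8s.io/kubernetes/pkg/api/v1"

var spec = v1.PodSpec{
	HostNetwork: true,
	// Without this explicit setting, a host-network pod would fall back to
	// the node's default DNS configuration.
	DNSPolicy:  v1.DNSClusterFirstWithHostNet,
	Containers: []v1.Container{{Name: "app", Image: "nginx"}},
}

func main() { _ = spec }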
@ -2152,6 +2320,9 @@ type PodSpec struct {
|
|||
// +k8s:conversion-gen=false
|
||||
// +optional
|
||||
DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
|
||||
// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
|
||||
// +optional
|
||||
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
|
||||
|
||||
// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
|
||||
// the scheduler simply schedules this pod onto that node, assuming that it fits resource
|
||||
|
@ -2198,7 +2369,10 @@ type PodSpec struct {
|
|||
// If specified, the pod will be dispatched by specified scheduler.
|
||||
// If not specified, the pod will be dispatched by default scheduler.
|
||||
// +optional
|
||||
SchedulerName string `json:"schedulername,omitempty" protobuf:"bytes,19,opt,name=schedulername"`
|
||||
SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
|
||||
// If specified, the pod's tolerations.
|
||||
// +optional
|
||||
Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
|
||||
}
|
||||
|
||||
// PodSecurityContext holds pod-level security attributes and common container settings.
|
||||
|
@ -2292,9 +2466,9 @@ type PodStatus struct {
|
|||
// The list has one entry per init container in the manifest. The most recent successful
|
||||
// init container will have ready = true, the most recently started container will have
|
||||
// startTime set.
|
||||
// Init containers are in alpha state and may change without notice.
|
||||
// More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses
|
||||
InitContainerStatuses []ContainerStatus `json:"-"`
|
||||
InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
|
||||
|
||||
// The list has one entry per container in the manifest. Each entry is currently the output
|
||||
// of `docker inspect`.
|
||||
// More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses
|
||||
|
@ -2793,6 +2967,11 @@ type ServiceAccount struct {
|
|||
// More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret
|
||||
// +optional
|
||||
ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
|
||||
|
||||
// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
|
||||
// Can be overridden at the pod level.
|
||||
// +optional
|
||||
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
|
||||
}
|
||||
|
||||
// ServiceAccountList is a list of ServiceAccount objects
|
||||
|
@ -2930,6 +3109,9 @@ type NodeSpec struct {
|
|||
// More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration
|
||||
// +optional
|
||||
Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
|
||||
// If specified, the node's taints.
|
||||
// +optional
|
||||
Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
|
||||
}
|
||||
|
||||
// DaemonEndpoint contains information about a single Daemon endpoint.
|
||||
|
@ -3137,6 +3319,8 @@ const (
|
|||
NodeHostName NodeAddressType = "Hostname"
|
||||
NodeExternalIP NodeAddressType = "ExternalIP"
|
||||
NodeInternalIP NodeAddressType = "InternalIP"
|
||||
NodeExternalDNS NodeAddressType = "ExternalDNS"
|
||||
NodeInternalDNS NodeAddressType = "InternalDNS"
|
||||
)
|
||||
|
||||
// NodeAddress contains information for the node's address.
|
||||
|
@ -3215,10 +3399,10 @@ type NodeList struct {
|
|||
// FinalizerName is the name identifying a finalizer during namespace lifecycle.
|
||||
type FinalizerName string
|
||||
|
||||
// These are internal finalizer values to Kubernetes, must be qualified name unless defined here
|
||||
// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or
|
||||
// in metav1.
|
||||
const (
|
||||
FinalizerKubernetes FinalizerName = "kubernetes"
|
||||
FinalizerOrphan string = "orphan"
|
||||
)
|
||||
|
||||
// NamespaceSpec describes the attributes on a Namespace.
|
||||
|
@@ -3304,6 +3488,20 @@ type Preconditions struct {
	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}

// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation.
type DeletionPropagation string

const (
	// Orphans the dependents.
	DeletePropagationOrphan DeletionPropagation = "Orphan"
	// Deletes the object from the key-value store, the garbage collector will delete the dependents in the background.
	DeletePropagationBackground DeletionPropagation = "Background"
	// The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store.
	// API server will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp.
	// This policy is cascading, i.e., the dependents will be deleted with Foreground.
	DeletePropagationForeground DeletionPropagation = "Foreground"
)

// DeleteOptions may be provided when deleting an API object
// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
// +k8s:openapi-gen=false
@@ -3322,10 +3520,19 @@ type DeleteOptions struct {
	// +optional
	Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`

	// Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
	// Should the dependent objects be orphaned. If true/false, the "orphan"
	// finalizer will be added to/removed from the object's finalizers list.
	// Either this field or PropagationPolicy may be set, but not both.
	// +optional
	OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"`

	// Whether and how garbage collection will be performed.
	// Either this field or OrphanDependents may be set, but not both.
	// The default policy is decided by the existing finalizer set in the
	// metadata.finalizers and the resource-specific default policy.
	// +optional
	PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"`
}

// ListOptions is the query options to a standard REST list call.
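A short sketch of the new propagation field described above; OrphanDependents and PropagationPolicy are mutually exclusive, so only the latter is set here.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// Foreground deletion keeps the object until its blocking dependents
	// are gone, per the DeletionPropagation constants above.
	policy := v1.DeletePropagationForeground
	opts := v1.DeleteOptions{PropagationPolicy: &policy}
	fmt.Println(*opts.PropagationPolicy)
}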
@ -4079,6 +4286,15 @@ type DownwardAPIVolumeFile struct {
|
|||
Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
|
||||
}
|
||||
|
||||
// Represents downward API info for projecting into a projected volume.
|
||||
// Note that this is identical to a downwardAPI volume source without the default
|
||||
// mode.
|
||||
type DownwardAPIProjection struct {
|
||||
// Items is a list of DownwardAPIVolume file
|
||||
// +optional
|
||||
Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
|
||||
}
|
||||
|
||||
// SecurityContext holds security configuration that will be applied to a container.
|
||||
// Some fields are present in both SecurityContext and PodSecurityContext. When both
|
||||
// are set, the values in SecurityContext take precedence.
162 vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go generated vendored
@ -209,6 +209,16 @@ func (ConfigMapList) SwaggerDoc() map[string]string {
|
|||
return map_ConfigMapList
|
||||
}
|
||||
|
||||
var map_ConfigMapProjection = map[string]string{
|
||||
"": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.",
|
||||
"items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
|
||||
"optional": "Specify whether the ConfigMap or it's keys must be defined",
|
||||
}
|
||||
|
||||
func (ConfigMapProjection) SwaggerDoc() map[string]string {
|
||||
return map_ConfigMapProjection
|
||||
}
|
||||
|
||||
var map_ConfigMapVolumeSource = map[string]string{
|
||||
"": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
|
||||
"items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
|
||||
|
@ -228,7 +238,7 @@ var map_Container = map[string]string{
|
|||
"args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands",
|
||||
"workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
|
||||
"ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
|
||||
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. An invalid key will prevent the container from starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
|
||||
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
|
||||
"env": "List of environment variables to set in the container. Cannot be updated.",
|
||||
"resources": "Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources",
|
||||
"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
|
||||
|
@ -345,13 +355,23 @@ var map_DeleteOptions = map[string]string{
|
|||
"": "DeleteOptions may be provided when deleting an API object DEPRECATED: This type has been moved to meta/v1 and will be removed soon.",
|
||||
"gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
|
||||
"preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
|
||||
"orphanDependents": "Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list.",
|
||||
"orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
|
||||
"PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.",
|
||||
}
|
||||
|
||||
func (DeleteOptions) SwaggerDoc() map[string]string {
|
||||
return map_DeleteOptions
|
||||
}
|
||||
|
||||
var map_DownwardAPIProjection = map[string]string{
|
||||
"": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.",
|
||||
"items": "Items is a list of DownwardAPIVolume file",
|
||||
}
|
||||
|
||||
func (DownwardAPIProjection) SwaggerDoc() map[string]string {
|
||||
return map_DownwardAPIProjection
|
||||
}
|
||||
|
||||
var map_DownwardAPIVolumeFile = map[string]string{
|
||||
"": "DownwardAPIVolumeFile represents information to create the file containing the pod field",
|
||||
"path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'",
|
||||
|
@ -637,6 +657,7 @@ var map_ISCSIVolumeSource = map[string]string{
|
|||
"iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.",
|
||||
"fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi",
|
||||
"readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
|
||||
"portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
|
||||
}
|
||||
|
||||
func (ISCSIVolumeSource) SwaggerDoc() map[string]string {
|
||||
|
@ -916,6 +937,7 @@ var map_NodeSpec = map[string]string{
|
|||
"externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.",
|
||||
"providerID": "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>",
|
||||
"unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration",
|
||||
"taints": "If specified, the node's taints.",
|
||||
}
|
||||
|
||||
func (NodeSpec) SwaggerDoc() map[string]string {
|
||||
|
@ -1039,11 +1061,12 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_PersistentVolumeClaimSpec = map[string]string{
|
||||
"": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
|
||||
"accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1",
|
||||
"selector": "A label query over volumes to consider for binding.",
|
||||
"resources": "Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources",
|
||||
"volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
|
||||
"": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
|
||||
"accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1",
|
||||
"selector": "A label query over volumes to consider for binding.",
|
||||
"resources": "Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources",
|
||||
"volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
|
||||
"storageClassName": "Name of the StorageClass required by the claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1",
|
||||
}
|
||||
|
||||
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
|
||||
|
@ -1100,6 +1123,8 @@ var map_PersistentVolumeSource = map[string]string{
|
|||
"quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
|
||||
"azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
|
||||
"photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
|
||||
"portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
|
||||
"scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
|
||||
}
|
||||
|
||||
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
|
||||
|
@ -1112,6 +1137,7 @@ var map_PersistentVolumeSpec = map[string]string{
|
|||
"accessModes": "AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes",
|
||||
"claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding",
|
||||
"persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy",
|
||||
"storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
|
||||
}
|
||||
|
||||
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
|
||||
|
@ -1163,7 +1189,7 @@ func (PodAffinity) SwaggerDoc() map[string]string {
|
|||
var map_PodAffinityTerm = map[string]string{
|
||||
"": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> tches that of any node on which a pod of the set of pods is running",
|
||||
"labelSelector": "A label query over a set of resources, in this case pods.",
|
||||
"namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); nil list means \"this pod's namespace,\" empty list means \"all namespaces\" The json tag here is not \"omitempty\" since we need to distinguish nil and empty. See https://golang.org/pkg/encoding/json/#Marshal for more details.",
|
||||
"namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"",
|
||||
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.",
|
||||
}
|
||||
|
||||
|
@ -1291,14 +1317,16 @@ func (PodSignature) SwaggerDoc() map[string]string {
|
|||
var map_PodSpec = map[string]string{
|
||||
"": "PodSpec is a description of a pod.",
|
||||
"volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://kubernetes.io/docs/user-guide/volumes",
|
||||
"initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers",
|
||||
"containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers",
|
||||
"restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy",
|
||||
"terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
|
||||
"activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
|
||||
"dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\".",
|
||||
"dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
|
||||
"nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://kubernetes.io/docs/user-guide/node-selection/README",
|
||||
"serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md",
|
||||
"serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
|
||||
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
|
||||
"nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
|
||||
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
|
||||
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
|
||||
|
@ -1308,7 +1336,8 @@ var map_PodSpec = map[string]string{
|
|||
"hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
|
||||
"subdomain": "If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.",
|
||||
"affinity": "If specified, the pod's scheduling constraints",
|
||||
"schedulername": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
|
||||
"schedulerName": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
|
||||
"tolerations": "If specified, the pod's tolerations.",
|
||||
}
|
||||
|
||||
func (PodSpec) SwaggerDoc() map[string]string {
|
||||
|
@ -1316,16 +1345,17 @@ func (PodSpec) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_PodStatus = map[string]string{
|
||||
"": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.",
|
||||
"phase": "Current condition of the pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase",
|
||||
"conditions": "Current service state of pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions",
|
||||
"message": "A human readable message indicating details about why the pod is in this condition.",
|
||||
"reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'",
|
||||
"hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.",
|
||||
"podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
|
||||
"startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
|
||||
"containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses",
|
||||
"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md",
|
||||
"": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.",
|
||||
"phase": "Current condition of the pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase",
|
||||
"conditions": "Current service state of pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions",
|
||||
"message": "A human readable message indicating details about why the pod is in this condition.",
|
||||
"reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'",
|
||||
"hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.",
|
||||
"podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
|
||||
"startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
|
||||
"initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses",
|
||||
"containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses",
|
||||
"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md",
|
||||
}
|
||||
|
||||
func (PodStatus) SwaggerDoc() map[string]string {
|
||||
|
@ -1372,6 +1402,17 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string {
|
|||
return map_PodTemplateSpec
|
||||
}
|
||||
|
||||
var map_PortworxVolumeSource = map[string]string{
|
||||
"": "PortworxVolumeSource represents a Portworx volume resource.",
|
||||
"volumeID": "VolumeID uniquely identifies a Portworx volume",
|
||||
"fsType": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.",
|
||||
"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
|
||||
}
|
||||
|
||||
func (PortworxVolumeSource) SwaggerDoc() map[string]string {
|
||||
return map_PortworxVolumeSource
|
||||
}
|
||||
|
||||
var map_Preconditions = map[string]string{
|
||||
"": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
|
||||
"uid": "Specifies the target UID.",
|
||||
|
@ -1416,6 +1457,16 @@ func (Probe) SwaggerDoc() map[string]string {
|
|||
return map_Probe
|
||||
}
|
||||
|
||||
var map_ProjectedVolumeSource = map[string]string{
|
||||
"": "Represents a projected volume source",
|
||||
"sources": "list of volume projections",
|
||||
"defaultMode": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
|
||||
}
|
||||
|
||||
func (ProjectedVolumeSource) SwaggerDoc() map[string]string {
|
||||
return map_ProjectedVolumeSource
|
||||
}
|
||||
|
||||
var map_QuobyteVolumeSource = map[string]string{
|
||||
"": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.",
|
||||
"registry": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes",
|
||||
|
@ -1590,6 +1641,24 @@ func (SELinuxOptions) SwaggerDoc() map[string]string {
|
|||
return map_SELinuxOptions
|
||||
}
|
||||
|
||||
var map_ScaleIOVolumeSource = map[string]string{
|
||||
"": "ScaleIOVolumeSource represents a persistent ScaleIO volume",
|
||||
"gateway": "The host address of the ScaleIO API Gateway.",
|
||||
"system": "The name of the storage system as configured in ScaleIO.",
|
||||
"secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
|
||||
"sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false",
|
||||
"protectionDomain": "The name of the Protection Domain for the configured storage (defaults to \"default\").",
|
||||
"storagePool": "The Storage Pool associated with the protection domain (defaults to \"default\").",
|
||||
"storageMode": "Indicates whether the storage for a volume should be thick or thin (defaults to \"thin\").",
|
||||
"volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
|
||||
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
|
||||
"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
|
||||
}
|
||||
|
||||
func (ScaleIOVolumeSource) SwaggerDoc() map[string]string {
|
||||
return map_ScaleIOVolumeSource
|
||||
}
|
||||
|
||||
var map_Secret = map[string]string{
|
||||
"": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
|
||||
"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
|
||||
|
@ -1631,6 +1700,16 @@ func (SecretList) SwaggerDoc() map[string]string {
|
|||
return map_SecretList
|
||||
}
|
||||
|
||||
var map_SecretProjection = map[string]string{
|
||||
"": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.",
|
||||
"items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
|
||||
"optional": "Specify whether the Secret or its key must be defined",
|
||||
}
|
||||
|
||||
func (SecretProjection) SwaggerDoc() map[string]string {
|
||||
return map_SecretProjection
|
||||
}
|
||||
|
||||
var map_SecretVolumeSource = map[string]string{
|
||||
"": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.",
|
||||
"secretName": "Name of the secret in the pod's namespace to use. More info: http://kubernetes.io/docs/user-guide/volumes#secrets",
|
||||
|
@ -1678,10 +1757,11 @@ func (Service) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ServiceAccount = map[string]string{
|
||||
"": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets",
|
||||
"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
|
||||
"secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://kubernetes.io/docs/user-guide/secrets",
|
||||
"imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret",
|
||||
"": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets",
|
||||
"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
|
||||
"secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://kubernetes.io/docs/user-guide/secrets",
|
||||
"imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret",
|
||||
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.",
|
||||
}
|
||||
|
||||
func (ServiceAccount) SwaggerDoc() map[string]string {
|
||||
|
@ -1767,10 +1847,11 @@ func (TCPSocketAction) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_Taint = map[string]string{
|
||||
"": "The node this Taint is attached to has the effect \"effect\" on any pod that that does not tolerate the Taint.",
|
||||
"key": "Required. The taint key to be applied to a node.",
|
||||
"value": "Required. The taint value corresponding to the taint key.",
|
||||
"effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule and PreferNoSchedule.",
|
||||
"": "The node this Taint is attached to has the effect \"effect\" on any pod that that does not tolerate the Taint.",
|
||||
"key": "Required. The taint key to be applied to a node.",
|
||||
"value": "Required. The taint value corresponding to the taint key.",
|
||||
"effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
|
||||
"timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
|
||||
}
|
||||
|
||||
func (Taint) SwaggerDoc() map[string]string {
|
||||
|
@ -1778,11 +1859,12 @@ func (Taint) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_Toleration = map[string]string{
|
||||
"": "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.",
|
||||
"key": "Required. Key is the taint key that the toleration applies to.",
|
||||
"operator": "operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
|
||||
"value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.",
|
||||
"effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule and PreferNoSchedule.",
|
||||
"": "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.",
|
||||
"key": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.",
|
||||
"operator": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
|
||||
"value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.",
|
||||
"effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.",
|
||||
"tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.",
|
||||
}
|
||||
|
||||
func (Toleration) SwaggerDoc() map[string]string {
|
||||
|
@ -1810,6 +1892,17 @@ func (VolumeMount) SwaggerDoc() map[string]string {
|
|||
return map_VolumeMount
|
||||
}
|
||||
|
||||
var map_VolumeProjection = map[string]string{
|
||||
"": "Projection that may be projected along with other supported volume types",
|
||||
"secret": "information about the secret data to project",
|
||||
"downwardAPI": "information about the downwardAPI data to project",
|
||||
"configMap": "information about the configMap data to project",
|
||||
}
|
||||
|
||||
func (VolumeProjection) SwaggerDoc() map[string]string {
|
||||
return map_VolumeProjection
|
||||
}
|
||||
|
||||
var map_VolumeSource = map[string]string{
|
||||
"": "Represents the source of a volume to mount. Only one of its members may be specified.",
|
||||
"hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath",
|
||||
|
@ -1835,6 +1928,9 @@ var map_VolumeSource = map[string]string{
|
|||
"quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
|
||||
"azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
|
||||
"photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
|
||||
"projected": "Items for all in one resources secrets, configmaps, and downward API",
|
||||
"portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
|
||||
"scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
|
||||
}
|
||||
|
||||
func (VolumeSource) SwaggerDoc() map[string]string {
|
||||
|
|
35
vendor/k8s.io/kubernetes/pkg/api/v1/validation/BUILD
generated
vendored
|
@ -1,35 +0,0 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["validation.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/resource:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/validation",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/validation/field",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
159
vendor/k8s.io/kubernetes/pkg/api/v1/validation/validation.go
generated
vendored
|
@ -1,159 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
const isNegativeErrorMsg string = `must be greater than or equal to 0`
|
||||
const isNotIntegerErrorMsg string = `must be an integer`
|
||||
|
||||
func ValidateResourceRequirements(requirements *v1.ResourceRequirements, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
limPath := fldPath.Child("limits")
|
||||
reqPath := fldPath.Child("requests")
|
||||
for resourceName, quantity := range requirements.Limits {
|
||||
fldPath := limPath.Key(string(resourceName))
|
||||
// Validate resource name.
|
||||
allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
|
||||
|
||||
// Validate resource quantity.
|
||||
allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)
|
||||
|
||||
// Check that request <= limit.
|
||||
requestQuantity, exists := requirements.Requests[resourceName]
|
||||
if exists {
|
||||
// For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal.
|
||||
if resourceName == v1.ResourceNvidiaGPU && quantity.Cmp(requestQuantity) != 0 {
|
||||
allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", v1.ResourceNvidiaGPU)))
|
||||
} else if quantity.Cmp(requestQuantity) < 0 {
|
||||
allErrs = append(allErrs, field.Invalid(limPath, quantity.String(), fmt.Sprintf("must be greater than or equal to %s request", resourceName)))
|
||||
}
|
||||
}
|
||||
}
|
||||
for resourceName, quantity := range requirements.Requests {
|
||||
fldPath := reqPath.Key(string(resourceName))
|
||||
// Validate resource name.
|
||||
allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
|
||||
// Validate resource quantity.
|
||||
allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := validateResourceName(value, fldPath)
|
||||
if len(strings.Split(value, "/")) == 1 {
|
||||
if !api.IsStandardContainerResourceName(value) {
|
||||
return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
|
||||
}
|
||||
}
|
||||
return field.ErrorList{}
|
||||
}
|
||||
|
||||
// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource
|
||||
func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...)
|
||||
if api.IsIntegerResourceName(resource) {
|
||||
if value.MilliValue()%int64(1000) != int64(0) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// Validates that a Quantity is not negative
|
||||
func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if value.Cmp(resource.Quantity{}) < 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// Validate compute resource typename.
|
||||
// Refer to docs/design/resources.md for more details.
|
||||
func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for _, msg := range validation.IsQualifiedName(value) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
|
||||
}
|
||||
if len(allErrs) != 0 {
|
||||
return allErrs
|
||||
}
|
||||
|
||||
if len(strings.Split(value, "/")) == 1 {
|
||||
if !api.IsStandardResourceName(value) {
|
||||
return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified"))
|
||||
}
|
||||
}
|
||||
|
||||
return field.ErrorList{}
|
||||
}
|
||||
|
||||
func ValidatePodLogOptions(opts *v1.PodLogOptions) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if opts.TailLines != nil && *opts.TailLines < 0 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg))
|
||||
}
|
||||
if opts.LimitBytes != nil && *opts.LimitBytes < 1 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0"))
|
||||
}
|
||||
switch {
|
||||
case opts.SinceSeconds != nil && opts.SinceTime != nil:
|
||||
allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified"))
|
||||
case opts.SinceSeconds != nil:
|
||||
if *opts.SinceSeconds < 1 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0"))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func AccumulateUniqueHostPorts(containers []v1.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
for ci, ctr := range containers {
|
||||
idxPath := fldPath.Index(ci)
|
||||
portsPath := idxPath.Child("ports")
|
||||
for pi := range ctr.Ports {
|
||||
idxPath := portsPath.Index(pi)
|
||||
port := ctr.Ports[pi].HostPort
|
||||
if port == 0 {
|
||||
continue
|
||||
}
|
||||
str := fmt.Sprintf("%d/%s", port, ctr.Ports[pi].Protocol)
|
||||
if accumulator.Has(str) {
|
||||
allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str))
|
||||
} else {
|
||||
accumulator.Insert(str)
|
||||
}
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
370
vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go
generated
vendored
|
@ -71,6 +71,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector,
|
||||
Convert_v1_ConfigMapList_To_api_ConfigMapList,
|
||||
Convert_api_ConfigMapList_To_v1_ConfigMapList,
|
||||
Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection,
|
||||
Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection,
|
||||
Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource,
|
||||
Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource,
|
||||
Convert_v1_Container_To_api_Container,
|
||||
|
@ -93,6 +95,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint,
|
||||
Convert_v1_DeleteOptions_To_api_DeleteOptions,
|
||||
Convert_api_DeleteOptions_To_v1_DeleteOptions,
|
||||
Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection,
|
||||
Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection,
|
||||
Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile,
|
||||
Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile,
|
||||
Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource,
|
||||
|
@ -271,6 +275,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_api_PodTemplateList_To_v1_PodTemplateList,
|
||||
Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec,
|
||||
Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec,
|
||||
Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource,
|
||||
Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource,
|
||||
Convert_v1_Preconditions_To_api_Preconditions,
|
||||
Convert_api_Preconditions_To_v1_Preconditions,
|
||||
Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry,
|
||||
|
@ -279,6 +285,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm,
|
||||
Convert_v1_Probe_To_api_Probe,
|
||||
Convert_api_Probe_To_v1_Probe,
|
||||
Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource,
|
||||
Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource,
|
||||
Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource,
|
||||
Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource,
|
||||
Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource,
|
||||
|
@ -309,6 +317,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_api_ResourceRequirements_To_v1_ResourceRequirements,
|
||||
Convert_v1_SELinuxOptions_To_api_SELinuxOptions,
|
||||
Convert_api_SELinuxOptions_To_v1_SELinuxOptions,
|
||||
Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource,
|
||||
Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource,
|
||||
Convert_v1_Secret_To_api_Secret,
|
||||
Convert_api_Secret_To_v1_Secret,
|
||||
Convert_v1_SecretEnvSource_To_api_SecretEnvSource,
|
||||
|
@ -317,6 +327,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_api_SecretKeySelector_To_v1_SecretKeySelector,
|
||||
Convert_v1_SecretList_To_api_SecretList,
|
||||
Convert_api_SecretList_To_v1_SecretList,
|
||||
Convert_v1_SecretProjection_To_api_SecretProjection,
|
||||
Convert_api_SecretProjection_To_v1_SecretProjection,
|
||||
Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource,
|
||||
Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource,
|
||||
Convert_v1_SecurityContext_To_api_SecurityContext,
|
||||
|
@ -351,6 +363,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_api_Volume_To_v1_Volume,
|
||||
Convert_v1_VolumeMount_To_api_VolumeMount,
|
||||
Convert_api_VolumeMount_To_v1_VolumeMount,
|
||||
Convert_v1_VolumeProjection_To_api_VolumeProjection,
|
||||
Convert_api_VolumeProjection_To_v1_VolumeProjection,
|
||||
Convert_v1_VolumeSource_To_api_VolumeSource,
|
||||
Convert_api_VolumeSource_To_v1_VolumeSource,
|
||||
Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource,
|
||||
|
@ -551,7 +565,11 @@ func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSou
|
|||
}
|
||||
|
||||
func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error {
|
||||
out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
|
||||
if in.Monitors == nil {
|
||||
out.Monitors = make([]string, 0)
|
||||
} else {
|
||||
out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
|
||||
}
|
||||
out.Path = in.Path
|
||||
out.User = in.User
|
||||
out.SecretFile = in.SecretFile
|
||||
|
@ -642,7 +660,11 @@ func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStat
|
|||
|
||||
func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]ComponentStatus)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]ComponentStatus, 0)
|
||||
} else {
|
||||
out.Items = *(*[]ComponentStatus)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -732,7 +754,11 @@ func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.C
|
|||
|
||||
func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]ConfigMap)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]ConfigMap, 0)
|
||||
} else {
|
||||
out.Items = *(*[]ConfigMap)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -740,6 +766,32 @@ func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *C
|
|||
return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error {
|
||||
if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items))
|
||||
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error {
|
||||
return autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error {
|
||||
if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items))
|
||||
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error {
|
||||
return autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error {
|
||||
if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
|
||||
return err
|
||||
|
@ -839,7 +891,11 @@ func Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *ap
|
|||
}
|
||||
|
||||
func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error {
|
||||
out.Names = *(*[]string)(unsafe.Pointer(&in.Names))
|
||||
if in.Names == nil {
|
||||
out.Names = make([]string, 0)
|
||||
} else {
|
||||
out.Names = *(*[]string)(unsafe.Pointer(&in.Names))
|
||||
}
|
||||
out.SizeBytes = in.SizeBytes
|
||||
return nil
|
||||
}
|
||||
|
@ -1026,6 +1082,7 @@ func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *a
|
|||
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
|
||||
out.Preconditions = (*api.Preconditions)(unsafe.Pointer(in.Preconditions))
|
||||
out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents))
|
||||
out.PropagationPolicy = (*api.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1037,6 +1094,7 @@ func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, ou
|
|||
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
|
||||
out.Preconditions = (*Preconditions)(unsafe.Pointer(in.Preconditions))
|
||||
out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents))
|
||||
out.PropagationPolicy = (*DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1044,6 +1102,24 @@ func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *D
|
|||
return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error {
|
||||
out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error {
|
||||
return autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error {
|
||||
out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error {
|
||||
return autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
|
||||
out.Path = in.Path
|
||||
out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef))
|
||||
|
@ -1186,7 +1262,11 @@ func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s
|
|||
|
||||
func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Subsets = *(*[]EndpointSubset)(unsafe.Pointer(&in.Subsets))
|
||||
if in.Subsets == nil {
|
||||
out.Subsets = make([]EndpointSubset, 0)
|
||||
} else {
|
||||
out.Subsets = *(*[]EndpointSubset)(unsafe.Pointer(&in.Subsets))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1206,7 +1286,11 @@ func Convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.E
|
|||
|
||||
func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]Endpoints)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]Endpoints, 0)
|
||||
} else {
|
||||
out.Items = *(*[]Endpoints)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1336,7 +1420,11 @@ func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s
|
|||
|
||||
func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]Event)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]Event, 0)
|
||||
} else {
|
||||
out.Items = *(*[]Event)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1395,7 +1483,11 @@ func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *ap
|
|||
}
|
||||
|
||||
func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error {
|
||||
out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs))
|
||||
if in.TargetWWNs == nil {
|
||||
out.TargetWWNs = make([]string, 0)
|
||||
} else {
|
||||
out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs))
|
||||
}
|
||||
out.Lun = (*int32)(unsafe.Pointer(in.Lun))
|
||||
out.FSType = in.FSType
|
||||
out.ReadOnly = in.ReadOnly
|
||||
|
@ -1613,6 +1705,7 @@ func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSo
|
|||
out.ISCSIInterface = in.ISCSIInterface
|
||||
out.FSType = in.FSType
|
||||
out.ReadOnly = in.ReadOnly
|
||||
out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1627,6 +1720,7 @@ func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolu
|
|||
out.ISCSIInterface = in.ISCSIInterface
|
||||
out.FSType = in.FSType
|
||||
out.ReadOnly = in.ReadOnly
|
||||
out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1740,7 +1834,11 @@ func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *ap
|
|||
|
||||
func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]LimitRange)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]LimitRange, 0)
|
||||
} else {
|
||||
out.Items = *(*[]LimitRange)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1758,7 +1856,11 @@ func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *ap
|
|||
}
|
||||
|
||||
func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error {
|
||||
out.Limits = *(*[]LimitRangeItem)(unsafe.Pointer(&in.Limits))
|
||||
if in.Limits == nil {
|
||||
out.Limits = make([]LimitRangeItem, 0)
|
||||
} else {
|
||||
out.Limits = *(*[]LimitRangeItem)(unsafe.Pointer(&in.Limits))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1797,7 +1899,7 @@ func autoConvert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope
|
|||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
out.Items = make([]runtime.RawExtension, 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1807,10 +1909,10 @@ func Convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) er
|
|||
}
|
||||
|
||||
func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error {
|
||||
if err := api.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
|
||||
if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := api.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
|
||||
if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Watch = in.Watch
|
||||
|
@ -1824,10 +1926,10 @@ func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOpt
|
|||
}
|
||||
|
||||
func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error {
|
||||
if err := api.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
|
||||
if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := api.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
|
||||
if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Watch = in.Watch
|
||||
|
@ -1960,7 +2062,11 @@ func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.N
|
|||
|
||||
func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]Namespace)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]Namespace, 0)
|
||||
} else {
|
||||
out.Items = *(*[]Namespace)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2136,7 +2242,11 @@ func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conv
|
|||
|
||||
func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]Node)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]Node, 0)
|
||||
} else {
|
||||
out.Items = *(*[]Node)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2190,7 +2300,11 @@ func Convert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.Node
|
|||
}
|
||||
|
||||
func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error {
|
||||
out.NodeSelectorTerms = *(*[]NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms))
|
||||
if in.NodeSelectorTerms == nil {
|
||||
out.NodeSelectorTerms = make([]NodeSelectorTerm, 0)
|
||||
} else {
|
||||
out.NodeSelectorTerms = *(*[]NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2230,7 +2344,11 @@ func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, o
|
|||
}
|
||||
|
||||
func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error {
|
||||
out.MatchExpressions = *(*[]NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions))
|
||||
if in.MatchExpressions == nil {
|
||||
out.MatchExpressions = make([]NodeSelectorRequirement, 0)
|
||||
} else {
|
||||
out.MatchExpressions = *(*[]NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2243,6 +2361,7 @@ func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s
|
|||
out.ExternalID = in.ExternalID
|
||||
out.ProviderID = in.ProviderID
|
||||
out.Unschedulable = in.Unschedulable
|
||||
out.Taints = *(*[]api.Taint)(unsafe.Pointer(&in.Taints))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2255,6 +2374,7 @@ func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s
|
|||
out.ExternalID = in.ExternalID
|
||||
out.ProviderID = in.ProviderID
|
||||
out.Unschedulable = in.Unschedulable
|
||||
out.Taints = *(*[]Taint)(unsafe.Pointer(&in.Taints))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2510,7 +2630,11 @@ func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *P
|
|||
|
||||
func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]PersistentVolumeClaim)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]PersistentVolumeClaim, 0)
|
||||
} else {
|
||||
out.Items = *(*[]PersistentVolumeClaim)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2525,6 +2649,7 @@ func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(i
|
|||
return err
|
||||
}
|
||||
out.VolumeName = in.VolumeName
|
||||
out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2539,6 +2664,7 @@ func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(i
|
|||
return err
|
||||
}
|
||||
out.VolumeName = in.VolumeName
|
||||
out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2619,7 +2745,7 @@ func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.Per
|
|||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
out.Items = make([]PersistentVolume, 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -2646,6 +2772,8 @@ func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Per
|
|||
out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
|
||||
out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
|
||||
out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
|
||||
out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
|
||||
out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2671,6 +2799,8 @@ func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api
|
|||
out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
|
||||
out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
|
||||
out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
|
||||
out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
|
||||
out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2686,6 +2816,7 @@ func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *Persist
|
|||
out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
|
||||
out.ClaimRef = (*api.ObjectReference)(unsafe.Pointer(in.ClaimRef))
|
||||
out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy)
|
||||
out.StorageClassName = in.StorageClassName
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2701,6 +2832,7 @@ func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.Per
|
|||
out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
|
||||
out.ClaimRef = (*ObjectReference)(unsafe.Pointer(in.ClaimRef))
|
||||
out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy)
|
||||
out.StorageClassName = in.StorageClassName
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2908,7 +3040,11 @@ func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions,
|
|||
out.Stderr = in.Stderr
|
||||
out.TTY = in.TTY
|
||||
out.Container = in.Container
|
||||
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
|
||||
if in.Command == nil {
|
||||
out.Command = make([]string, 0)
|
||||
} else {
|
||||
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2947,7 +3083,7 @@ func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conv
|
|||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
out.Items = make([]Pod, 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -3084,6 +3220,7 @@ func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conv
|
|||
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
|
||||
out.ServiceAccountName = in.ServiceAccountName
|
||||
// INFO: in.DeprecatedServiceAccount opted out of conversion generation
|
||||
out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
|
||||
out.NodeName = in.NodeName
|
||||
// INFO: in.HostNetwork opted out of conversion generation
|
||||
// INFO: in.HostPID opted out of conversion generation
|
||||
|
@ -3102,6 +3239,7 @@ func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conv
|
|||
out.Subdomain = in.Subdomain
|
||||
out.Affinity = (*api.Affinity)(unsafe.Pointer(in.Affinity))
|
||||
out.SchedulerName = in.SchedulerName
|
||||
out.Tolerations = *(*[]api.Toleration)(unsafe.Pointer(&in.Tolerations))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -3118,13 +3256,18 @@ func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conv
|
|||
out.Volumes = nil
|
||||
}
|
||||
out.InitContainers = *(*[]Container)(unsafe.Pointer(&in.InitContainers))
|
||||
out.Containers = *(*[]Container)(unsafe.Pointer(&in.Containers))
|
||||
if in.Containers == nil {
|
||||
out.Containers = make([]Container, 0)
|
||||
} else {
|
||||
out.Containers = *(*[]Container)(unsafe.Pointer(&in.Containers))
|
||||
}
|
||||
out.RestartPolicy = RestartPolicy(in.RestartPolicy)
|
||||
out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds))
|
||||
out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
|
||||
out.DNSPolicy = DNSPolicy(in.DNSPolicy)
|
||||
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
|
||||
out.ServiceAccountName = in.ServiceAccountName
|
||||
out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
|
||||
out.NodeName = in.NodeName
|
||||
if in.SecurityContext != nil {
|
||||
in, out := &in.SecurityContext, &out.SecurityContext
|
||||
|
@ -3140,6 +3283,7 @@ func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conv
|
|||
out.Subdomain = in.Subdomain
|
||||
out.Affinity = (*Affinity)(unsafe.Pointer(in.Affinity))
|
||||
out.SchedulerName = in.SchedulerName
|
||||
out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -3250,7 +3394,7 @@ func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateLi
|
|||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
out.Items = make([]PodTemplate, 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -3275,6 +3419,28 @@ func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSp
|
|||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error {
|
||||
out.VolumeID = in.VolumeID
|
||||
out.FSType = in.FSType
|
||||
out.ReadOnly = in.ReadOnly
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error {
|
||||
return autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error {
|
||||
out.VolumeID = in.VolumeID
|
||||
out.FSType = in.FSType
|
||||
out.ReadOnly = in.ReadOnly
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error {
|
||||
return autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error {
|
||||
out.UID = (*types.UID)(unsafe.Pointer(in.UID))
|
||||
return nil
|
||||
|
@ -3377,6 +3543,30 @@ func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope
|
|||
return autoConvert_api_Probe_To_v1_Probe(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error {
|
||||
out.Sources = *(*[]api.VolumeProjection)(unsafe.Pointer(&in.Sources))
|
||||
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error {
|
||||
return autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error {
|
||||
if in.Sources == nil {
|
||||
out.Sources = make([]VolumeProjection, 0)
|
||||
} else {
|
||||
out.Sources = *(*[]VolumeProjection)(unsafe.Pointer(&in.Sources))
|
||||
}
|
||||
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error {
|
||||
return autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error {
|
||||
out.Registry = in.Registry
|
||||
out.Volume = in.Volume
|
||||
|
@ -3420,7 +3610,11 @@ func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out
|
|||
}
|
||||
|
||||
func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error {
|
||||
out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
|
||||
if in.CephMonitors == nil {
|
||||
out.CephMonitors = make([]string, 0)
|
||||
} else {
|
||||
out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
|
||||
}
|
||||
out.RBDImage = in.RBDImage
|
||||
out.FSType = in.FSType
|
||||
out.RBDPool = in.RBDPool
|
||||
|
@ -3449,7 +3643,11 @@ func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out
|
|||
func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Range = in.Range
|
||||
out.Data = *(*[]byte)(unsafe.Pointer(&in.Data))
|
||||
if in.Data == nil {
|
||||
out.Data = make([]byte, 0)
|
||||
} else {
|
||||
out.Data = *(*[]byte)(unsafe.Pointer(&in.Data))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -3544,7 +3742,7 @@ func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(i
|
|||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
out.Items = make([]ReplicationController, 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -3554,7 +3752,7 @@ func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *a
|
|||
}
|
||||
|
||||
func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error {
|
||||
if err := api.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.MinReadySeconds = in.MinReadySeconds
|
||||
|
@ -3572,7 +3770,7 @@ func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(i
|
|||
}
|
||||
|
||||
func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error {
|
||||
if err := api.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.MinReadySeconds = in.MinReadySeconds
|
||||
|
@ -3681,7 +3879,11 @@ func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList
|
|||
|
||||
func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]ResourceQuota)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]ResourceQuota, 0)
|
||||
} else {
|
||||
out.Items = *(*[]ResourceQuota)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -3773,6 +3975,42 @@ func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out
|
|||
return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error {
|
||||
out.Gateway = in.Gateway
|
||||
out.System = in.System
|
||||
out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
|
||||
out.SSLEnabled = in.SSLEnabled
|
||||
out.ProtectionDomain = in.ProtectionDomain
|
||||
out.StoragePool = in.StoragePool
|
||||
out.StorageMode = in.StorageMode
|
||||
out.VolumeName = in.VolumeName
|
||||
out.FSType = in.FSType
|
||||
out.ReadOnly = in.ReadOnly
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error {
|
||||
return autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error {
|
||||
out.Gateway = in.Gateway
|
||||
out.System = in.System
|
||||
out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef))
|
||||
out.SSLEnabled = in.SSLEnabled
|
||||
out.ProtectionDomain = in.ProtectionDomain
|
||||
out.StoragePool = in.StoragePool
|
||||
out.StorageMode = in.StorageMode
|
||||
out.VolumeName = in.VolumeName
|
||||
out.FSType = in.FSType
|
||||
out.ReadOnly = in.ReadOnly
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error {
|
||||
return autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data))
|
@@ -3873,7 +4111,7 @@ func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *Secret
			}
		}
	} else {
		out.Items = nil
		out.Items = make([]Secret, 0)
	}
	return nil
}

@@ -3882,6 +4120,32 @@ func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList
	return autoConvert_api_SecretList_To_v1_SecretList(in, out, s)
}

func autoConvert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error {
	if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
		return err
	}
	out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items))
	out.Optional = (*bool)(unsafe.Pointer(in.Optional))
	return nil
}

func Convert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error {
	return autoConvert_v1_SecretProjection_To_api_SecretProjection(in, out, s)
}

func autoConvert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error {
	if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
		return err
	}
	out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items))
	out.Optional = (*bool)(unsafe.Pointer(in.Optional))
	return nil
}

func Convert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error {
	return autoConvert_api_SecretProjection_To_v1_SecretProjection(in, out, s)
}

func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error {
	out.SecretName = in.SecretName
	out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items))

@@ -3990,6 +4254,7 @@ func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out
	out.ObjectMeta = in.ObjectMeta
	out.Secrets = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Secrets))
	out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
	out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
	return nil
}

@@ -4001,6 +4266,7 @@ func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount,
	out.ObjectMeta = in.ObjectMeta
	out.Secrets = *(*[]ObjectReference)(unsafe.Pointer(&in.Secrets))
	out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
	out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
	return nil
}

@@ -4020,7 +4286,11 @@ func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountL

func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]ServiceAccount)(unsafe.Pointer(&in.Items))
	if in.Items == nil {
		out.Items = make([]ServiceAccount, 0)
	} else {
		out.Items = *(*[]ServiceAccount)(unsafe.Pointer(&in.Items))
	}
	return nil
}

@@ -4059,7 +4329,7 @@ func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *Ser
			}
		}
	} else {
		out.Items = nil
		out.Items = make([]Service, 0)
	}
	return nil
}

@@ -4203,6 +4473,7 @@ func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.S
	out.Key = in.Key
	out.Value = in.Value
	out.Effect = api.TaintEffect(in.Effect)
	out.TimeAdded = in.TimeAdded
	return nil
}

@@ -4214,6 +4485,7 @@ func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.S
	out.Key = in.Key
	out.Value = in.Value
	out.Effect = TaintEffect(in.Effect)
	out.TimeAdded = in.TimeAdded
	return nil
}

@@ -4226,6 +4498,7 @@ func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Tolera
	out.Operator = api.TolerationOperator(in.Operator)
	out.Value = in.Value
	out.Effect = api.TaintEffect(in.Effect)
	out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
	return nil
}

@@ -4238,6 +4511,7 @@ func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Tolera
	out.Operator = TolerationOperator(in.Operator)
	out.Value = in.Value
	out.Effect = TaintEffect(in.Effect)
	out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
	return nil
}

@@ -4293,6 +4567,28 @@ func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeM
	return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s)
}

func autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error {
	out.Secret = (*api.SecretProjection)(unsafe.Pointer(in.Secret))
	out.DownwardAPI = (*api.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI))
	out.ConfigMap = (*api.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap))
	return nil
}

func Convert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error {
	return autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in, out, s)
}

func autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error {
	out.Secret = (*SecretProjection)(unsafe.Pointer(in.Secret))
	out.DownwardAPI = (*DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI))
	out.ConfigMap = (*ConfigMapProjection)(unsafe.Pointer(in.ConfigMap))
	return nil
}

func Convert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error {
	return autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in, out, s)
}

func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error {
	out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
	out.EmptyDir = (*api.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir))

@@ -4317,6 +4613,9 @@ func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.
	out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
	out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
	out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
	out.Projected = (*api.ProjectedVolumeSource)(unsafe.Pointer(in.Projected))
	out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
	out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
	return nil
}

@@ -4348,6 +4647,9 @@ func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *
	out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
	out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
	out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
	out.Projected = (*ProjectedVolumeSource)(unsafe.Pointer(in.Projected))
	out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
	out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
	return nil
}
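For orientation, the generated converters above all share the signature func(in, out, s conversion.Scope) error and are normally invoked through a registered runtime.Scheme. The following is a minimal, illustrative sketch, not part of the vendored diff, that calls one of them directly; it assumes the 1.6-era import paths k8s.io/kubernetes/pkg/api and k8s.io/kubernetes/pkg/api/v1 used by this vendor tree, and it passes a nil conversion scope because the SecretProjection converter shown above only forwards the scope to the LocalObjectReference conversion, which ignores it.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	optional := true
	// Build a versioned (v1) SecretProjection and convert it to the internal api type.
	in := &v1.SecretProjection{
		LocalObjectReference: v1.LocalObjectReference{Name: "tls-certs"},
		Items:                []v1.KeyToPath{{Key: "tls.crt", Path: "certs/tls.crt"}},
		Optional:             &optional,
	}
	out := &api.SecretProjection{}

	// nil scope is sufficient for this particular converter (see note above).
	if err := v1.Convert_v1_SecretProjection_To_api_SecretProjection(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Items[0].Path, *out.Optional) // tls-certs certs/tls.crt true
}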
238 vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go generated vendored
@@ -53,6 +53,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Container, InType: reflect.TypeOf(&Container{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})},

@@ -64,6 +65,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIProjection, InType: reflect.TypeOf(&DownwardAPIProjection{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})},

@@ -153,10 +155,12 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Probe, InType: reflect.TypeOf(&Probe{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})},

@@ -172,10 +176,12 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Secret, InType: reflect.TypeOf(&Secret{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretList, InType: reflect.TypeOf(&SecretList{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})},

@@ -193,6 +199,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Toleration, InType: reflect.TypeOf(&Toleration{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Volume, InType: reflect.TypeOf(&Volume{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})},

@@ -472,6 +479,29 @@ func DeepCopy_v1_ConfigMapList(in interface{}, out interface{}, c *conversion.Cl
	}
}

func DeepCopy_v1_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*ConfigMapProjection)
		out := out.(*ConfigMapProjection)
		*out = *in
		if in.Items != nil {
			in, out := &in.Items, &out.Items
			*out = make([]KeyToPath, len(*in))
			for i := range *in {
				if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil {
					return err
				}
			}
		}
		if in.Optional != nil {
			in, out := &in.Optional, &out.Optional
			*out = new(bool)
			**out = **in
		}
		return nil
	}
}

func DeepCopy_v1_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*ConfigMapVolumeSource)

@@ -705,6 +735,29 @@ func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cl
			*out = new(bool)
			**out = **in
		}
		if in.PropagationPolicy != nil {
			in, out := &in.PropagationPolicy, &out.PropagationPolicy
			*out = new(DeletionPropagation)
			**out = **in
		}
		return nil
	}
}

func DeepCopy_v1_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*DownwardAPIProjection)
		out := out.(*DownwardAPIProjection)
		*out = *in
		if in.Items != nil {
			in, out := &in.Items, &out.Items
			*out = make([]DownwardAPIVolumeFile, len(*in))
			for i := range *in {
				if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil {
					return err
				}
			}
		}
		return nil
	}
}

@@ -1140,6 +1193,11 @@ func DeepCopy_v1_ISCSIVolumeSource(in interface{}, out interface{}, c *conversio
		in := in.(*ISCSIVolumeSource)
		out := out.(*ISCSIVolumeSource)
		*out = *in
		if in.Portals != nil {
			in, out := &in.Portals, &out.Portals
			*out = make([]string, len(*in))
			copy(*out, *in)
		}
		return nil
	}
}

@@ -1421,6 +1479,9 @@ func DeepCopy_v1_Node(in interface{}, out interface{}, c *conversion.Cloner) err
		} else {
			out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta)
		}
		if err := DeepCopy_v1_NodeSpec(&in.Spec, &out.Spec, c); err != nil {
			return err
		}
		if err := DeepCopy_v1_NodeStatus(&in.Status, &out.Status, c); err != nil {
			return err
		}

@@ -1580,6 +1641,15 @@ func DeepCopy_v1_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner)
		in := in.(*NodeSpec)
		out := out.(*NodeSpec)
		*out = *in
		if in.Taints != nil {
			in, out := &in.Taints, &out.Taints
			*out = make([]Taint, len(*in))
			for i := range *in {
				if err := DeepCopy_v1_Taint(&(*in)[i], &(*out)[i], c); err != nil {
					return err
				}
			}
		}
		return nil
	}
}

@@ -1793,6 +1863,11 @@ func DeepCopy_v1_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *c
		if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil {
			return err
		}
		if in.StorageClassName != nil {
			in, out := &in.StorageClassName, &out.StorageClassName
			*out = new(string)
			**out = **in
		}
		return nil
	}
}

@@ -1885,7 +1960,9 @@ func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conv
		if in.ISCSI != nil {
			in, out := &in.ISCSI, &out.ISCSI
			*out = new(ISCSIVolumeSource)
			**out = **in
			if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil {
				return err
			}
		}
		if in.Cinder != nil {
			in, out := &in.Cinder, &out.Cinder

@@ -1945,6 +2022,18 @@ func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conv
			*out = new(PhotonPersistentDiskVolumeSource)
			**out = **in
		}
		if in.PortworxVolume != nil {
			in, out := &in.PortworxVolume, &out.PortworxVolume
			*out = new(PortworxVolumeSource)
			**out = **in
		}
		if in.ScaleIO != nil {
			in, out := &in.ScaleIO, &out.ScaleIO
			*out = new(ScaleIOVolumeSource)
			if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil {
				return err
			}
		}
		return nil
	}
}

@@ -2296,6 +2385,11 @@ func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner)
				(*out)[key] = val
			}
		}
		if in.AutomountServiceAccountToken != nil {
			in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
			*out = new(bool)
			**out = **in
		}
		if in.SecurityContext != nil {
			in, out := &in.SecurityContext, &out.SecurityContext
			*out = new(PodSecurityContext)

@@ -2315,6 +2409,15 @@ func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner)
				return err
			}
		}
		if in.Tolerations != nil {
			in, out := &in.Tolerations, &out.Tolerations
			*out = make([]Toleration, len(*in))
			for i := range *in {
				if err := DeepCopy_v1_Toleration(&(*in)[i], &(*out)[i], c); err != nil {
					return err
				}
			}
		}
		return nil
	}
}

@@ -2429,6 +2532,15 @@ func DeepCopy_v1_PodTemplateSpec(in interface{}, out interface{}, c *conversion.
	}
}

func DeepCopy_v1_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*PortworxVolumeSource)
		out := out.(*PortworxVolumeSource)
		*out = *in
		return nil
	}
}

func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*Preconditions)

@@ -2480,6 +2592,29 @@ func DeepCopy_v1_Probe(in interface{}, out interface{}, c *conversion.Cloner) er
	}
}

func DeepCopy_v1_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*ProjectedVolumeSource)
		out := out.(*ProjectedVolumeSource)
		*out = *in
		if in.Sources != nil {
			in, out := &in.Sources, &out.Sources
			*out = make([]VolumeProjection, len(*in))
			for i := range *in {
				if err := DeepCopy_v1_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil {
					return err
				}
			}
		}
		if in.DefaultMode != nil {
			in, out := &in.DefaultMode, &out.DefaultMode
			*out = new(int32)
			**out = **in
		}
		return nil
	}
}

func DeepCopy_v1_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*QuobyteVolumeSource)

@@ -2745,6 +2880,20 @@ func DeepCopy_v1_SELinuxOptions(in interface{}, out interface{}, c *conversion.C
	}
}

func DeepCopy_v1_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*ScaleIOVolumeSource)
		out := out.(*ScaleIOVolumeSource)
		*out = *in
		if in.SecretRef != nil {
			in, out := &in.SecretRef, &out.SecretRef
			*out = new(LocalObjectReference)
			**out = **in
		}
		return nil
	}
}

func DeepCopy_v1_Secret(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*Secret)

@@ -2823,6 +2972,29 @@ func DeepCopy_v1_SecretList(in interface{}, out interface{}, c *conversion.Clone
	}
}

func DeepCopy_v1_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*SecretProjection)
		out := out.(*SecretProjection)
		*out = *in
		if in.Items != nil {
			in, out := &in.Items, &out.Items
			*out = make([]KeyToPath, len(*in))
			for i := range *in {
				if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil {
					return err
				}
			}
		}
		if in.Optional != nil {
			in, out := &in.Optional, &out.Optional
			*out = new(bool)
			**out = **in
		}
		return nil
	}
}

func DeepCopy_v1_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*SecretVolumeSource)

@@ -2941,6 +3113,11 @@ func DeepCopy_v1_ServiceAccount(in interface{}, out interface{}, c *conversion.C
			*out = make([]LocalObjectReference, len(*in))
			copy(*out, *in)
		}
		if in.AutomountServiceAccountToken != nil {
			in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
			*out = new(bool)
			**out = **in
		}
		return nil
	}
}

@@ -3070,6 +3247,7 @@ func DeepCopy_v1_Taint(in interface{}, out interface{}, c *conversion.Cloner) er
		in := in.(*Taint)
		out := out.(*Taint)
		*out = *in
		out.TimeAdded = in.TimeAdded.DeepCopy()
		return nil
	}
}

@@ -3079,6 +3257,11 @@ func DeepCopy_v1_Toleration(in interface{}, out interface{}, c *conversion.Clone
		in := in.(*Toleration)
		out := out.(*Toleration)
		*out = *in
		if in.TolerationSeconds != nil {
			in, out := &in.TolerationSeconds, &out.TolerationSeconds
			*out = new(int64)
			**out = **in
		}
		return nil
	}
}

@@ -3104,6 +3287,36 @@ func DeepCopy_v1_VolumeMount(in interface{}, out interface{}, c *conversion.Clon
	}
}

func DeepCopy_v1_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*VolumeProjection)
		out := out.(*VolumeProjection)
		*out = *in
		if in.Secret != nil {
			in, out := &in.Secret, &out.Secret
			*out = new(SecretProjection)
			if err := DeepCopy_v1_SecretProjection(*in, *out, c); err != nil {
				return err
			}
		}
		if in.DownwardAPI != nil {
			in, out := &in.DownwardAPI, &out.DownwardAPI
			*out = new(DownwardAPIProjection)
			if err := DeepCopy_v1_DownwardAPIProjection(*in, *out, c); err != nil {
				return err
			}
		}
		if in.ConfigMap != nil {
			in, out := &in.ConfigMap, &out.ConfigMap
			*out = new(ConfigMapProjection)
			if err := DeepCopy_v1_ConfigMapProjection(*in, *out, c); err != nil {
				return err
			}
		}
		return nil
	}
}

func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*VolumeSource)

@@ -3149,7 +3362,9 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo
		if in.ISCSI != nil {
			in, out := &in.ISCSI, &out.ISCSI
			*out = new(ISCSIVolumeSource)
			**out = **in
			if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil {
				return err
			}
		}
		if in.Glusterfs != nil {
			in, out := &in.Glusterfs, &out.Glusterfs

@@ -3240,6 +3455,25 @@ func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Clo
			*out = new(PhotonPersistentDiskVolumeSource)
			**out = **in
		}
		if in.Projected != nil {
			in, out := &in.Projected, &out.Projected
			*out = new(ProjectedVolumeSource)
			if err := DeepCopy_v1_ProjectedVolumeSource(*in, *out, c); err != nil {
				return err
			}
		}
		if in.PortworxVolume != nil {
			in, out := &in.PortworxVolume, &out.PortworxVolume
			*out = new(PortworxVolumeSource)
			**out = **in
		}
		if in.ScaleIO != nil {
			in, out := &in.ScaleIO, &out.ScaleIO
			*out = new(ScaleIOVolumeSource)
			if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil {
				return err
			}
		}
		return nil
	}
}
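The deep-copy helpers added above follow the same interface{}-based pattern and never mutate their input. A minimal sketch of exercising one of them directly, again illustrative only and not part of the vendored diff: it assumes the vendored import path k8s.io/kubernetes/pkg/api/v1, uses an arbitrary toleration key, and passes a nil *conversion.Cloner because the Toleration copier shown in the hunk above never dereferences it.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	seconds := int64(300)
	in := &v1.Toleration{Key: "example-key", TolerationSeconds: &seconds}
	out := &v1.Toleration{}

	// DeepCopy_v1_Toleration allocates a fresh *int64 for TolerationSeconds,
	// so the copy is independent of the original.
	if err := v1.DeepCopy_v1_Toleration(in, out, nil); err != nil {
		panic(err)
	}

	*in.TolerationSeconds = 0
	fmt.Println(*out.TolerationSeconds) // 300
}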
54 vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go generated vendored
@@ -137,6 +137,9 @@ func SetObjectDefaults_PersistentVolume(in *PersistentVolume) {
	if in.Spec.PersistentVolumeSource.AzureDisk != nil {
		SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk)
	}
	if in.Spec.PersistentVolumeSource.ScaleIO != nil {
		SetDefaults_ScaleIOVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO)
	}
}

func SetObjectDefaults_PersistentVolumeClaim(in *PersistentVolumeClaim) {

@@ -190,6 +193,23 @@ func SetObjectDefaults_Pod(in *Pod) {
		if a.VolumeSource.AzureDisk != nil {
			SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
		}
		if a.VolumeSource.Projected != nil {
			SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
			for j := range a.VolumeSource.Projected.Sources {
				b := &a.VolumeSource.Projected.Sources[j]
				if b.DownwardAPI != nil {
					for k := range b.DownwardAPI.Items {
						c := &b.DownwardAPI.Items[k]
						if c.FieldRef != nil {
							SetDefaults_ObjectFieldSelector(c.FieldRef)
						}
					}
				}
			}
		}
		if a.VolumeSource.ScaleIO != nil {
			SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
		}
	}
	for i := range in.Spec.InitContainers {
		a := &in.Spec.InitContainers[i]

@@ -321,6 +341,23 @@ func SetObjectDefaults_PodTemplate(in *PodTemplate) {
		if a.VolumeSource.AzureDisk != nil {
			SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
		}
		if a.VolumeSource.Projected != nil {
			SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
			for j := range a.VolumeSource.Projected.Sources {
				b := &a.VolumeSource.Projected.Sources[j]
				if b.DownwardAPI != nil {
					for k := range b.DownwardAPI.Items {
						c := &b.DownwardAPI.Items[k]
						if c.FieldRef != nil {
							SetDefaults_ObjectFieldSelector(c.FieldRef)
						}
					}
				}
			}
		}
		if a.VolumeSource.ScaleIO != nil {
			SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
		}
	}
	for i := range in.Template.Spec.InitContainers {
		a := &in.Template.Spec.InitContainers[i]

@@ -446,6 +483,23 @@ func SetObjectDefaults_ReplicationController(in *ReplicationController) {
		if a.VolumeSource.AzureDisk != nil {
			SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
		}
		if a.VolumeSource.Projected != nil {
			SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
			for j := range a.VolumeSource.Projected.Sources {
				b := &a.VolumeSource.Projected.Sources[j]
				if b.DownwardAPI != nil {
					for k := range b.DownwardAPI.Items {
						c := &b.DownwardAPI.Items[k]
						if c.FieldRef != nil {
							SetDefaults_ObjectFieldSelector(c.FieldRef)
						}
					}
				}
			}
		}
		if a.VolumeSource.ScaleIO != nil {
			SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
		}
	}
	for i := range in.Spec.Template.Spec.InitContainers {
		a := &in.Spec.Template.Spec.InitContainers[i]