vendor: remove dep and use vndr
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
parent 16f44674a4, commit 148e72d81e
16131 changed files with 73815 additions and 4235138 deletions
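For context: vndr (github.com/LK4D4/vndr) works from a flat vendor.conf manifest, one dependency per line giving the import path, a git revision (tag, branch, or SHA), and an optional clone URL. A minimal illustrative sketch of that format, with hypothetical entries rather than the revisions actually pinned by this commit:

# vendor.conf: <import path> <revision> [<repository URL>]   (illustrative only)
github.com/example/somedep v1.2.3
k8s.io/kubernetes a1b2c3d4e5f60718293a4b5c6d7e8f9012345678 https://github.com/kubernetes/kubernetes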
vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD (generated, vendored): 56 lines deleted
@@ -1,56 +0,0 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "helpers.go",
        "register.go",
        "types.go",
        "zz_generated.deepcopy.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//pkg/apis/autoscaling:go_default_library",
        "//pkg/util/intstr:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/conversion",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["helpers_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/apis/extensions/install:all-srcs",
        "//pkg/apis/extensions/v1beta1:all-srcs",
        "//pkg/apis/extensions/validation:all-srcs",
    ],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS (generated, vendored): 41 lines deleted
@@ -1,41 +0,0 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- brendandburns
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- bprashanth
- erictune
- pmorie
- sttts
- kargakis
- saad-ali
- janetkuo
- justinsb
- ncdc
- timstclair
- mwielgus
- timothysc
- soltysh
- piosz
- dims
- errordeveloper
- madhusudancs
- rootfs
- jszczepkowski
- mml
- resouer
- mbohlool
- david-mcmahon
- therc
- pweil-
- tmrts
- mqliang
- lukaszo
- jianhuiz
vendor/k8s.io/kubernetes/pkg/apis/extensions/helpers_test.go (generated, vendored): 62 lines deleted
@@ -1,62 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package extensions

import (
	"reflect"
	"testing"
)

func TestPodAnnotationsFromSysctls(t *testing.T) {
	type Test struct {
		sysctls       []string
		expectedValue string
	}
	for _, test := range []Test{
		{sysctls: []string{"a.b"}, expectedValue: "a.b"},
		{sysctls: []string{"a.b", "c.d"}, expectedValue: "a.b,c.d"},
		{sysctls: []string{"a.b", "a.b"}, expectedValue: "a.b,a.b"},
		{sysctls: []string{}, expectedValue: ""},
		{sysctls: nil, expectedValue: ""},
	} {
		a := PodAnnotationsFromSysctls(test.sysctls)
		if a != test.expectedValue {
			t.Errorf("wrong value for %v: got=%q wanted=%q", test.sysctls, a, test.expectedValue)
		}
	}
}

func TestSysctlsFromPodSecurityPolicyAnnotation(t *testing.T) {
	type Test struct {
		expectedValue []string
		annotation    string
	}
	for _, test := range []Test{
		{annotation: "a.b", expectedValue: []string{"a.b"}},
		{annotation: "a.b,c.d", expectedValue: []string{"a.b", "c.d"}},
		{annotation: "a.b,a.b", expectedValue: []string{"a.b", "a.b"}},
		{annotation: "", expectedValue: []string{}},
	} {
		sysctls, err := SysctlsFromPodSecurityPolicyAnnotation(test.annotation)
		if err != nil {
			t.Errorf("error for %q: %v", test.annotation, err)
		}
		if !reflect.DeepEqual(sysctls, test.expectedValue) {
			t.Errorf("wrong value for %q: got=%v wanted=%v", test.annotation, sysctls, test.expectedValue)
		}
	}
}
vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD (generated, vendored): 36 lines deleted
@@ -1,36 +0,0 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["install.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/apimachinery/announced",
        "//vendor:k8s.io/apimachinery/pkg/apimachinery/registered",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go (generated, vendored): 3 changes
@@ -19,7 +19,6 @@ package extensions
import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

// GroupName is the group name use in this package
@@ -50,8 +49,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
		&Deployment{},
		&DeploymentList{},
		&DeploymentRollback{},
		&autoscaling.HorizontalPodAutoscaler{},
		&autoscaling.HorizontalPodAutoscalerList{},
		&ReplicationControllerDummy{},
		&Scale{},
		&ThirdPartyResource{},
vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go (generated, vendored): 123 changes
@@ -29,10 +29,10 @@ support is experimental.
package extensions

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/util/intstr"
)

const (
@@ -370,19 +370,18 @@ type DaemonSetList struct {
	Items []Deployment
}

// TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out.
type DaemonSetUpdateStrategy struct {
	// Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate.
	// +optional
	// Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
	// Default is OnDelete.
	// +optional
	Type DaemonSetUpdateStrategyType

	// Rolling update config params. Present only if DaemonSetUpdateStrategy =
	// RollingUpdate.
	// Rolling update config params. Present only if type = "RollingUpdate".
	//---
	// TODO: Update this to follow our convention for oneOf, whatever we decide it
	// to be. Same as DeploymentStrategy.RollingUpdate.
	// +optional
	// See https://github.com/kubernetes/kubernetes/issues/35345
	// +optional
	RollingUpdate *RollingUpdateDaemonSet
}

@@ -391,6 +390,9 @@ type DaemonSetUpdateStrategyType string
const (
	// Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
	RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"

	// Replace the old daemons only when it's killed
	OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
)

// Spec to control the desired behavior of daemon set rolling update.
@@ -401,87 +403,88 @@ type RollingUpdateDaemonSet struct {
	// number is calculated from percentage by rounding up.
	// This cannot be 0.
	// Default value is 1.
	// Example: when this is set to 30%, 30% of the currently running DaemonSet
	// pods can be stopped for an update at any given time. The update starts
	// by stopping at most 30% of the currently running DaemonSet pods and then
	// brings up new DaemonSet pods in their place. Once the new pods are ready,
	// it then proceeds onto other DaemonSet pods, thus ensuring that at least
	// 70% of original number of DaemonSet pods are available at all times
	// during the update.
	// +optional
	// Example: when this is set to 30%, at most 30% of the total number of nodes
	// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
	// can have their pods stopped for an update at any given
	// time. The update starts by stopping at most 30% of those DaemonSet pods
	// and then brings up new DaemonSet pods in their place. Once the new pods
	// are available, it then proceeds onto other DaemonSet pods, thus ensuring
	// that at least 70% of original number of DaemonSet pods are available at
	// all times during the update.
	// +optional
	MaxUnavailable intstr.IntOrString

	// Minimum number of seconds for which a newly created DaemonSet pod should
	// be ready without any of its container crashing, for it to be considered
	// available. Defaults to 0 (pod will be considered available as soon as it
	// is ready).
	// +optional
	MinReadySeconds int
}
*/

// DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpec struct {
	// Selector is a label query over pods that are managed by the daemon set.
	// A label query over pods that are managed by the daemon set.
	// Must match in order to be controlled.
	// If empty, defaulted to labels on Pod template.
	// More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
	// +optional
	Selector *metav1.LabelSelector

	// Template is the object that describes the pod that will be created.
	// An object that describes the pod that will be created.
	// The DaemonSet will create exactly one copy of this pod on every node
	// that matches the template's node selector (or on every node if no node
	// selector is specified).
	// More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template
	Template api.PodTemplateSpec

	// TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
	/* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out.
	// Update strategy to replace existing DaemonSet pods with new pods.
	// An update strategy to replace existing DaemonSet pods with new pods.
	// +optional
	UpdateStrategy DaemonSetUpdateStrategy
	UpdateStrategy DaemonSetUpdateStrategy

	// Label key that is added to DaemonSet pods to distinguish between old and
	// new pod templates during DaemonSet update.
	// Users can set this to an empty string to indicate that the system should
	// not add any label. If unspecified, system uses
	// DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash").
	// Value of this key is hash of DaemonSetSpec.PodTemplateSpec.
	// No label is added if this is set to empty string.
	// The minimum number of seconds for which a newly created DaemonSet pod should
	// be ready without any of its container crashing, for it to be considered
	// available. Defaults to 0 (pod will be considered available as soon as it
	// is ready).
	// +optional
	UniqueLabelKey string
	*/
	MinReadySeconds int32

	// A sequence number representing a specific generation of the template.
	// Populated by the system. It can be set only during the creation.
	// +optional
	TemplateGeneration int64
}

const (
	// DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added
	// to daemon set pods to distinguish between old and new pod templates during
	// DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information.
	DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash"
)

// DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatus struct {
	// CurrentNumberScheduled is the number of nodes that are running at least 1
	// The number of nodes that are running at least 1
	// daemon pod and are supposed to run the daemon pod.
	CurrentNumberScheduled int32

	// NumberMisscheduled is the number of nodes that are running the daemon pod, but are
	// The number of nodes that are running the daemon pod, but are
	// not supposed to run the daemon pod.
	NumberMisscheduled int32

	// DesiredNumberScheduled is the total number of nodes that should be running the daemon
	// The total number of nodes that should be running the daemon
	// pod (including nodes correctly running the daemon pod).
	DesiredNumberScheduled int32

	// NumberReady is the number of nodes that should be running the daemon pod and have one
	// The number of nodes that should be running the daemon pod and have one
	// or more of the daemon pod running and ready.
	NumberReady int32

	// ObservedGeneration is the most recent generation observed by the daemon set controller.
	// The most recent generation observed by the daemon set controller.
	// +optional
	ObservedGeneration int64

	// The total number of nodes that are running updated daemon pod
	// +optional
	UpdatedNumberScheduled int32

	// The number of nodes that should be running the
	// daemon pod and have one or more of the daemon pod running and
	// available (ready for at least spec.minReadySeconds)
	// +optional
	NumberAvailable int32

	// The number of nodes that should be running the
	// daemon pod and have none of the daemon pod running and available
	// (ready for at least spec.minReadySeconds)
	// +optional
	NumberUnavailable int32
}

// +genclient=true
@@ -494,12 +497,12 @@ type DaemonSet struct {
	// +optional
	metav1.ObjectMeta

	// Spec defines the desired behavior of this daemon set.
	// The desired behavior of this daemon set.
	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
	// +optional
	Spec DaemonSetSpec

	// Status is the current status of this daemon set. This data may be
	// The current status of this daemon set. This data may be
	// out of date by some window of time.
	// Populated by the system.
	// Read-only.
@@ -508,6 +511,13 @@ type DaemonSet struct {
	Status DaemonSetStatus
}

const (
	// DaemonSetTemplateGenerationKey is the key of the labels that is added
	// to daemon set pods to distinguish between old and new pod templates
	// during DaemonSet template update.
	DaemonSetTemplateGenerationKey string = "pod-template-generation"
)

// DaemonSetList is a collection of daemon sets.
type DaemonSetList struct {
	metav1.TypeMeta
@@ -516,7 +526,7 @@ type DaemonSetList struct {
	// +optional
	metav1.ListMeta

	// Items is a list of daemon sets.
	// A list of daemon sets.
	Items []DaemonSet
}

@@ -905,6 +915,9 @@ var (
	Quobyte              FSType = "quobyte"
	AzureDisk            FSType = "azureDisk"
	PhotonPersistentDisk FSType = "photonPersistentDisk"
	Projected            FSType = "projected"
	PortworxVolume       FSType = "portworxVolume"
	ScaleIO              FSType = "scaleIO"
	All                  FSType = "*"
)
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD (generated, vendored): 73 lines deleted
@@ -1,73 +0,0 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "conversion.go",
        "defaults.go",
        "doc.go",
        "generated.pb.go",
        "register.go",
        "types.generated.go",
        "types.go",
        "types_swagger_doc_generated.go",
        "zz_generated.conversion.go",
        "zz_generated.deepcopy.go",
        "zz_generated.defaults.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/autoscaling:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/util/intstr:go_default_library",
        "//vendor:github.com/gogo/protobuf/proto",
        "//vendor:github.com/gogo/protobuf/sortkeys",
        "//vendor:github.com/ugorji/go/codec",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/conversion",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
        "//vendor:k8s.io/apimachinery/pkg/types",
    ],
)

go_test(
    name = "go_default_xtest",
    srcs = ["defaults_test.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/install:go_default_library",
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/extensions/install:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//pkg/util/intstr:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go (generated, vendored): 80 changes
@@ -22,11 +22,9 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/apimachinery/pkg/util/intstr"
	v1 "k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func addConversionFuncs(scheme *runtime.Scheme) error {
@@ -40,13 +38,10 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
		Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy,
		Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment,
		Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment,
		Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet,
		Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet,
		Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec,
		Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec,
		// autoscaling
		Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference,
		Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference,
		Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec,
		Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec,
	)
	if err != nil {
		return err
@@ -55,7 +50,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
	// Add field label conversions for kinds having selectable nothing but ObjectMeta fields.
	for _, k := range []string{"DaemonSet", "Deployment", "Ingress"} {
		kind := k // don't close over range variables
		err = api.Scheme.AddFieldLabelConversionFunc("extensions/v1beta1", kind,
		err = scheme.AddFieldLabelConversionFunc("extensions/v1beta1", kind,
			func(label, value string) (string, string, error) {
				switch label {
				case "metadata.name", "metadata.namespace":
@@ -226,6 +221,23 @@ func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme
	return nil
}

func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error {
	if out.MaxUnavailable == nil {
		out.MaxUnavailable = &intstr.IntOrString{}
	}
	if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil {
		return err
	}
	return nil
}

func Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error {
	if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil {
		return err
	}
	return nil
}

func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error {
	out.Replicas = new(int32)
	*out.Replicas = int32(in.Replicas)
@@ -248,53 +260,3 @@ func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetS
	}
	return nil
}

func Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(in *autoscaling.CrossVersionObjectReference, out *SubresourceReference, s conversion.Scope) error {
	out.Kind = in.Kind
	out.Name = in.Name
	out.APIVersion = in.APIVersion
	out.Subresource = "scale"
	return nil
}

func Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(in *SubresourceReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
	out.Kind = in.Kind
	out.Name = in.Name
	out.APIVersion = in.APIVersion
	return nil
}

func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error {
	if err := Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(&in.ScaleTargetRef, &out.ScaleRef, s); err != nil {
		return err
	}
	if in.MinReplicas != nil {
		out.MinReplicas = new(int32)
		*out.MinReplicas = *in.MinReplicas
	} else {
		out.MinReplicas = nil
	}
	out.MaxReplicas = in.MaxReplicas
	if in.TargetCPUUtilizationPercentage != nil {
		out.CPUUtilization = &CPUTargetUtilization{TargetPercentage: *in.TargetCPUUtilizationPercentage}
	}
	return nil
}

func Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
	if err := Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleRef, &out.ScaleTargetRef, s); err != nil {
		return err
	}
	if in.MinReplicas != nil {
		out.MinReplicas = new(int32)
		*out.MinReplicas = int32(*in.MinReplicas)
	} else {
		out.MinReplicas = nil
	}
	out.MaxReplicas = int32(in.MaxReplicas)
	if in.CPUUtilization != nil {
		out.TargetCPUUtilizationPercentage = new(int32)
		*out.TargetCPUUtilizationPercentage = int32(in.CPUUtilization.TargetPercentage)
	}
	return nil
}
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go (generated, vendored): 28 changes
@@ -19,8 +19,8 @@ package v1beta1
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -28,7 +28,6 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return scheme.AddDefaultingFuncs(
		SetDefaults_DaemonSet,
		SetDefaults_Deployment,
		SetDefaults_HorizontalPodAutoscaler,
		SetDefaults_ReplicaSet,
		SetDefaults_NetworkPolicy,
	)
@@ -48,6 +47,21 @@ func SetDefaults_DaemonSet(obj *DaemonSet) {
			obj.Labels = labels
		}
	}
	updateStrategy := &obj.Spec.UpdateStrategy
	if updateStrategy.Type == "" {
		updateStrategy.Type = OnDeleteDaemonSetStrategyType
	}
	if updateStrategy.Type == RollingUpdateDaemonSetStrategyType {
		if updateStrategy.RollingUpdate == nil {
			rollingUpdate := RollingUpdateDaemonSet{}
			updateStrategy.RollingUpdate = &rollingUpdate
		}
		if updateStrategy.RollingUpdate.MaxUnavailable == nil {
			// Set default MaxUnavailable as 1 by default.
			maxUnavailable := intstr.FromInt(1)
			updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable
		}
	}
}

func SetDefaults_Deployment(obj *Deployment) {
@@ -90,16 +104,6 @@ func SetDefaults_Deployment(obj *Deployment) {
	}
}

func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) {
	if obj.Spec.MinReplicas == nil {
		minReplicas := int32(1)
		obj.Spec.MinReplicas = &minReplicas
	}
	if obj.Spec.CPUUtilization == nil {
		obj.Spec.CPUUtilization = &CPUTargetUtilization{TargetPercentage: 80}
	}
}

func SetDefaults_ReplicaSet(obj *ReplicaSet) {
	labels := obj.Spec.Template.Labels
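The new defaulting added to SetDefaults_DaemonSet above can be exercised directly. A minimal sketch, assuming the vendored package layout shown in this diff (k8s.io/kubernetes/pkg/apis/extensions/v1beta1), not code from this commit:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
	// An empty update strategy is defaulted to OnDelete.
	ds := &v1beta1.DaemonSet{}
	v1beta1.SetDefaults_DaemonSet(ds)
	fmt.Println(ds.Spec.UpdateStrategy.Type) // OnDelete

	// A RollingUpdate strategy gets MaxUnavailable defaulted to 1.
	ds.Spec.UpdateStrategy = v1beta1.DaemonSetUpdateStrategy{Type: v1beta1.RollingUpdateDaemonSetStrategyType}
	v1beta1.SetDefaults_DaemonSet(ds)
	fmt.Println(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable) // an IntOrString holding 1
}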
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults_test.go (generated, vendored): 610 lines deleted
@@ -1,610 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1_test

import (
	"reflect"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/api"
	_ "k8s.io/kubernetes/pkg/api/install"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	_ "k8s.io/kubernetes/pkg/apis/extensions/install"
	. "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func TestSetDefaultDaemonSet(t *testing.T) {
	defaultLabels := map[string]string{"foo": "bar"}
	period := int64(v1.DefaultTerminationGracePeriodSeconds)
	defaultTemplate := v1.PodTemplateSpec{
		Spec: v1.PodSpec{
			DNSPolicy:                     v1.DNSClusterFirst,
			RestartPolicy:                 v1.RestartPolicyAlways,
			SecurityContext:               &v1.PodSecurityContext{},
			TerminationGracePeriodSeconds: &period,
			SchedulerName:                 api.DefaultSchedulerName,
		},
		ObjectMeta: metav1.ObjectMeta{
			Labels: defaultLabels,
		},
	}
	templateNoLabel := v1.PodTemplateSpec{
		Spec: v1.PodSpec{
			DNSPolicy:                     v1.DNSClusterFirst,
			RestartPolicy:                 v1.RestartPolicyAlways,
			SecurityContext:               &v1.PodSecurityContext{},
			TerminationGracePeriodSeconds: &period,
			SchedulerName:                 api.DefaultSchedulerName,
		},
	}
	tests := []struct {
		original *DaemonSet
		expected *DaemonSet
	}{
		{ // Labels change/defaulting test.
			original: &DaemonSet{
				Spec: DaemonSetSpec{
					Template: defaultTemplate,
				},
			},
			expected: &DaemonSet{
				ObjectMeta: metav1.ObjectMeta{
					Labels: defaultLabels,
				},
				Spec: DaemonSetSpec{
					Selector: &metav1.LabelSelector{
						MatchLabels: defaultLabels,
					},
					Template: defaultTemplate,
				},
			},
		},
		{ // Labels change/defaulting test.
			original: &DaemonSet{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"bar": "foo",
					},
				},
				Spec: DaemonSetSpec{
					Template: defaultTemplate,
				},
			},
			expected: &DaemonSet{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"bar": "foo",
					},
				},
				Spec: DaemonSetSpec{
					Selector: &metav1.LabelSelector{
						MatchLabels: defaultLabels,
					},
					Template: defaultTemplate,
				},
			},
		},
		{ // Update strategy.
			original: &DaemonSet{},
			expected: &DaemonSet{
				Spec: DaemonSetSpec{
					Template: templateNoLabel,
				},
			},
		},
		{ // Update strategy.
			original: &DaemonSet{
				Spec: DaemonSetSpec{},
			},
			expected: &DaemonSet{
				Spec: DaemonSetSpec{
					Template: templateNoLabel,
				},
			},
		},
		{ // Custom unique label key.
			original: &DaemonSet{
				Spec: DaemonSetSpec{},
			},
			expected: &DaemonSet{
				Spec: DaemonSetSpec{
					Template: templateNoLabel,
				},
			},
		},
	}

	for i, test := range tests {
		original := test.original
		expected := test.expected
		obj2 := roundTrip(t, runtime.Object(original))
		got, ok := obj2.(*DaemonSet)
		if !ok {
			t.Errorf("(%d) unexpected object: %v", i, got)
			t.FailNow()
		}
		if !reflect.DeepEqual(got.Spec, expected.Spec) {
			t.Errorf("(%d) got different than expected\ngot:\n\t%+v\nexpected:\n\t%+v", i, got.Spec, expected.Spec)
		}
	}
}

func TestSetDefaultDeployment(t *testing.T) {
	defaultIntOrString := intstr.FromInt(1)
	differentIntOrString := intstr.FromInt(5)
	period := int64(v1.DefaultTerminationGracePeriodSeconds)
	defaultTemplate := v1.PodTemplateSpec{
		Spec: v1.PodSpec{
			DNSPolicy:                     v1.DNSClusterFirst,
			RestartPolicy:                 v1.RestartPolicyAlways,
			SecurityContext:               &v1.PodSecurityContext{},
			TerminationGracePeriodSeconds: &period,
			SchedulerName:                 api.DefaultSchedulerName,
		},
	}
	tests := []struct {
		original *Deployment
		expected *Deployment
	}{
		{
			original: &Deployment{},
			expected: &Deployment{
				Spec: DeploymentSpec{
					Replicas: newInt32(1),
					Strategy: DeploymentStrategy{
						Type: RollingUpdateDeploymentStrategyType,
						RollingUpdate: &RollingUpdateDeployment{
							MaxSurge:       &defaultIntOrString,
							MaxUnavailable: &defaultIntOrString,
						},
					},
					Template: defaultTemplate,
				},
			},
		},
		{
			original: &Deployment{
				Spec: DeploymentSpec{
					Replicas: newInt32(5),
					Strategy: DeploymentStrategy{
						RollingUpdate: &RollingUpdateDeployment{
							MaxSurge: &differentIntOrString,
						},
					},
				},
			},
			expected: &Deployment{
				Spec: DeploymentSpec{
					Replicas: newInt32(5),
					Strategy: DeploymentStrategy{
						Type: RollingUpdateDeploymentStrategyType,
						RollingUpdate: &RollingUpdateDeployment{
							MaxSurge:       &differentIntOrString,
							MaxUnavailable: &defaultIntOrString,
						},
					},
					Template: defaultTemplate,
				},
			},
		},
		{
			original: &Deployment{
				Spec: DeploymentSpec{
					Replicas: newInt32(3),
					Strategy: DeploymentStrategy{
						Type:          RollingUpdateDeploymentStrategyType,
						RollingUpdate: nil,
					},
				},
			},
			expected: &Deployment{
				Spec: DeploymentSpec{
					Replicas: newInt32(3),
					Strategy: DeploymentStrategy{
						Type: RollingUpdateDeploymentStrategyType,
						RollingUpdate: &RollingUpdateDeployment{
							MaxSurge:       &defaultIntOrString,
							MaxUnavailable: &defaultIntOrString,
						},
					},
					Template: defaultTemplate,
				},
			},
		},
		{
			original: &Deployment{
				Spec: DeploymentSpec{
					Replicas: newInt32(5),
					Strategy: DeploymentStrategy{
						Type: RecreateDeploymentStrategyType,
					},
				},
			},
			expected: &Deployment{
				Spec: DeploymentSpec{
					Replicas: newInt32(5),
					Strategy: DeploymentStrategy{
						Type: RecreateDeploymentStrategyType,
					},
					Template: defaultTemplate,
				},
			},
		},
		{
			original: &Deployment{
				Spec: DeploymentSpec{
					Replicas: newInt32(5),
					Strategy: DeploymentStrategy{
						Type: RecreateDeploymentStrategyType,
					},
					ProgressDeadlineSeconds: newInt32(30),
				},
			},
			expected: &Deployment{
				Spec: DeploymentSpec{
					Replicas: newInt32(5),
					Strategy: DeploymentStrategy{
						Type: RecreateDeploymentStrategyType,
					},
					Template:                defaultTemplate,
					ProgressDeadlineSeconds: newInt32(30),
				},
			},
		},
	}

	for _, test := range tests {
		original := test.original
		expected := test.expected
		obj2 := roundTrip(t, runtime.Object(original))
		got, ok := obj2.(*Deployment)
		if !ok {
			t.Errorf("unexpected object: %v", got)
			t.FailNow()
		}
		if !reflect.DeepEqual(got.Spec, expected.Spec) {
			t.Errorf("object mismatch!\nexpected:\n\t%+v\ngot:\n\t%+v", got.Spec, expected.Spec)
		}
	}
}

func TestSetDefaultReplicaSet(t *testing.T) {
	tests := []struct {
		rs             *ReplicaSet
		expectLabels   bool
		expectSelector bool
	}{
		{
			rs: &ReplicaSet{
				Spec: ReplicaSetSpec{
					Template: v1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"foo": "bar",
							},
						},
					},
				},
			},
			expectLabels:   true,
			expectSelector: true,
		},
		{
			rs: &ReplicaSet{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"bar": "foo",
					},
				},
				Spec: ReplicaSetSpec{
					Template: v1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"foo": "bar",
							},
						},
					},
				},
			},
			expectLabels:   false,
			expectSelector: true,
		},
		{
			rs: &ReplicaSet{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"bar": "foo",
					},
				},
				Spec: ReplicaSetSpec{
					Selector: &metav1.LabelSelector{
						MatchLabels: map[string]string{
							"some": "other",
						},
					},
					Template: v1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"foo": "bar",
							},
						},
					},
				},
			},
			expectLabels:   false,
			expectSelector: false,
		},
		{
			rs: &ReplicaSet{
				Spec: ReplicaSetSpec{
					Selector: &metav1.LabelSelector{
						MatchLabels: map[string]string{
							"some": "other",
						},
					},
					Template: v1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"foo": "bar",
							},
						},
					},
				},
			},
			expectLabels:   true,
			expectSelector: false,
		},
	}

	for _, test := range tests {
		rs := test.rs
		obj2 := roundTrip(t, runtime.Object(rs))
		rs2, ok := obj2.(*ReplicaSet)
		if !ok {
			t.Errorf("unexpected object: %v", rs2)
			t.FailNow()
		}
		if test.expectSelector != reflect.DeepEqual(rs2.Spec.Selector.MatchLabels, rs2.Spec.Template.Labels) {
			if test.expectSelector {
				t.Errorf("expected: %v, got: %v", rs2.Spec.Template.Labels, rs2.Spec.Selector)
			} else {
				t.Errorf("unexpected equality: %v", rs.Spec.Selector)
			}
		}
		if test.expectLabels != reflect.DeepEqual(rs2.Labels, rs2.Spec.Template.Labels) {
			if test.expectLabels {
				t.Errorf("expected: %v, got: %v", rs2.Spec.Template.Labels, rs2.Labels)
			} else {
				t.Errorf("unexpected equality: %v", rs.Labels)
			}
		}
	}
}

func TestSetDefaultReplicaSetReplicas(t *testing.T) {
	tests := []struct {
		rs             ReplicaSet
		expectReplicas int32
	}{
		{
			rs: ReplicaSet{
				Spec: ReplicaSetSpec{
					Template: v1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"foo": "bar",
							},
						},
					},
				},
			},
			expectReplicas: 1,
		},
		{
			rs: ReplicaSet{
				Spec: ReplicaSetSpec{
					Replicas: newInt32(0),
					Template: v1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"foo": "bar",
							},
						},
					},
				},
			},
			expectReplicas: 0,
		},
		{
			rs: ReplicaSet{
				Spec: ReplicaSetSpec{
					Replicas: newInt32(3),
					Template: v1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"foo": "bar",
							},
						},
					},
				},
			},
			expectReplicas: 3,
		},
	}

	for _, test := range tests {
		rs := &test.rs
		obj2 := roundTrip(t, runtime.Object(rs))
		rs2, ok := obj2.(*ReplicaSet)
		if !ok {
			t.Errorf("unexpected object: %v", rs2)
			t.FailNow()
		}
		if rs2.Spec.Replicas == nil {
			t.Errorf("unexpected nil Replicas")
		} else if test.expectReplicas != *rs2.Spec.Replicas {
			t.Errorf("expected: %d replicas, got: %d", test.expectReplicas, *rs2.Spec.Replicas)
		}
	}
}

func TestDefaultRequestIsNotSetForReplicaSet(t *testing.T) {
	s := v1.PodSpec{}
	s.Containers = []v1.Container{
		{
			Resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("100m"),
				},
			},
		},
	}
	rs := &ReplicaSet{
		Spec: ReplicaSetSpec{
			Replicas: newInt32(3),
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: s,
			},
		},
	}
	output := roundTrip(t, runtime.Object(rs))
	rs2 := output.(*ReplicaSet)
	defaultRequest := rs2.Spec.Template.Spec.Containers[0].Resources.Requests
	requestValue := defaultRequest[v1.ResourceCPU]
	if requestValue.String() != "0" {
		t.Errorf("Expected 0 request value, got: %s", requestValue.String())
	}
}

func TestSetDefaultHorizontalPodAutoscalerMinReplicas(t *testing.T) {
	tests := []struct {
		hpa            HorizontalPodAutoscaler
		expectReplicas int32
	}{
		{
			hpa:            HorizontalPodAutoscaler{},
			expectReplicas: 1,
		},
		{
			hpa: HorizontalPodAutoscaler{
				Spec: HorizontalPodAutoscalerSpec{
					MinReplicas: newInt32(3),
				},
			},
			expectReplicas: 3,
		},
	}

	for _, test := range tests {
		hpa := &test.hpa
		obj2 := roundTrip(t, runtime.Object(hpa))
		hpa2, ok := obj2.(*HorizontalPodAutoscaler)
		if !ok {
			t.Errorf("unexpected object: %v", hpa2)
			t.FailNow()
		}
		if hpa2.Spec.MinReplicas == nil {
			t.Errorf("unexpected nil MinReplicas")
		} else if test.expectReplicas != *hpa2.Spec.MinReplicas {
			t.Errorf("expected: %d MinReplicas, got: %d", test.expectReplicas, *hpa2.Spec.MinReplicas)
		}
	}
}

func TestSetDefaultHorizontalPodAutoscalerCpuUtilization(t *testing.T) {
	tests := []struct {
		hpa               HorizontalPodAutoscaler
		expectUtilization int32
	}{
		{
			hpa:               HorizontalPodAutoscaler{},
			expectUtilization: 80,
		},
		{
			hpa: HorizontalPodAutoscaler{
				Spec: HorizontalPodAutoscalerSpec{
					CPUUtilization: &CPUTargetUtilization{
						TargetPercentage: int32(50),
					},
				},
			},
			expectUtilization: 50,
		},
	}

	for _, test := range tests {
		hpa := &test.hpa
		obj2 := roundTrip(t, runtime.Object(hpa))
		hpa2, ok := obj2.(*HorizontalPodAutoscaler)
		if !ok {
			t.Errorf("unexpected object: %v", hpa2)
			t.FailNow()
		}
		if hpa2.Spec.CPUUtilization == nil {
			t.Errorf("unexpected nil CPUUtilization")
		} else if test.expectUtilization != hpa2.Spec.CPUUtilization.TargetPercentage {
			t.Errorf("expected: %d CPUUtilization, got: %d", test.expectUtilization, hpa2.Spec.CPUUtilization.TargetPercentage)
		}
	}
}

func roundTrip(t *testing.T, obj runtime.Object) runtime.Object {
	data, err := runtime.Encode(api.Codecs.LegacyCodec(SchemeGroupVersion), obj)
	if err != nil {
		t.Errorf("%v\n %#v", err, obj)
		return nil
	}
	obj2, err := runtime.Decode(api.Codecs.UniversalDecoder(), data)
	if err != nil {
		t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj)
		return nil
	}
	obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object)
	err = api.Scheme.Convert(obj2, obj3, nil)
	if err != nil {
		t.Errorf("%v\nSource: %#v", err, obj2)
		return nil
	}
	return obj3
}

func newInt32(val int32) *int32 {
	p := new(int32)
	*p = val
	return p
}

func newString(val string) *string {
	p := new(string)
	*p = val
	return p
}

func newBool(val bool) *bool {
	b := new(bool)
	*b = val
	return b
}
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go (generated, vendored): 2473 changes (file diff suppressed because it is too large)
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto (generated, vendored): 196 changes
@@ -21,12 +21,13 @@ syntax = 'proto2';

package k8s.io.kubernetes.pkg.apis.extensions.v1beta1;

import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto";
import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";

// Package-wide variables from generator "generated".
option go_package = "v1beta1";
@@ -38,18 +39,12 @@ message APIVersion {
  optional string name = 1;
}

message CPUTargetUtilization {
  // fraction of the requested CPU that should be utilized/used,
  // e.g. 70 means that 70% of the requested CPU should be in use.
  optional int32 targetPercentage = 1;
}

message CustomMetricCurrentStatus {
  // Custom Metric name.
  optional string name = 1;

  // Custom Metric value (average).
  optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2;
  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
}

message CustomMetricCurrentStatusList {
@@ -62,7 +57,7 @@ message CustomMetricTarget {
  optional string name = 1;

  // Custom Metric value (average).
  optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2;
  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
}

message CustomMetricTargetList {
@@ -76,12 +71,12 @@ message DaemonSet {
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Spec defines the desired behavior of this daemon set.
  // The desired behavior of this daemon set.
  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
  // +optional
  optional DaemonSetSpec spec = 2;

  // Status is the current status of this daemon set. This data may be
  // The current status of this daemon set. This data may be
  // out of date by some window of time.
  // Populated by the system.
  // Read-only.
@@ -97,51 +92,98 @@ message DaemonSetList {
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // Items is a list of daemon sets.
  // A list of daemon sets.
  repeated DaemonSet items = 2;
}

// DaemonSetSpec is the specification of a daemon set.
message DaemonSetSpec {
  // Selector is a label query over pods that are managed by the daemon set.
  // A label query over pods that are managed by the daemon set.
  // Must match in order to be controlled.
  // If empty, defaulted to labels on Pod template.
  // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;

  // Template is the object that describes the pod that will be created.
  // An object that describes the pod that will be created.
  // The DaemonSet will create exactly one copy of this pod on every node
  // that matches the template's node selector (or on every node if no node
  // selector is specified).
  // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template
  optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2;

  // An update strategy to replace existing DaemonSet pods with new pods.
  // +optional
  optional DaemonSetUpdateStrategy updateStrategy = 3;

  // The minimum number of seconds for which a newly created DaemonSet pod should
  // be ready without any of its container crashing, for it to be considered
  // available. Defaults to 0 (pod will be considered available as soon as it
  // is ready).
  // +optional
  optional int32 minReadySeconds = 4;

  // A sequence number representing a specific generation of the template.
  // Populated by the system. It can be set only during the creation.
  // +optional
  optional int64 templateGeneration = 5;
}

// DaemonSetStatus represents the current status of a daemon set.
message DaemonSetStatus {
  // CurrentNumberScheduled is the number of nodes that are running at least 1
  // The number of nodes that are running at least 1
  // daemon pod and are supposed to run the daemon pod.
  // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
  optional int32 currentNumberScheduled = 1;

  // NumberMisscheduled is the number of nodes that are running the daemon pod, but are
  // The number of nodes that are running the daemon pod, but are
  // not supposed to run the daemon pod.
  // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
  optional int32 numberMisscheduled = 2;

  // DesiredNumberScheduled is the total number of nodes that should be running the daemon
  // The total number of nodes that should be running the daemon
  // pod (including nodes correctly running the daemon pod).
  // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
  optional int32 desiredNumberScheduled = 3;

  // NumberReady is the number of nodes that should be running the daemon pod and have one
  // The number of nodes that should be running the daemon pod and have one
  // or more of the daemon pod running and ready.
  optional int32 numberReady = 4;

  // ObservedGeneration is the most recent generation observed by the daemon set controller.
  // The most recent generation observed by the daemon set controller.
  // +optional
  optional int64 observedGeneration = 5;

  // The total number of nodes that are running updated daemon pod
  // +optional
  optional int32 updatedNumberScheduled = 6;

  // The number of nodes that should be running the
  // daemon pod and have one or more of the daemon pod running and
  // available (ready for at least spec.minReadySeconds)
  // +optional
  optional int32 numberAvailable = 7;

  // The number of nodes that should be running the
  // daemon pod and have none of the daemon pod running and available
  // (ready for at least spec.minReadySeconds)
  // +optional
  optional int32 numberUnavailable = 8;
}

message DaemonSetUpdateStrategy {
  // Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
  // Default is OnDelete.
  // +optional
  optional string type = 1;

  // Rolling update config params. Present only if type = "RollingUpdate".
  // ---
  // TODO: Update this to follow our convention for oneOf, whatever we decide it
  // to be. Same as DeploymentStrategy.RollingUpdate.
  // See https://github.com/kubernetes/kubernetes/issues/35345
  // +optional
  optional RollingUpdateDaemonSet rollingUpdate = 2;
}

// Deployment enables declarative updates for Pods and ReplicaSets.
@@ -337,73 +379,6 @@ message HTTPIngressRuleValue {
  repeated HTTPIngressPath paths = 1;
}

// configuration of a horizontal pod autoscaler.
message HorizontalPodAutoscaler {
  // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
  // +optional
  optional HorizontalPodAutoscalerSpec spec = 2;

  // current information about the autoscaler.
  // +optional
  optional HorizontalPodAutoscalerStatus status = 3;
}

// list of horizontal pod autoscaler objects.
message HorizontalPodAutoscalerList {
  // Standard list metadata.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // list of horizontal pod autoscaler objects.
  repeated HorizontalPodAutoscaler items = 2;
}

// specification of a horizontal pod autoscaler.
message HorizontalPodAutoscalerSpec {
  // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status,
  // and will set the desired number of pods by modifying its spec.
  optional SubresourceReference scaleRef = 1;

  // lower limit for the number of pods that can be set by the autoscaler, default 1.
  // +optional
  optional int32 minReplicas = 2;

  // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
  optional int32 maxReplicas = 3;

  // target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
  // if not specified it defaults to the target CPU utilization at 80% of the requested resources.
  // +optional
  optional CPUTargetUtilization cpuUtilization = 4;
}

// current status of a horizontal pod autoscaler
message HorizontalPodAutoscalerStatus {
  // most recent generation observed by this autoscaler.
  // +optional
  optional int64 observedGeneration = 1;

  // last time the HorizontalPodAutoscaler scaled the number of pods;
  // used by the autoscaler to control how often the number of pods is changed.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;

  // current number of replicas of pods managed by this autoscaler.
  optional int32 currentReplicas = 3;

  // desired number of replicas of pods managed by this autoscaler.
  optional int32 desiredReplicas = 4;

  // current average CPU utilization over all pods, represented as a percentage of requested CPU,
  // e.g. 70 means that an average pod is using now 70% of its requested CPU.
  // +optional
  optional int32 currentCPUUtilizationPercentage = 5;
}

// Host Port Range defines a range of host ports that will be enabled by a policy
// for pods to use. It requires both the start and end to be defined.
message HostPortRange {
@@ -450,7 +425,7 @@ message IngressBackend {
  optional string serviceName = 1;

  // Specifies the port of the referenced service.
  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString servicePort = 2;
  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2;
}

// IngressList is a collection of Ingress.
@@ -623,7 +598,7 @@ message NetworkPolicyPort {
  // If present, only traffic on the specified protocol AND port
  // will be matched.
  // +optional
  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2;
  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
}

message NetworkPolicySpec {
@@ -856,6 +831,26 @@ message RollbackConfig {
  optional int64 revision = 1;
}

// Spec to control the desired behavior of daemon set rolling update.
message RollingUpdateDaemonSet {
  // The maximum number of DaemonSet pods that can be unavailable during the
  // update. Value can be an absolute number (ex: 5) or a percentage of total
  // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
  // number is calculated from percentage by rounding up.
  // This cannot be 0.
  // Default value is 1.
  // Example: when this is set to 30%, at most 30% of the total number of nodes
  // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
  // can have their pods stopped for an update at any given
  // time. The update starts by stopping at most 30% of those DaemonSet pods
  // and then brings up new DaemonSet pods in their place. Once the new pods
  // are available, it then proceeds onto other DaemonSet pods, thus ensuring
  // that at least 70% of original number of DaemonSet pods are available at
  // all times during the update.
  // +optional
  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
}

// Spec to control the desired behavior of rolling update.
message RollingUpdateDeployment {
  // The maximum number of pods that can be unavailable during the update.
@@ -869,7 +864,7 @@ message RollingUpdateDeployment {
  // that the total number of pods available at all times during the update is at
  // least 70% of desired pods.
  // +optional
  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxUnavailable = 1;
  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;

  // The maximum number of pods that can be scheduled above the desired number of
  // pods.
@@ -883,7 +878,7 @@ message RollingUpdateDeployment {
  // new RC can be scaled up further, ensuring that total number of pods running
  // at any time during the update is atmost 130% of desired pods.
  // +optional
  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxSurge = 2;
  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
}

// Run A sUser Strategy Options defines the strategy type and any options used to create the strategy.
@@ -948,25 +943,6 @@ message ScaleStatus {
  optional string targetSelector = 3;
}

// SubresourceReference contains enough information to let you inspect or modify the referred subresource.
message SubresourceReference {
  // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
  // +optional
  optional string kind = 1;

  // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
// +optional
|
||||
optional string name = 2;
|
||||
|
||||
// API version of the referent
|
||||
// +optional
|
||||
optional string apiVersion = 3;
|
||||
|
||||
// Subresource name of the referent
|
||||
// +optional
|
||||
optional string subresource = 4;
|
||||
}
|
||||
|
||||
// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
|
||||
message SupplementalGroupsStrategyOptions {
|
||||
// Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.
|
||||
|
|
2
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go
generated
vendored
|
@ -44,8 +44,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
|
|||
&Deployment{},
|
||||
&DeploymentList{},
|
||||
&DeploymentRollback{},
|
||||
&HorizontalPodAutoscaler{},
|
||||
&HorizontalPodAutoscalerList{},
|
||||
&ReplicationControllerDummy{},
|
||||
&Scale{},
|
||||
&ThirdPartyResource{},
|
||||
|
|
3283
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go
generated
vendored
File diff suppressed because it is too large
214
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go
generated
vendored
|
@ -17,10 +17,10 @@ limitations under the License.
|
|||
package v1beta1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
)
|
||||
|
||||
// describes the attributes of a scale subresource
|
||||
|
@ -73,28 +73,6 @@ type ReplicationControllerDummy struct {
|
|||
metav1.TypeMeta `json:",inline"`
|
||||
}
|
||||
|
||||
// SubresourceReference contains enough information to let you inspect or modify the referred subresource.
|
||||
type SubresourceReference struct {
|
||||
// Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
|
||||
// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
|
||||
// +optional
|
||||
Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
|
||||
// API version of the referent
|
||||
// +optional
|
||||
APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
|
||||
// Subresource name of the referent
|
||||
// +optional
|
||||
Subresource string `json:"subresource,omitempty" protobuf:"bytes,4,opt,name=subresource"`
|
||||
}
|
||||
|
||||
type CPUTargetUtilization struct {
|
||||
// fraction of the requested CPU that should be utilized/used,
|
||||
// e.g. 70 means that 70% of the requested CPU should be in use.
|
||||
TargetPercentage int32 `json:"targetPercentage" protobuf:"varint,1,opt,name=targetPercentage"`
|
||||
}
|
||||
|
||||
// Alpha-level support for Custom Metrics in HPA (as annotations).
|
||||
type CustomMetricTarget struct {
|
||||
// Custom Metric name.
|
||||
|
@ -118,72 +96,6 @@ type CustomMetricCurrentStatusList struct {
|
|||
Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"`
|
||||
}
|
||||
|
||||
// specification of a horizontal pod autoscaler.
|
||||
type HorizontalPodAutoscalerSpec struct {
|
||||
// reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status,
|
||||
// and will set the desired number of pods by modifying its spec.
|
||||
ScaleRef SubresourceReference `json:"scaleRef" protobuf:"bytes,1,opt,name=scaleRef"`
|
||||
// lower limit for the number of pods that can be set by the autoscaler, default 1.
|
||||
// +optional
|
||||
MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
|
||||
// upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
|
||||
MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"`
|
||||
// target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
|
||||
// if not specified it defaults to the target CPU utilization at 80% of the requested resources.
|
||||
// +optional
|
||||
CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty" protobuf:"bytes,4,opt,name=cpuUtilization"`
|
||||
}
|
||||
|
||||
// current status of a horizontal pod autoscaler
|
||||
type HorizontalPodAutoscalerStatus struct {
|
||||
// most recent generation observed by this autoscaler.
|
||||
// +optional
|
||||
ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
|
||||
|
||||
// last time the HorizontalPodAutoscaler scaled the number of pods;
|
||||
// used by the autoscaler to control how often the number of pods is changed.
|
||||
// +optional
|
||||
LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"`
|
||||
|
||||
// current number of replicas of pods managed by this autoscaler.
|
||||
CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"`
|
||||
|
||||
// desired number of replicas of pods managed by this autoscaler.
|
||||
DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"`
|
||||
|
||||
// current average CPU utilization over all pods, represented as a percentage of requested CPU,
|
||||
// e.g. 70 means that an average pod is using now 70% of its requested CPU.
|
||||
// +optional
|
||||
CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"`
|
||||
}
|
||||
|
||||
// configuration of a horizontal pod autoscaler.
|
||||
type HorizontalPodAutoscaler struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
|
||||
// +optional
|
||||
Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
|
||||
// current information about the autoscaler.
|
||||
// +optional
|
||||
Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
// list of horizontal pod autoscaler objects.
|
||||
type HorizontalPodAutoscalerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// list of horizontal pod autoscaler objects.
|
||||
Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient=true
|
||||
// +nonNamespaced=true
|
||||
|
||||
|
@ -457,20 +369,19 @@ type DeploymentList struct {
|
|||
Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
|
||||
/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out.
|
||||
type DaemonSetUpdateStrategy struct {
|
||||
// Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate.
|
||||
// +optional
|
||||
Type DaemonSetUpdateStrategyType `json:"type,omitempty"`
|
||||
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
|
||||
// Default is OnDelete.
|
||||
// +optional
|
||||
Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
|
||||
|
||||
// Rolling update config params. Present only if DaemonSetUpdateStrategy =
|
||||
// RollingUpdate.
|
||||
// Rolling update config params. Present only if type = "RollingUpdate".
|
||||
//---
|
||||
// TODO: Update this to follow our convention for oneOf, whatever we decide it
|
||||
// to be. Same as DeploymentStrategy.RollingUpdate.
|
||||
// +optional
|
||||
RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty"`
|
||||
// See https://github.com/kubernetes/kubernetes/issues/35345
|
||||
// +optional
|
||||
RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
|
||||
}
|
||||
|
||||
type DaemonSetUpdateStrategyType string
|
||||
|
@ -478,6 +389,9 @@ type DaemonSetUpdateStrategyType string
|
|||
const (
|
||||
// Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
|
||||
RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
|
||||
|
||||
// Replace the old daemons only when it's killed
|
||||
OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
|
||||
)
|
||||
|
||||
// Spec to control the desired behavior of daemon set rolling update.
|
||||
|
@ -488,90 +402,91 @@ type RollingUpdateDaemonSet struct {
|
|||
// number is calculated from percentage by rounding up.
|
||||
// This cannot be 0.
|
||||
// Default value is 1.
|
||||
// Example: when this is set to 30%, 30% of the currently running DaemonSet
|
||||
// pods can be stopped for an update at any given time. The update starts
|
||||
// by stopping at most 30% of the currently running DaemonSet pods and then
|
||||
// brings up new DaemonSet pods in their place. Once the new pods are ready,
|
||||
// it then proceeds onto other DaemonSet pods, thus ensuring that at least
|
||||
// 70% of original number of DaemonSet pods are available at all times
|
||||
// during the update.
|
||||
// +optional
|
||||
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
|
||||
|
||||
// Minimum number of seconds for which a newly created DaemonSet pod should
|
||||
// be ready without any of its container crashing, for it to be considered
|
||||
// available. Defaults to 0 (pod will be considered available as soon as it
|
||||
// is ready).
|
||||
// +optional
|
||||
MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
|
||||
// Example: when this is set to 30%, at most 30% of the total number of nodes
|
||||
// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
|
||||
// can have their pods stopped for an update at any given
|
||||
// time. The update starts by stopping at most 30% of those DaemonSet pods
|
||||
// and then brings up new DaemonSet pods in their place. Once the new pods
|
||||
// are available, it then proceeds onto other DaemonSet pods, thus ensuring
|
||||
// that at least 70% of original number of DaemonSet pods are available at
|
||||
// all times during the update.
|
||||
// +optional
|
||||
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
|
||||
}
|
||||
*/
|
||||
|
||||
// DaemonSetSpec is the specification of a daemon set.
|
||||
type DaemonSetSpec struct {
|
||||
// Selector is a label query over pods that are managed by the daemon set.
|
||||
// A label query over pods that are managed by the daemon set.
|
||||
// Must match in order to be controlled.
|
||||
// If empty, defaulted to labels on Pod template.
|
||||
// More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
|
||||
// +optional
|
||||
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
|
||||
|
||||
// Template is the object that describes the pod that will be created.
|
||||
// An object that describes the pod that will be created.
|
||||
// The DaemonSet will create exactly one copy of this pod on every node
|
||||
// that matches the template's node selector (or on every node if no node
|
||||
// selector is specified).
|
||||
// More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template
|
||||
Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
|
||||
|
||||
// TODO(madhusudancs): Uncomment while implementing DaemonSet updates.
|
||||
/* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out.
|
||||
// Update strategy to replace existing DaemonSet pods with new pods.
|
||||
// An update strategy to replace existing DaemonSet pods with new pods.
|
||||
// +optional
|
||||
UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"`
|
||||
UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
|
||||
|
||||
// Label key that is added to DaemonSet pods to distinguish between old and
|
||||
// new pod templates during DaemonSet update.
|
||||
// Users can set this to an empty string to indicate that the system should
|
||||
// not add any label. If unspecified, system uses
|
||||
// DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash").
|
||||
// Value of this key is hash of DaemonSetSpec.PodTemplateSpec.
|
||||
// No label is added if this is set to empty string.
|
||||
// The minimum number of seconds for which a newly created DaemonSet pod should
|
||||
// be ready without any of its container crashing, for it to be considered
|
||||
// available. Defaults to 0 (pod will be considered available as soon as it
|
||||
// is ready).
|
||||
// +optional
|
||||
UniqueLabelKey *string `json:"uniqueLabelKey,omitempty"`
|
||||
*/
|
||||
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
|
||||
|
||||
// A sequence number representing a specific generation of the template.
|
||||
// Populated by the system. It can be set only during the creation.
|
||||
// +optional
|
||||
TemplateGeneration int64 `json:"templateGeneration,omitempty" protobuf:"varint,5,opt,name=templateGeneration"`
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added
|
||||
// to daemon set pods to distinguish between old and new pod templates during
|
||||
// DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information.
|
||||
DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash"
|
||||
)
|
||||
|
||||
// DaemonSetStatus represents the current status of a daemon set.
|
||||
type DaemonSetStatus struct {
|
||||
// CurrentNumberScheduled is the number of nodes that are running at least 1
|
||||
// The number of nodes that are running at least 1
|
||||
// daemon pod and are supposed to run the daemon pod.
|
||||
// More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
|
||||
CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
|
||||
|
||||
// NumberMisscheduled is the number of nodes that are running the daemon pod, but are
|
||||
// The number of nodes that are running the daemon pod, but are
|
||||
// not supposed to run the daemon pod.
|
||||
// More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
|
||||
NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
|
||||
|
||||
// DesiredNumberScheduled is the total number of nodes that should be running the daemon
|
||||
// The total number of nodes that should be running the daemon
|
||||
// pod (including nodes correctly running the daemon pod).
|
||||
// More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
|
||||
DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
|
||||
|
||||
// NumberReady is the number of nodes that should be running the daemon pod and have one
|
||||
// The number of nodes that should be running the daemon pod and have one
|
||||
// or more of the daemon pod running and ready.
|
||||
NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
|
||||
|
||||
// ObservedGeneration is the most recent generation observed by the daemon set controller.
|
||||
// The most recent generation observed by the daemon set controller.
|
||||
// +optional
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
|
||||
|
||||
// The total number of nodes that are running updated daemon pod
|
||||
// +optional
|
||||
UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
|
||||
|
||||
// The number of nodes that should be running the
|
||||
// daemon pod and have one or more of the daemon pod running and
|
||||
// available (ready for at least spec.minReadySeconds)
|
||||
// +optional
|
||||
NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
|
||||
|
||||
// The number of nodes that should be running the
|
||||
// daemon pod and have none of the daemon pod running and available
|
||||
// (ready for at least spec.minReadySeconds)
|
||||
// +optional
|
||||
NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
|
||||
}
|
||||
|
||||
// +genclient=true
|
||||
|
@ -584,12 +499,12 @@ type DaemonSet struct {
|
|||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Spec defines the desired behavior of this daemon set.
|
||||
// The desired behavior of this daemon set.
|
||||
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
|
||||
// Status is the current status of this daemon set. This data may be
|
||||
// The current status of this daemon set. This data may be
|
||||
// out of date by some window of time.
|
||||
// Populated by the system.
|
||||
// Read-only.
|
||||
|
@ -598,6 +513,13 @@ type DaemonSet struct {
|
|||
Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
const (
|
||||
// DaemonSetTemplateGenerationKey is the key of the labels that is added
|
||||
// to daemon set pods to distinguish between old and new pod templates
|
||||
// during DaemonSet template update.
|
||||
DaemonSetTemplateGenerationKey string = "pod-template-generation"
|
||||
)
|
||||
|
||||
// DaemonSetList is a collection of daemon sets.
|
||||
type DaemonSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
@ -606,7 +528,7 @@ type DaemonSetList struct {
|
|||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Items is a list of daemon sets.
|
||||
// A list of daemon sets.
|
||||
Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
|
|
112
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go
generated
vendored
|
@ -36,14 +36,6 @@ func (APIVersion) SwaggerDoc() map[string]string {
|
|||
return map_APIVersion
|
||||
}
|
||||
|
||||
var map_CPUTargetUtilization = map[string]string{
|
||||
"targetPercentage": "fraction of the requested CPU that should be utilized/used, e.g. 70 means that 70% of the requested CPU should be in use.",
|
||||
}
|
||||
|
||||
func (CPUTargetUtilization) SwaggerDoc() map[string]string {
|
||||
return map_CPUTargetUtilization
|
||||
}
|
||||
|
||||
var map_CustomMetricCurrentStatus = map[string]string{
|
||||
"name": "Custom Metric name.",
|
||||
"value": "Custom Metric value (average).",
|
||||
|
@ -66,8 +58,8 @@ func (CustomMetricTarget) SwaggerDoc() map[string]string {
|
|||
var map_DaemonSet = map[string]string{
|
||||
"": "DaemonSet represents the configuration of a daemon set.",
|
||||
"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
|
||||
"spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
|
||||
"status": "Status is the current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
|
||||
"spec": "The desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
|
||||
"status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
|
||||
}
|
||||
|
||||
func (DaemonSet) SwaggerDoc() map[string]string {
|
||||
|
@ -77,7 +69,7 @@ func (DaemonSet) SwaggerDoc() map[string]string {
|
|||
var map_DaemonSetList = map[string]string{
|
||||
"": "DaemonSetList is a collection of daemon sets.",
|
||||
"metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
|
||||
"items": "Items is a list of daemon sets.",
|
||||
"items": "A list of daemon sets.",
|
||||
}
|
||||
|
||||
func (DaemonSetList) SwaggerDoc() map[string]string {
|
||||
|
@ -85,9 +77,12 @@ func (DaemonSetList) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_DaemonSetSpec = map[string]string{
|
||||
"": "DaemonSetSpec is the specification of a daemon set.",
|
||||
"selector": "Selector is a label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors",
|
||||
"template": "Template is the object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template",
|
||||
"": "DaemonSetSpec is the specification of a daemon set.",
|
||||
"selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors",
|
||||
"template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template",
|
||||
"updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.",
|
||||
"minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
|
||||
"templateGeneration": "A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.",
|
||||
}
|
||||
|
||||
func (DaemonSetSpec) SwaggerDoc() map[string]string {
|
||||
|
@ -96,17 +91,29 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
|
|||
|
||||
var map_DaemonSetStatus = map[string]string{
|
||||
"": "DaemonSetStatus represents the current status of a daemon set.",
|
||||
"currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
|
||||
"numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
|
||||
"desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
|
||||
"numberReady": "NumberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
|
||||
"observedGeneration": "ObservedGeneration is the most recent generation observed by the daemon set controller.",
|
||||
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
|
||||
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
|
||||
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
|
||||
"numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
|
||||
"observedGeneration": "The most recent generation observed by the daemon set controller.",
|
||||
"updatedNumberScheduled": "The total number of nodes that are running updated daemon pod",
|
||||
"numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)",
|
||||
"numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)",
|
||||
}
|
||||
|
||||
func (DaemonSetStatus) SwaggerDoc() map[string]string {
|
||||
return map_DaemonSetStatus
|
||||
}
|
||||
|
||||
var map_DaemonSetUpdateStrategy = map[string]string{
|
||||
"type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.",
|
||||
"rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".",
|
||||
}
|
||||
|
||||
func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
|
||||
return map_DaemonSetUpdateStrategy
|
||||
}
|
||||
|
||||
var map_Deployment = map[string]string{
|
||||
"": "Deployment enables declarative updates for Pods and ReplicaSets.",
|
||||
"metadata": "Standard object metadata.",
|
||||
|
@ -224,52 +231,6 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string {
|
|||
return map_HTTPIngressRuleValue
|
||||
}
|
||||
|
||||
var map_HorizontalPodAutoscaler = map[string]string{
|
||||
"": "configuration of a horizontal pod autoscaler.",
|
||||
"metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
|
||||
"spec": "behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.",
|
||||
"status": "current information about the autoscaler.",
|
||||
}
|
||||
|
||||
func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string {
|
||||
return map_HorizontalPodAutoscaler
|
||||
}
|
||||
|
||||
var map_HorizontalPodAutoscalerList = map[string]string{
|
||||
"": "list of horizontal pod autoscaler objects.",
|
||||
"metadata": "Standard list metadata.",
|
||||
"items": "list of horizontal pod autoscaler objects.",
|
||||
}
|
||||
|
||||
func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string {
|
||||
return map_HorizontalPodAutoscalerList
|
||||
}
|
||||
|
||||
var map_HorizontalPodAutoscalerSpec = map[string]string{
|
||||
"": "specification of a horizontal pod autoscaler.",
|
||||
"scaleRef": "reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, and will set the desired number of pods by modifying its spec.",
|
||||
"minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.",
|
||||
"maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.",
|
||||
"cpuUtilization": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified it defaults to the target CPU utilization at 80% of the requested resources.",
|
||||
}
|
||||
|
||||
func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string {
|
||||
return map_HorizontalPodAutoscalerSpec
|
||||
}
|
||||
|
||||
var map_HorizontalPodAutoscalerStatus = map[string]string{
|
||||
"": "current status of a horizontal pod autoscaler",
|
||||
"observedGeneration": "most recent generation observed by this autoscaler.",
|
||||
"lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.",
|
||||
"currentReplicas": "current number of replicas of pods managed by this autoscaler.",
|
||||
"desiredReplicas": "desired number of replicas of pods managed by this autoscaler.",
|
||||
"currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.",
|
||||
}
|
||||
|
||||
func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string {
|
||||
return map_HorizontalPodAutoscalerStatus
|
||||
}
|
||||
|
||||
var map_HostPortRange = map[string]string{
|
||||
"": "Host Port Range defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.",
|
||||
"min": "min is the start of the range, inclusive.",
|
||||
|
@ -542,6 +503,15 @@ func (RollbackConfig) SwaggerDoc() map[string]string {
|
|||
return map_RollbackConfig
|
||||
}
|
||||
|
||||
var map_RollingUpdateDaemonSet = map[string]string{
|
||||
"": "Spec to control the desired behavior of daemon set rolling update.",
|
||||
"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
|
||||
}
|
||||
|
||||
func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
|
||||
return map_RollingUpdateDaemonSet
|
||||
}
|
||||
|
||||
var map_RollingUpdateDeployment = map[string]string{
|
||||
"": "Spec to control the desired behavior of rolling update.",
|
||||
"maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
|
||||
|
@ -603,18 +573,6 @@ func (ScaleStatus) SwaggerDoc() map[string]string {
|
|||
return map_ScaleStatus
|
||||
}
|
||||
|
||||
var map_SubresourceReference = map[string]string{
|
||||
"": "SubresourceReference contains enough information to let you inspect or modify the referred subresource.",
|
||||
"kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
|
||||
"name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
|
||||
"apiVersion": "API version of the referent",
|
||||
"subresource": "Subresource name of the referent",
|
||||
}
|
||||
|
||||
func (SubresourceReference) SwaggerDoc() map[string]string {
|
||||
return map_SubresourceReference
|
||||
}
|
||||
|
||||
var map_SupplementalGroupsStrategyOptions = map[string]string{
|
||||
"": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.",
|
||||
"rule": "Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.",
|
||||
|
|
255
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go
generated
vendored
|
@ -24,11 +24,10 @@ import (
|
|||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
intstr "k8s.io/apimachinery/pkg/util/intstr"
|
||||
api "k8s.io/kubernetes/pkg/api"
|
||||
api_v1 "k8s.io/kubernetes/pkg/api/v1"
|
||||
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
intstr "k8s.io/kubernetes/pkg/util/intstr"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
|
@ -58,6 +57,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec,
|
||||
Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus,
|
||||
Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus,
|
||||
Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy,
|
||||
Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy,
|
||||
Convert_v1beta1_Deployment_To_extensions_Deployment,
|
||||
Convert_extensions_Deployment_To_v1beta1_Deployment,
|
||||
Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition,
|
||||
|
@ -78,14 +79,6 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath,
|
||||
Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue,
|
||||
Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue,
|
||||
Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler,
|
||||
Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler,
|
||||
Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList,
|
||||
Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList,
|
||||
Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec,
|
||||
Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec,
|
||||
Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus,
|
||||
Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus,
|
||||
Convert_v1beta1_HostPortRange_To_extensions_HostPortRange,
|
||||
Convert_extensions_HostPortRange_To_v1beta1_HostPortRange,
|
||||
Convert_v1beta1_IDRange_To_extensions_IDRange,
|
||||
|
@ -138,6 +131,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
|
|||
Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy,
|
||||
Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig,
|
||||
Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig,
|
||||
Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet,
|
||||
Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet,
|
||||
Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment,
|
||||
Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment,
|
||||
Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions,
|
||||
|
@ -211,7 +206,11 @@ func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCur
|
|||
}
|
||||
|
||||
func autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error {
|
||||
out.Items = *(*[]CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]CustomMetricCurrentStatus, 0)
|
||||
} else {
|
||||
out.Items = *(*[]CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -249,7 +248,11 @@ func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList
|
|||
}
|
||||
|
||||
func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error {
|
||||
out.Items = *(*[]CustomMetricTarget)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]CustomMetricTarget, 0)
|
||||
} else {
|
||||
out.Items = *(*[]CustomMetricTarget)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -318,7 +321,7 @@ func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extension
|
|||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
out.Items = make([]DaemonSet, 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -332,6 +335,11 @@ func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSet
|
|||
if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.MinReadySeconds = in.MinReadySeconds
|
||||
out.TemplateGeneration = in.TemplateGeneration
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -344,6 +352,11 @@ func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extension
|
|||
if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.MinReadySeconds = in.MinReadySeconds
|
||||
out.TemplateGeneration = in.TemplateGeneration
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -357,6 +370,9 @@ func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *Daemo
|
|||
out.DesiredNumberScheduled = in.DesiredNumberScheduled
|
||||
out.NumberReady = in.NumberReady
|
||||
out.ObservedGeneration = in.ObservedGeneration
|
||||
out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
|
||||
out.NumberAvailable = in.NumberAvailable
|
||||
out.NumberUnavailable = in.NumberUnavailable
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -370,6 +386,9 @@ func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *exten
|
|||
out.DesiredNumberScheduled = in.DesiredNumberScheduled
|
||||
out.NumberReady = in.NumberReady
|
||||
out.ObservedGeneration = in.ObservedGeneration
|
||||
out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
|
||||
out.NumberAvailable = in.NumberAvailable
|
||||
out.NumberUnavailable = in.NumberUnavailable
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -377,6 +396,42 @@ func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extension
|
|||
return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error {
|
||||
out.Type = extensions.DaemonSetUpdateStrategyType(in.Type)
|
||||
if in.RollingUpdate != nil {
|
||||
in, out := &in.RollingUpdate, &out.RollingUpdate
|
||||
*out = new(extensions.RollingUpdateDaemonSet)
|
||||
if err := Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.RollingUpdate = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error {
|
||||
out.Type = DaemonSetUpdateStrategyType(in.Type)
|
||||
if in.RollingUpdate != nil {
|
||||
in, out := &in.RollingUpdate, &out.RollingUpdate
|
||||
*out = new(RollingUpdateDaemonSet)
|
||||
if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
out.RollingUpdate = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error {
|
||||
return autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
|
@ -466,7 +521,7 @@ func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensi
|
|||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
out.Items = make([]Deployment, 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -502,7 +557,7 @@ func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *ext
|
|||
}
|
||||
|
||||
func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error {
|
||||
if err := api.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
|
||||
|
@ -521,7 +576,7 @@ func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *Deploym
|
|||
}
|
||||
|
||||
func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error {
|
||||
if err := api.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
|
||||
|
@ -651,7 +706,11 @@ func Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in
|
|||
}
|
||||
|
||||
func autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error {
|
||||
out.Paths = *(*[]HTTPIngressPath)(unsafe.Pointer(&in.Paths))
|
||||
if in.Paths == nil {
|
||||
out.Paths = make([]HTTPIngressPath, 0)
|
||||
} else {
|
||||
out.Paths = *(*[]HTTPIngressPath)(unsafe.Pointer(&in.Paths))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -659,118 +718,6 @@ func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in
|
|||
return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error {
|
||||
return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]autoscaling.HorizontalPodAutoscaler, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]HorizontalPodAutoscaler, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error {
|
||||
return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
|
||||
// WARNING: in.ScaleRef requires manual conversion: does not exist in peer-type
|
||||
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
|
||||
out.MaxReplicas = in.MaxReplicas
|
||||
// WARNING: in.CPUUtilization requires manual conversion: does not exist in peer-type
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error {
|
||||
// WARNING: in.ScaleTargetRef requires manual conversion: does not exist in peer-type
|
||||
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
|
||||
out.MaxReplicas = in.MaxReplicas
|
||||
// WARNING: in.TargetCPUUtilizationPercentage requires manual conversion: does not exist in peer-type
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
|
||||
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
|
||||
out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime))
|
||||
out.CurrentReplicas = in.CurrentReplicas
|
||||
out.DesiredReplicas = in.DesiredReplicas
|
||||
out.CurrentCPUUtilizationPercentage = (*int32)(unsafe.Pointer(in.CurrentCPUUtilizationPercentage))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error {
|
||||
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
|
||||
out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime))
|
||||
out.CurrentReplicas = in.CurrentReplicas
|
||||
out.DesiredReplicas = in.DesiredReplicas
|
||||
out.CurrentCPUUtilizationPercentage = (*int32)(unsafe.Pointer(in.CurrentCPUUtilizationPercentage))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error {
|
||||
return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error {
|
||||
out.Min = int(in.Min)
|
||||
out.Max = int(in.Max)
|
||||
|
@ -873,7 +820,11 @@ func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out
|
|||
|
||||
func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]Ingress)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]Ingress, 0)
|
||||
} else {
|
||||
out.Items = *(*[]Ingress)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1045,7 +996,11 @@ func Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *Netwo
|
|||
|
||||
func autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]NetworkPolicy)(unsafe.Pointer(&in.Items))
|
||||
if in.Items == nil {
|
||||
out.Items = make([]NetworkPolicy, 0)
|
||||
} else {
|
||||
out.Items = *(*[]NetworkPolicy)(unsafe.Pointer(&in.Items))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -1168,7 +1123,7 @@ func autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyLi
			}
		}
	} else {
		out.Items = nil
		out.Items = make([]PodSecurityPolicy, 0)
	}
	return nil
}

@@ -1344,7 +1299,7 @@ func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensi
			}
		}
	} else {
		out.Items = nil
		out.Items = make([]ReplicaSet, 0)
	}
	return nil
}

@@ -1354,7 +1309,7 @@ func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.
}

func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error {
	if err := api.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
	if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds

@@ -1366,7 +1321,7 @@ func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *Replica
}

func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error {
	if err := api.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
	if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds

@@ -1439,15 +1394,25 @@ func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.
	return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s)
}

func autoConvert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}

func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}

func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/kubernetes/pkg/util/intstr.IntOrString vs k8s.io/kubernetes/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/kubernetes/pkg/util/intstr.IntOrString vs k8s.io/kubernetes/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}

func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/kubernetes/pkg/util/intstr.IntOrString vs *k8s.io/kubernetes/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/kubernetes/pkg/util/intstr.IntOrString vs *k8s.io/kubernetes/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}

@@ -1626,7 +1591,11 @@ func Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResource

func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]ThirdPartyResourceData)(unsafe.Pointer(&in.Items))
	if in.Items == nil {
		out.Items = make([]ThirdPartyResourceData, 0)
	} else {
		out.Items = *(*[]ThirdPartyResourceData)(unsafe.Pointer(&in.Items))
	}
	return nil
}

@@ -1646,7 +1615,11 @@ func Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList

func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]ThirdPartyResource)(unsafe.Pointer(&in.Items))
	if in.Items == nil {
		out.Items = make([]ThirdPartyResource, 0)
	} else {
		out.Items = *(*[]ThirdPartyResource)(unsafe.Pointer(&in.Items))
	}
	return nil
}

142
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go
generated
vendored
@@ -24,8 +24,8 @@ import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
	intstr "k8s.io/apimachinery/pkg/util/intstr"
	api_v1 "k8s.io/kubernetes/pkg/api/v1"
	intstr "k8s.io/kubernetes/pkg/util/intstr"
	reflect "reflect"
)

@ -38,7 +38,6 @@ func init() {
|
|||
func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
||||
return scheme.AddGeneratedDeepCopyFuncs(
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_APIVersion, InType: reflect.TypeOf(&APIVersion{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CPUTargetUtilization, InType: reflect.TypeOf(&CPUTargetUtilization{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatus, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatusList, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricTarget, InType: reflect.TypeOf(&CustomMetricTarget{})},
|
||||
|
@ -47,6 +46,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
|||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})},
|
||||
|
@ -57,10 +57,6 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
|||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_FSGroupStrategyOptions, InType: reflect.TypeOf(&FSGroupStrategyOptions{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressPath, InType: reflect.TypeOf(&HTTPIngressPath{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressRuleValue, InType: reflect.TypeOf(&HTTPIngressRuleValue{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HostPortRange, InType: reflect.TypeOf(&HostPortRange{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IDRange, InType: reflect.TypeOf(&IDRange{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Ingress, InType: reflect.TypeOf(&Ingress{})},
|
||||
|
@ -87,13 +83,13 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
|||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubresourceReference, InType: reflect.TypeOf(&SubresourceReference{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})},
|
||||
|
@ -111,15 +107,6 @@ func DeepCopy_v1beta1_APIVersion(in interface{}, out interface{}, c *conversion.
|
|||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_CPUTargetUtilization(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*CPUTargetUtilization)
|
||||
out := out.(*CPUTargetUtilization)
|
||||
*out = *in
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_CustomMetricCurrentStatus(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*CustomMetricCurrentStatus)
|
||||
|
@ -227,6 +214,9 @@ func DeepCopy_v1beta1_DaemonSetSpec(in interface{}, out interface{}, c *conversi
|
|||
if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := DeepCopy_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@ -240,6 +230,22 @@ func DeepCopy_v1beta1_DaemonSetStatus(in interface{}, out interface{}, c *conver
|
|||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*DaemonSetUpdateStrategy)
		out := out.(*DaemonSetUpdateStrategy)
		*out = *in
		if in.RollingUpdate != nil {
			in, out := &in.RollingUpdate, &out.RollingUpdate
			*out = new(RollingUpdateDaemonSet)
			if err := DeepCopy_v1beta1_RollingUpdateDaemonSet(*in, *out, c); err != nil {
				return err
			}
		}
		return nil
	}
}

func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*Deployment)
|
||||
|
@ -419,87 +425,6 @@ func DeepCopy_v1beta1_HTTPIngressRuleValue(in interface{}, out interface{}, c *c
|
|||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*HorizontalPodAutoscaler)
|
||||
out := out.(*HorizontalPodAutoscaler)
|
||||
*out = *in
|
||||
if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil {
|
||||
return err
|
||||
} else {
|
||||
out.ObjectMeta = *newVal.(*v1.ObjectMeta)
|
||||
}
|
||||
if err := DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*HorizontalPodAutoscalerList)
|
||||
out := out.(*HorizontalPodAutoscalerList)
|
||||
*out = *in
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]HorizontalPodAutoscaler, len(*in))
|
||||
for i := range *in {
|
||||
if err := DeepCopy_v1beta1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*HorizontalPodAutoscalerSpec)
|
||||
out := out.(*HorizontalPodAutoscalerSpec)
|
||||
*out = *in
|
||||
if in.MinReplicas != nil {
|
||||
in, out := &in.MinReplicas, &out.MinReplicas
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.CPUUtilization != nil {
|
||||
in, out := &in.CPUUtilization, &out.CPUUtilization
|
||||
*out = new(CPUTargetUtilization)
|
||||
**out = **in
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*HorizontalPodAutoscalerStatus)
|
||||
out := out.(*HorizontalPodAutoscalerStatus)
|
||||
*out = *in
|
||||
if in.ObservedGeneration != nil {
|
||||
in, out := &in.ObservedGeneration, &out.ObservedGeneration
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
if in.LastScaleTime != nil {
|
||||
in, out := &in.LastScaleTime, &out.LastScaleTime
|
||||
*out = new(v1.Time)
|
||||
**out = (*in).DeepCopy()
|
||||
}
|
||||
if in.CurrentCPUUtilizationPercentage != nil {
|
||||
in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_HostPortRange(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*HostPortRange)
|
||||
|
@ -970,6 +895,20 @@ func DeepCopy_v1beta1_RollbackConfig(in interface{}, out interface{}, c *convers
|
|||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*RollingUpdateDaemonSet)
		out := out.(*RollingUpdateDaemonSet)
		*out = *in
		if in.MaxUnavailable != nil {
			in, out := &in.MaxUnavailable, &out.MaxUnavailable
			*out = new(intstr.IntOrString)
			**out = **in
		}
		return nil
	}
}

func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*RollingUpdateDeployment)
|
||||
|
@ -1059,15 +998,6 @@ func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion
|
|||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_SubresourceReference(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*SubresourceReference)
|
||||
out := out.(*SubresourceReference)
|
||||
*out = *in
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
{
|
||||
in := in.(*SupplementalGroupsStrategyOptions)
|
||||
|
|
66
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go
generated
vendored
@@ -33,10 +33,6 @@ func RegisterDefaults(scheme *runtime.Scheme) error {
	scheme.AddTypeDefaultingFunc(&DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*DaemonSetList)) })
	scheme.AddTypeDefaultingFunc(&Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*Deployment)) })
	scheme.AddTypeDefaultingFunc(&DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*DeploymentList)) })
	scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscaler{}, func(obj interface{}) { SetObjectDefaults_HorizontalPodAutoscaler(obj.(*HorizontalPodAutoscaler)) })
	scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscalerList{}, func(obj interface{}) {
		SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*HorizontalPodAutoscalerList))
	})
	scheme.AddTypeDefaultingFunc(&NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*NetworkPolicy)) })
	scheme.AddTypeDefaultingFunc(&NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*NetworkPolicyList)) })
	scheme.AddTypeDefaultingFunc(&ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*ReplicaSet)) })

@ -74,6 +70,23 @@ func SetObjectDefaults_DaemonSet(in *DaemonSet) {
|
|||
if a.VolumeSource.AzureDisk != nil {
|
||||
v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
|
||||
}
|
||||
if a.VolumeSource.Projected != nil {
|
||||
v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
|
||||
for j := range a.VolumeSource.Projected.Sources {
|
||||
b := &a.VolumeSource.Projected.Sources[j]
|
||||
if b.DownwardAPI != nil {
|
||||
for k := range b.DownwardAPI.Items {
|
||||
c := &b.DownwardAPI.Items[k]
|
||||
if c.FieldRef != nil {
|
||||
v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.VolumeSource.ScaleIO != nil {
|
||||
v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
|
||||
}
|
||||
}
|
||||
for i := range in.Spec.Template.Spec.InitContainers {
|
||||
a := &in.Spec.Template.Spec.InitContainers[i]
|
||||
|
@ -198,6 +211,23 @@ func SetObjectDefaults_Deployment(in *Deployment) {
|
|||
if a.VolumeSource.AzureDisk != nil {
|
||||
v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
|
||||
}
|
||||
if a.VolumeSource.Projected != nil {
|
||||
v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
|
||||
for j := range a.VolumeSource.Projected.Sources {
|
||||
b := &a.VolumeSource.Projected.Sources[j]
|
||||
if b.DownwardAPI != nil {
|
||||
for k := range b.DownwardAPI.Items {
|
||||
c := &b.DownwardAPI.Items[k]
|
||||
if c.FieldRef != nil {
|
||||
v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.VolumeSource.ScaleIO != nil {
|
||||
v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
|
||||
}
|
||||
}
|
||||
for i := range in.Spec.Template.Spec.InitContainers {
|
||||
a := &in.Spec.Template.Spec.InitContainers[i]
|
||||
|
@@ -292,17 +322,6 @@ func SetObjectDefaults_DeploymentList(in *DeploymentList) {
	}
}

func SetObjectDefaults_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler) {
	SetDefaults_HorizontalPodAutoscaler(in)
}

func SetObjectDefaults_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList) {
	for i := range in.Items {
		a := &in.Items[i]
		SetObjectDefaults_HorizontalPodAutoscaler(a)
	}
}

func SetObjectDefaults_NetworkPolicy(in *NetworkPolicy) {
	SetDefaults_NetworkPolicy(in)
}

@ -344,6 +363,23 @@ func SetObjectDefaults_ReplicaSet(in *ReplicaSet) {
|
|||
if a.VolumeSource.AzureDisk != nil {
|
||||
v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
|
||||
}
|
||||
if a.VolumeSource.Projected != nil {
|
||||
v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
|
||||
for j := range a.VolumeSource.Projected.Sources {
|
||||
b := &a.VolumeSource.Projected.Sources[j]
|
||||
if b.DownwardAPI != nil {
|
||||
for k := range b.DownwardAPI.Items {
|
||||
c := &b.DownwardAPI.Items[k]
|
||||
if c.FieldRef != nil {
|
||||
v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.VolumeSource.ScaleIO != nil {
|
||||
v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
|
||||
}
|
||||
}
|
||||
for i := range in.Spec.Template.Spec.InitContainers {
|
||||
a := &in.Spec.Template.Spec.InitContainers[i]
|
||||
|
|
60
vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD
generated
vendored
@ -1,60 +0,0 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["validation.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/validation:go_default_library",
|
||||
"//pkg/apis/extensions:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/util:go_default_library",
|
||||
"//pkg/util/intstr:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1/validation",
|
||||
"//vendor:k8s.io/apimachinery/pkg/labels",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/validation",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/validation/field",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["validation_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/apis/extensions:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/util:go_default_library",
|
||||
"//pkg/util/intstr:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/validation/field",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
872
vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go
generated
vendored
@@ -1,872 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
	"fmt"
	"net"
	"reflect"
	"regexp"
	"strconv"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/api"
	apivalidation "k8s.io/kubernetes/pkg/api/validation"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/security/apparmor"
	"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
	psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func ValidateThirdPartyResourceUpdate(update, old *extensions.ThirdPartyResource) field.ErrorList {
	allErrs := field.ErrorList{}
	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
	allErrs = append(allErrs, ValidateThirdPartyResource(update)...)
	return allErrs
}

func ValidateThirdPartyResourceName(name string, prefix bool) []string {
|
||||
// Make sure it's a valid DNS subdomain
|
||||
if msgs := apivalidation.NameIsDNSSubdomain(name, prefix); len(msgs) != 0 {
|
||||
return msgs
|
||||
}
|
||||
|
||||
// Make sure it's at least three segments (kind + two-segment group name)
|
||||
if !prefix {
|
||||
parts := strings.Split(name, ".")
|
||||
if len(parts) < 3 {
|
||||
return []string{"must be at least three segments long: <kind>.<domain>.<tld>"}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ValidateThirdPartyResource(obj *extensions.ThirdPartyResource) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, false, ValidateThirdPartyResourceName, field.NewPath("metadata"))...)
|
||||
|
||||
versions := sets.String{}
|
||||
if len(obj.Versions) == 0 {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("versions"), "must specify at least one version"))
|
||||
}
|
||||
for ix := range obj.Versions {
|
||||
version := &obj.Versions[ix]
|
||||
if len(version.Name) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, "must not be empty"))
|
||||
} else {
|
||||
for _, msg := range validation.IsDNS1123Label(version.Name) {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, msg))
|
||||
}
|
||||
}
|
||||
if versions.Has(version.Name) {
|
||||
allErrs = append(allErrs, field.Duplicate(field.NewPath("versions").Index(ix).Child("name"), version))
|
||||
}
|
||||
versions.Insert(version.Name)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateDaemonSet tests if required fields in the DaemonSet are set.
|
||||
func ValidateDaemonSet(ds *extensions.DaemonSet) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set.
|
||||
func ValidateDaemonSetUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateDaemonSetStatus validates a DaemonSetStatus
|
||||
func validateDaemonSetStatus(status *extensions.DaemonSetStatus, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child("currentNumberScheduled"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child("numberMisscheduled"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberReady), fldPath.Child("numberReady"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateDaemonSetStatus validates tests if required fields in the DaemonSet Status section
|
||||
func ValidateDaemonSetStatusUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath("status"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set.
|
||||
func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
|
||||
|
||||
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
|
||||
if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`"))
|
||||
}
|
||||
if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for daemonset."))
|
||||
}
|
||||
|
||||
allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...)
|
||||
// Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid.
|
||||
allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, fldPath.Child("template", "spec", "volumes"))...)
|
||||
// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
|
||||
if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateDaemonSetName can be used to check whether the given daemon set name is valid.
|
||||
// Prefix indicates this name will be used as part of generation, in which case
|
||||
// trailing dashes are allowed.
|
||||
var ValidateDaemonSetName = apivalidation.NameIsDNSSubdomain
|
||||
|
||||
// Validates that the given name can be used as a deployment name.
|
||||
var ValidateDeploymentName = apivalidation.NameIsDNSSubdomain
|
||||
|
||||
func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
switch intOrPercent.Type {
|
||||
case intstr.String:
|
||||
for _, msg := range validation.IsValidPercent(intOrPercent.StrVal) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, msg))
|
||||
}
|
||||
case intstr.Int:
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(intOrPercent.IntValue()), fldPath)...)
|
||||
default:
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, "must be an integer or percentage (e.g '5%%')"))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func getPercentValue(intOrStringValue intstr.IntOrString) (int, bool) {
|
||||
if intOrStringValue.Type != intstr.String {
|
||||
return 0, false
|
||||
}
|
||||
if len(validation.IsValidPercent(intOrStringValue.StrVal)) != 0 {
|
||||
return 0, false
|
||||
}
|
||||
value, _ := strconv.Atoi(intOrStringValue.StrVal[:len(intOrStringValue.StrVal)-1])
|
||||
return value, true
|
||||
}
|
||||
|
||||
func getIntOrPercentValue(intOrStringValue intstr.IntOrString) int {
|
||||
value, isPercent := getPercentValue(intOrStringValue)
|
||||
if isPercent {
|
||||
return value
|
||||
}
|
||||
return intOrStringValue.IntValue()
|
||||
}
|
||||
|
||||
func IsNotMoreThan100Percent(intOrStringValue intstr.IntOrString, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
value, isPercent := getPercentValue(intOrStringValue)
|
||||
if !isPercent || value <= 100 {
|
||||
return nil
|
||||
}
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, intOrStringValue, "must not be greater than 100%"))
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateRollingUpdateDeployment(rollingUpdate *extensions.RollingUpdateDeployment, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
|
||||
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...)
|
||||
if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 && getIntOrPercentValue(rollingUpdate.MaxSurge) == 0 {
|
||||
// Both MaxSurge and MaxUnavailable cannot be zero.
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "may not be 0 when `maxSurge` is 0"))
|
||||
}
|
||||
// Validate that MaxUnavailable is not more than 100%.
|
||||
allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateDeploymentStrategy(strategy *extensions.DeploymentStrategy, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
switch strategy.Type {
|
||||
case extensions.RecreateDeploymentStrategyType:
|
||||
if strategy.RollingUpdate != nil {
|
||||
allErrs = append(allErrs, field.Forbidden(fldPath.Child("rollingUpdate"), "may not be specified when strategy `type` is '"+string(extensions.RecreateDeploymentStrategyType+"'")))
|
||||
}
|
||||
case extensions.RollingUpdateDeploymentStrategyType:
|
||||
// This should never happen since it's set and checked in defaults.go
|
||||
if strategy.RollingUpdate == nil {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "this should be defaulted and never be nil"))
|
||||
} else {
|
||||
allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
|
||||
}
|
||||
default:
|
||||
validValues := []string{string(extensions.RecreateDeploymentStrategyType), string(extensions.RollingUpdateDeploymentStrategyType)}
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateRollback(rollback *extensions.RollbackConfig, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
v := rollback.Revision
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(v), fldPath.Child("version"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// Validates given deployment spec.
|
||||
func ValidateDeploymentSpec(spec *extensions.DeploymentSpec, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
|
||||
|
||||
if spec.Selector == nil {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
|
||||
} else {
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
|
||||
if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment."))
|
||||
}
|
||||
}
|
||||
|
||||
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector."))
|
||||
} else {
|
||||
allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...)
|
||||
}
|
||||
|
||||
allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
|
||||
if spec.RevisionHistoryLimit != nil {
|
||||
// zero is a valid RevisionHistoryLimit
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...)
|
||||
}
|
||||
if spec.RollbackTo != nil {
|
||||
allErrs = append(allErrs, ValidateRollback(spec.RollbackTo, fldPath.Child("rollback"))...)
|
||||
}
|
||||
if spec.ProgressDeadlineSeconds != nil {
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ProgressDeadlineSeconds), fldPath.Child("progressDeadlineSeconds"))...)
|
||||
if *spec.ProgressDeadlineSeconds <= spec.MinReadySeconds {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("progressDeadlineSeconds"), spec.ProgressDeadlineSeconds, "must be greater than minReadySeconds."))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// Validates given deployment status.
|
||||
func ValidateDeploymentStatus(status *extensions.DeploymentStatus, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fldPath.Child("updatedReplicas"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UnavailableReplicas), fldPath.Child("unavailableReplicas"))...)
|
||||
msg := "cannot be greater than status.replicas"
|
||||
if status.ReadyReplicas > status.Replicas {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
|
||||
}
|
||||
if status.AvailableReplicas > status.Replicas {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
|
||||
}
|
||||
if status.ReadyReplicas > status.AvailableReplicas {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, "cannot be greater than availableReplicas"))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateDeploymentUpdate(update, old *extensions.Deployment) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateDeploymentStatusUpdate(update, old *extensions.Deployment) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, field.NewPath("status"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateDeployment(obj *extensions.Deployment) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateDeploymentRollback(obj *extensions.DeploymentRollback) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateAnnotations(obj.UpdatedAnnotations, field.NewPath("updatedAnnotations"))
|
||||
if len(obj.Name) == 0 {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("name"), "name is required"))
|
||||
}
|
||||
allErrs = append(allErrs, ValidateRollback(&obj.RollbackTo, field.NewPath("rollback"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateThirdPartyResourceDataUpdate(update, old *extensions.ThirdPartyResourceData) field.ErrorList {
|
||||
return apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
|
||||
}
|
||||
|
||||
func ValidateThirdPartyResourceData(obj *extensions.ThirdPartyResourceData) field.ErrorList {
|
||||
return apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, apivalidation.NameIsDNSLabel, field.NewPath("metadata"))
|
||||
}
|
||||
|
||||
// ValidateIngress tests if required fields in the Ingress are set.
|
||||
func ValidateIngress(ingress *extensions.Ingress) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMeta(&ingress.ObjectMeta, true, ValidateIngressName, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateIngressName validates that the given name can be used as an Ingress name.
|
||||
var ValidateIngressName = apivalidation.NameIsDNSSubdomain
|
||||
|
||||
func validateIngressTLS(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
// TODO: Perform a more thorough validation of spec.TLS.Hosts that takes
|
||||
// the wildcard spec from RFC 6125 into account.
|
||||
for _, itls := range spec.TLS {
|
||||
for i, host := range itls.Hosts {
|
||||
if strings.Contains(host, "*") {
|
||||
for _, msg := range validation.IsWildcardDNS1123Subdomain(host) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("hosts"), host, msg))
|
||||
}
|
||||
continue
|
||||
}
|
||||
for _, msg := range validation.IsDNS1123Subdomain(host) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("hosts"), host, msg))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateIngressSpec tests if required fields in the IngressSpec are set.
|
||||
func ValidateIngressSpec(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
// TODO: Is a default backend mandatory?
|
||||
if spec.Backend != nil {
|
||||
allErrs = append(allErrs, validateIngressBackend(spec.Backend, fldPath.Child("backend"))...)
|
||||
} else if len(spec.Rules) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, spec.Rules, "either `backend` or `rules` must be specified"))
|
||||
}
|
||||
if len(spec.Rules) > 0 {
|
||||
allErrs = append(allErrs, validateIngressRules(spec.Rules, fldPath.Child("rules"))...)
|
||||
}
|
||||
if len(spec.TLS) > 0 {
|
||||
allErrs = append(allErrs, validateIngressTLS(spec, fldPath.Child("tls"))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateIngressUpdate tests if required fields in the Ingress are set.
|
||||
func ValidateIngressUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateIngressStatusUpdate tests if required fields in the Ingress are set when updating status.
|
||||
func ValidateIngressStatusUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, apivalidation.ValidateLoadBalancerStatus(&ingress.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateIngressRules(ingressRules []extensions.IngressRule, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if len(ingressRules) == 0 {
|
||||
return append(allErrs, field.Required(fldPath, ""))
|
||||
}
|
||||
for i, ih := range ingressRules {
|
||||
if len(ih.Host) > 0 {
|
||||
if isIP := (net.ParseIP(ih.Host) != nil); isIP {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address"))
|
||||
}
|
||||
// TODO: Ports and ips are allowed in the host part of a url
|
||||
// according to RFC 3986, consider allowing them.
|
||||
if strings.Contains(ih.Host, "*") {
|
||||
for _, msg := range validation.IsWildcardDNS1123Subdomain(ih.Host) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg))
|
||||
}
|
||||
continue
|
||||
}
|
||||
for _, msg := range validation.IsDNS1123Subdomain(ih.Host) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg))
|
||||
}
|
||||
}
|
||||
allErrs = append(allErrs, validateIngressRuleValue(&ih.IngressRuleValue, fldPath.Index(0))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateIngressRuleValue(ingressRule *extensions.IngressRuleValue, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if ingressRule.HTTP != nil {
|
||||
allErrs = append(allErrs, validateHTTPIngressRuleValue(ingressRule.HTTP, fldPath.Child("http"))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateHTTPIngressRuleValue(httpIngressRuleValue *extensions.HTTPIngressRuleValue, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
if len(httpIngressRuleValue.Paths) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("paths"), ""))
|
||||
}
|
||||
for i, rule := range httpIngressRuleValue.Paths {
|
||||
if len(rule.Path) > 0 {
|
||||
if !strings.HasPrefix(rule.Path, "/") {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be an absolute path"))
|
||||
}
|
||||
// TODO: More draconian path regex validation.
|
||||
// Path must be a valid regex. This is the basic requirement.
|
||||
// In addition to this any characters not allowed in a path per
|
||||
// RFC 3986 section-3.3 cannot appear as a literal in the regex.
|
||||
// Consider the example: http://host/valid?#bar, everything after
|
||||
// the last '/' is a valid regex that matches valid#bar, which
|
||||
// isn't a valid path, because the path terminates at the first ?
|
||||
// or #. A more sophisticated form of validation would detect that
|
||||
// the user is confusing url regexes with path regexes.
|
||||
_, err := regexp.CompilePOSIX(rule.Path)
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be a valid regex"))
|
||||
}
|
||||
}
|
||||
allErrs = append(allErrs, validateIngressBackend(&rule.Backend, fldPath.Child("backend"))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateIngressBackend tests if a given backend is valid.
|
||||
func validateIngressBackend(backend *extensions.IngressBackend, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
// All backends must reference a single local service by name, and a single service port by name or number.
|
||||
if len(backend.ServiceName) == 0 {
|
||||
return append(allErrs, field.Required(fldPath.Child("serviceName"), ""))
|
||||
} else {
|
||||
for _, msg := range apivalidation.ValidateServiceName(backend.ServiceName, false) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceName"), backend.ServiceName, msg))
|
||||
}
|
||||
}
|
||||
allErrs = append(allErrs, apivalidation.ValidatePortNumOrName(backend.ServicePort, fldPath.Child("servicePort"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func ValidateScale(scale *extensions.Scale) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&scale.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...)
|
||||
|
||||
if scale.Spec.Replicas < 0 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "replicas"), scale.Spec.Replicas, "must be greater than or equal to 0"))
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateReplicaSetName can be used to check whether the given ReplicaSet
|
||||
// name is valid.
|
||||
// Prefix indicates this name will be used as part of generation, in which case
|
||||
// trailing dashes are allowed.
|
||||
var ValidateReplicaSetName = apivalidation.NameIsDNSSubdomain
|
||||
|
||||
// ValidateReplicaSet tests if required fields in the ReplicaSet are set.
|
||||
func ValidateReplicaSet(rs *extensions.ReplicaSet) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata"))
|
||||
allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateReplicaSetUpdate tests if required fields in the ReplicaSet are set.
|
||||
func ValidateReplicaSetUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
|
||||
allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateReplicaSetStatusUpdate tests if required fields in the ReplicaSet are set.
|
||||
func ValidateReplicaSetStatusUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
fldPath := field.NewPath("status")
|
||||
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.Replicas), fldPath.Child("replicas"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.FullyLabeledReplicas), fldPath.Child("fullyLabeledReplicas"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.ObservedGeneration), fldPath.Child("observedGeneration"))...)
|
||||
msg := "cannot be greater than status.replicas"
|
||||
if rs.Status.FullyLabeledReplicas > rs.Status.Replicas {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("fullyLabeledReplicas"), rs.Status.FullyLabeledReplicas, msg))
|
||||
}
|
||||
if rs.Status.ReadyReplicas > rs.Status.Replicas {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), rs.Status.ReadyReplicas, msg))
|
||||
}
|
||||
if rs.Status.AvailableReplicas > rs.Status.Replicas {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), rs.Status.AvailableReplicas, msg))
|
||||
}
|
||||
if rs.Status.ReadyReplicas > rs.Status.AvailableReplicas {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), rs.Status.ReadyReplicas, "cannot be greater than availableReplicas"))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set.
|
||||
func ValidateReplicaSetSpec(spec *extensions.ReplicaSetSpec, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
|
||||
|
||||
if spec.Selector == nil {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
|
||||
} else {
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
|
||||
if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment."))
|
||||
}
|
||||
}
|
||||
|
||||
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector."))
|
||||
} else {
|
||||
allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// Validates the given template and ensures that it is in accordance with the desired selector and replicas.
func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if template == nil {
		allErrs = append(allErrs, field.Required(fldPath, ""))
	} else {
		if !selector.Empty() {
			// Verify that the ReplicaSet selector matches the labels in template.
			labels := labels.Set(template.Labels)
			if !selector.Matches(labels) {
				allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
			}
		}
		allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...)
		if replicas > 1 {
			allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
		}
		// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
		if template.Spec.RestartPolicy != api.RestartPolicyAlways {
			allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
		}
	}
	return allErrs
}

// ValidatePodSecurityPolicyName can be used to check whether the given
// pod security policy name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidatePodSecurityPolicyName = apivalidation.NameIsDNSSubdomain

func ValidatePodSecurityPolicy(psp *extensions.PodSecurityPolicy) field.ErrorList {
	allErrs := field.ErrorList{}
	allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&psp.ObjectMeta, false, ValidatePodSecurityPolicyName, field.NewPath("metadata"))...)
	allErrs = append(allErrs, ValidatePodSecurityPolicySpecificAnnotations(psp.Annotations, field.NewPath("metadata").Child("annotations"))...)
	allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&psp.Spec, field.NewPath("spec"))...)
	return allErrs
}

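// Illustrative usage sketch (not part of the upstream file): registry/admission
// code would typically run the whole-object check before persisting a policy, e.g.
//
//	if errs := ValidatePodSecurityPolicy(psp); len(errs) > 0 {
//		// reject the object; each field.Error carries the offending field path and value
//	}
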
func ValidatePodSecurityPolicySpec(spec *extensions.PodSecurityPolicySpec, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	allErrs = append(allErrs, validatePSPRunAsUser(fldPath.Child("runAsUser"), &spec.RunAsUser)...)
	allErrs = append(allErrs, validatePSPSELinux(fldPath.Child("seLinux"), &spec.SELinux)...)
	allErrs = append(allErrs, validatePSPSupplementalGroup(fldPath.Child("supplementalGroups"), &spec.SupplementalGroups)...)
	allErrs = append(allErrs, validatePSPFSGroup(fldPath.Child("fsGroup"), &spec.FSGroup)...)
	allErrs = append(allErrs, validatePodSecurityPolicyVolumes(fldPath, spec.Volumes)...)
	allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.DefaultAddCapabilities, field.NewPath("defaultAddCapabilities"))...)
	allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.AllowedCapabilities, field.NewPath("allowedCapabilities"))...)

	return allErrs
}

func ValidatePodSecurityPolicySpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	if p := annotations[apparmor.DefaultProfileAnnotationKey]; p != "" {
		if err := apparmor.ValidateProfileFormat(p); err != nil {
			allErrs = append(allErrs, field.Invalid(fldPath.Key(apparmor.DefaultProfileAnnotationKey), p, err.Error()))
		}
	}
	if allowed := annotations[apparmor.AllowedProfilesAnnotationKey]; allowed != "" {
		for _, p := range strings.Split(allowed, ",") {
			if err := apparmor.ValidateProfileFormat(p); err != nil {
				allErrs = append(allErrs, field.Invalid(fldPath.Key(apparmor.AllowedProfilesAnnotationKey), allowed, err.Error()))
			}
		}
	}

	sysctlAnnotation := annotations[extensions.SysctlsPodSecurityPolicyAnnotationKey]
	sysctlFldPath := fldPath.Key(extensions.SysctlsPodSecurityPolicyAnnotationKey)
	sysctls, err := extensions.SysctlsFromPodSecurityPolicyAnnotation(sysctlAnnotation)
	if err != nil {
		allErrs = append(allErrs, field.Invalid(sysctlFldPath, sysctlAnnotation, err.Error()))
	} else {
		allErrs = append(allErrs, validatePodSecurityPolicySysctls(sysctlFldPath, sysctls)...)
	}

	if p := annotations[seccomp.DefaultProfileAnnotationKey]; p != "" {
		allErrs = append(allErrs, apivalidation.ValidateSeccompProfile(p, fldPath.Key(seccomp.DefaultProfileAnnotationKey))...)
	}
	if allowed := annotations[seccomp.AllowedProfilesAnnotationKey]; allowed != "" {
		for _, p := range strings.Split(allowed, ",") {
			allErrs = append(allErrs, apivalidation.ValidateSeccompProfile(p, fldPath.Key(seccomp.AllowedProfilesAnnotationKey))...)
		}
	}
	return allErrs
}

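// Illustrative sketch (not part of the upstream file): the annotations inspected
// above live in the policy's metadata; example values that would pass the checks
// (keys are the apparmor, sysctl and seccomp keys referenced in the function):
//
//	psp.Annotations = map[string]string{
//		apparmor.AllowedProfilesAnnotationKey:            "runtime/default",
//		extensions.SysctlsPodSecurityPolicyAnnotationKey: "kernel.shm_rmid_forced,net.*",
//		seccomp.AllowedProfilesAnnotationKey:             "docker/default",
//	}
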
// validatePSPSELinux validates the SELinux fields of PodSecurityPolicy.
func validatePSPSELinux(fldPath *field.Path, seLinux *extensions.SELinuxStrategyOptions) field.ErrorList {
	allErrs := field.ErrorList{}

	// ensure the selinux strategy has a valid rule
	supportedSELinuxRules := sets.NewString(string(extensions.SELinuxStrategyMustRunAs),
		string(extensions.SELinuxStrategyRunAsAny))
	if !supportedSELinuxRules.Has(string(seLinux.Rule)) {
		allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), seLinux.Rule, supportedSELinuxRules.List()))
	}

	return allErrs
}

// validatePSPRunAsUser validates the RunAsUser fields of PodSecurityPolicy.
func validatePSPRunAsUser(fldPath *field.Path, runAsUser *extensions.RunAsUserStrategyOptions) field.ErrorList {
	allErrs := field.ErrorList{}

	// ensure the user strategy has a valid rule
	supportedRunAsUserRules := sets.NewString(string(extensions.RunAsUserStrategyMustRunAs),
		string(extensions.RunAsUserStrategyMustRunAsNonRoot),
		string(extensions.RunAsUserStrategyRunAsAny))
	if !supportedRunAsUserRules.Has(string(runAsUser.Rule)) {
		allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), runAsUser.Rule, supportedRunAsUserRules.List()))
	}

	// validate range settings
	for idx, rng := range runAsUser.Ranges {
		allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...)
	}

	return allErrs
}

// validatePSPFSGroup validates the FSGroupStrategyOptions fields of the PodSecurityPolicy.
func validatePSPFSGroup(fldPath *field.Path, groupOptions *extensions.FSGroupStrategyOptions) field.ErrorList {
	allErrs := field.ErrorList{}

	supportedRules := sets.NewString(
		string(extensions.FSGroupStrategyMustRunAs),
		string(extensions.FSGroupStrategyRunAsAny),
	)
	if !supportedRules.Has(string(groupOptions.Rule)) {
		allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List()))
	}

	for idx, rng := range groupOptions.Ranges {
		allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...)
	}
	return allErrs
}

// validatePSPSupplementalGroup validates the SupplementalGroupsStrategyOptions fields of the PodSecurityPolicy.
func validatePSPSupplementalGroup(fldPath *field.Path, groupOptions *extensions.SupplementalGroupsStrategyOptions) field.ErrorList {
	allErrs := field.ErrorList{}

	supportedRules := sets.NewString(
		string(extensions.SupplementalGroupsStrategyRunAsAny),
		string(extensions.SupplementalGroupsStrategyMustRunAs),
	)
	if !supportedRules.Has(string(groupOptions.Rule)) {
		allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List()))
	}

	for idx, rng := range groupOptions.Ranges {
		allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...)
	}
	return allErrs
}

// validatePodSecurityPolicyVolumes validates the volume fields of PodSecurityPolicy.
func validatePodSecurityPolicyVolumes(fldPath *field.Path, volumes []extensions.FSType) field.ErrorList {
	allErrs := field.ErrorList{}
	allowed := psputil.GetAllFSTypesAsSet()
	// add in the * value since that is a pseudo type that is not included by default
	allowed.Insert(string(extensions.All))
	for _, v := range volumes {
		if !allowed.Has(string(v)) {
			allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumes"), v, allowed.List()))
		}
	}

	return allErrs
}

const sysctlPatternSegmentFmt string = "([a-z0-9][-_a-z0-9]*)?[a-z0-9*]"
const SysctlPatternFmt string = "(" + apivalidation.SysctlSegmentFmt + "\\.)*" + sysctlPatternSegmentFmt

var sysctlPatternRegexp = regexp.MustCompile("^" + SysctlPatternFmt + "$")

func IsValidSysctlPattern(name string) bool {
	if len(name) > apivalidation.SysctlMaxLength {
		return false
	}
	return sysctlPatternRegexp.MatchString(name)
}

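// Illustrative sketch (not part of the upstream file): the pattern grammar above
// accepts plain sysctl names as well as prefix patterns ending in '*', e.g.
//
//	IsValidSysctlPattern("kernel.shm_rmid_forced") // true
//	IsValidSysctlPattern("net.*")                  // true  (wildcard final segment)
//	IsValidSysctlPattern("net.ipv4.-bad")          // false (segment may not start with '-')
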
// validatePodSecurityPolicySysctls validates the sysctls fields of PodSecurityPolicy.
func validatePodSecurityPolicySysctls(fldPath *field.Path, sysctls []string) field.ErrorList {
	allErrs := field.ErrorList{}
	for i, s := range sysctls {
		if !IsValidSysctlPattern(string(s)) {
			allErrs = append(
				allErrs,
				field.Invalid(fldPath.Index(i), sysctls[i], fmt.Sprintf("must have at most %d characters and match regex %s",
					apivalidation.SysctlMaxLength,
					SysctlPatternFmt,
				)),
			)
		}
	}

	return allErrs
}

// validateIDRanges ensures the range is valid.
func validateIDRanges(fldPath *field.Path, rng extensions.IDRange) field.ErrorList {
	allErrs := field.ErrorList{}

	// if 0 <= Min <= Max then we do not need to validate max. It is always greater than or
	// equal to 0 and Min.
	if rng.Min < 0 {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), rng.Min, "min cannot be negative"))
	}
	if rng.Max < 0 {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("max"), rng.Max, "max cannot be negative"))
	}
	if rng.Min > rng.Max {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), rng.Min, "min cannot be greater than max"))
	}

	return allErrs
}

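// Illustrative sketch (not part of the upstream file): given the rules above,
//
//	validateIDRanges(fldPath, extensions.IDRange{Min: 1, Max: 65535}) // no errors
//	validateIDRanges(fldPath, extensions.IDRange{Min: 10, Max: 5})    // "min cannot be greater than max"
//	validateIDRanges(fldPath, extensions.IDRange{Min: -1, Max: 5})    // "min cannot be negative"
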
// validatePSPCapsAgainstDrops ensures an allowed cap is not listed in the required drops.
func validatePSPCapsAgainstDrops(requiredDrops []api.Capability, capsToCheck []api.Capability, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if requiredDrops == nil {
		return allErrs
	}
	for _, cap := range capsToCheck {
		if hasCap(cap, requiredDrops) {
			allErrs = append(allErrs, field.Invalid(fldPath, cap,
				fmt.Sprintf("capability is listed in %s and requiredDropCapabilities", fldPath.String())))
		}
	}
	return allErrs
}

// hasCap checks for needle in haystack.
func hasCap(needle api.Capability, haystack []api.Capability) bool {
	for _, c := range haystack {
		if needle == c {
			return true
		}
	}
	return false
}

// ValidatePodSecurityPolicyUpdate validates a PSP for updates.
func ValidatePodSecurityPolicyUpdate(old *extensions.PodSecurityPolicy, new *extensions.PodSecurityPolicy) field.ErrorList {
	allErrs := field.ErrorList{}
	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
	allErrs = append(allErrs, ValidatePodSecurityPolicySpecificAnnotations(new.Annotations, field.NewPath("metadata").Child("annotations"))...)
	allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&new.Spec, field.NewPath("spec"))...)
	return allErrs
}

// ValidateNetworkPolicyName can be used to check whether the given networkpolicy
// name is valid.
func ValidateNetworkPolicyName(name string, prefix bool) []string {
	return apivalidation.NameIsDNSSubdomain(name, prefix)
}

// ValidateNetworkPolicySpec tests if required fields in the networkpolicy spec are set.
func ValidateNetworkPolicySpec(spec *extensions.NetworkPolicySpec, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(&spec.PodSelector, fldPath.Child("podSelector"))...)

	// Validate ingress rules.
	for _, i := range spec.Ingress {
		// TODO: Update From to be a pointer to slice as soon as auto-generation supports it.
		for _, f := range i.From {
			numFroms := 0
			if f.PodSelector != nil {
				numFroms++
				allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.PodSelector, fldPath.Child("podSelector"))...)
			}
			if f.NamespaceSelector != nil {
				if numFroms > 0 {
					allErrs = append(allErrs, field.Forbidden(fldPath, "may not specify more than 1 from type"))
				} else {
					numFroms++
					allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.NamespaceSelector, fldPath.Child("namespaces"))...)
				}
			}

			if numFroms == 0 {
				// At least one of PodSelector and NamespaceSelector must be defined.
				allErrs = append(allErrs, field.Required(fldPath, "must specify a from type"))
			}
		}
	}
	return allErrs
}

// ValidateNetworkPolicy validates a networkpolicy.
func ValidateNetworkPolicy(np *extensions.NetworkPolicy) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMeta(&np.ObjectMeta, true, ValidateNetworkPolicyName, field.NewPath("metadata"))
	allErrs = append(allErrs, ValidateNetworkPolicySpec(&np.Spec, field.NewPath("spec"))...)
	return allErrs
}

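// Illustrative sketch (not part of the upstream file): a "from" entry that sets
// both selectors in a single peer is rejected with a Forbidden error by the loop above, e.g.
//
//	peer := extensions.NetworkPolicyPeer{
//		PodSelector:       &metav1.LabelSelector{MatchLabels: map[string]string{"app": "db"}},
//		NamespaceSelector: &metav1.LabelSelector{},
//	}
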
// ValidateNetworkPolicyUpdate tests if an update to a NetworkPolicy is valid.
func ValidateNetworkPolicyUpdate(update, old *extensions.NetworkPolicy) field.ErrorList {
	allErrs := field.ErrorList{}
	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
	if !reflect.DeepEqual(update.Spec, old.Spec) {
		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to networkpolicy spec are forbidden."))
	}
	return allErrs
}
2328
vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation_test.go
generated
vendored
2328
vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation_test.go
generated
vendored
File diff suppressed because it is too large
Load diff
30
vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go
generated
vendored
30
vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go
generated
vendored
@ -24,8 +24,8 @@ import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
	intstr "k8s.io/apimachinery/pkg/util/intstr"
	api "k8s.io/kubernetes/pkg/api"
	intstr "k8s.io/kubernetes/pkg/util/intstr"
	reflect "reflect"
)
@ -46,6 +46,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Deployment, InType: reflect.TypeOf(&Deployment{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})},
@ -82,6 +83,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})},
		conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})},
@ -212,6 +214,9 @@ func DeepCopy_extensions_DaemonSetSpec(in interface{}, out interface{}, c *conve
		if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil {
			return err
		}
		if err := DeepCopy_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil {
			return err
		}
		return nil
	}
}
@ -225,6 +230,20 @@ func DeepCopy_extensions_DaemonSetStatus(in interface{}, out interface{}, c *con
	}
}

func DeepCopy_extensions_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*DaemonSetUpdateStrategy)
		out := out.(*DaemonSetUpdateStrategy)
		*out = *in
		if in.RollingUpdate != nil {
			in, out := &in.RollingUpdate, &out.RollingUpdate
			*out = new(RollingUpdateDaemonSet)
			**out = **in
		}
		return nil
	}
}

func DeepCopy_extensions_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*Deployment)
@ -862,6 +881,15 @@ func DeepCopy_extensions_RollbackConfig(in interface{}, out interface{}, c *conv
	}
}

func DeepCopy_extensions_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*RollingUpdateDaemonSet)
		out := out.(*RollingUpdateDaemonSet)
		*out = *in
		return nil
	}
}

func DeepCopy_extensions_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error {
	{
		in := in.(*RollingUpdateDeployment)