Update code for latest k8s

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2018-02-12 12:13:07 -08:00
parent 8f5e37a83c
commit 5f7ac28059
792 changed files with 25023 additions and 19841 deletions

View file

@ -116,7 +116,8 @@ var (
// MetricSpec specifies how to scale based on a single metric
// (only `type` and one other matching field should be set at once).
type MetricSpec struct {
// Type is the type of metric source. It should match one of the fields below.
// Type is the type of metric source. It should be one of "Object",
// "Pods" or "Resource", each mapping to a matching field in the object.
Type MetricSourceType
// Object refers to a metric describing a single kubernetes object
@ -261,7 +262,8 @@ type HorizontalPodAutoscalerCondition struct {
// MetricStatus describes the last-read state of a single metric.
type MetricStatus struct {
// Type is the type of metric source. It will match one of the fields below.
// Type is the type of metric source. It will be one of "Object",
// "Pods" or "Resource", each corresponds to a matching field in the object.
Type MetricSourceType
// Object refers to a metric describing a single kubernetes object
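
As a rough, self-contained sketch of the invariant these comments describe (Type names the metric source, and exactly one matching field should be populated), the snippet below uses simplified local stand-ins rather than the real autoscaling types, so every type and field name here is illustrative only:

package main

import "fmt"

// metricSpec is a simplified stand-in for MetricSpec: a Type plus one
// pointer field per source kind ("Object", "Pods", "Resource").
type metricSpec struct {
	Type     string
	Object   *struct{}
	Pods     *struct{}
	Resource *struct{}
}

// typeMatchesField reports whether exactly one field is set and it is the
// one named by Type.
func typeMatchesField(s metricSpec) bool {
	set := map[string]bool{
		"Object":   s.Object != nil,
		"Pods":     s.Pods != nil,
		"Resource": s.Resource != nil,
	}
	count := 0
	for _, populated := range set {
		if populated {
			count++
		}
	}
	return count == 1 && set[s.Type]
}

func main() {
	good := metricSpec{Type: "Resource", Resource: &struct{}{}}
	bad := metricSpec{Type: "Pods", Resource: &struct{}{}}
	fmt.Println(typeMatchesField(good), typeMatchesField(bad)) // true false
}
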

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View file

@ -254,46 +254,18 @@ func IsIntegerResourceName(str string) bool {
return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str))
}
// Extended and HugePages resources
func IsScalarResourceName(name core.ResourceName) bool {
return IsExtendedResourceName(name) || IsHugePageResourceName(name)
}
// this function aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *core.Service) bool {
return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP != ""
}
// this function aims to check if the service's cluster IP is requested or not
func IsServiceIPRequested(service *core.Service) bool {
// ExternalName services are CNAME aliases to external ones. Ignore the IP.
if service.Spec.Type == core.ServiceTypeExternalName {
return false
}
return service.Spec.ClusterIP == ""
}
var standardFinalizers = sets.NewString(
string(core.FinalizerKubernetes),
metav1.FinalizerOrphanDependents,
metav1.FinalizerDeleteDependents,
)
// HasAnnotation returns true if the passed-in annotation exists
func HasAnnotation(obj core.ObjectMeta, ann string) bool {
_, found := obj.Annotations[ann]
return found
}
// SetMetaDataAnnotation sets the annotation and value
func SetMetaDataAnnotation(obj *core.ObjectMeta, ann string, value string) {
if obj.Annotations == nil {
obj.Annotations = make(map[string]string)
}
obj.Annotations[ann] = value
}
func IsStandardFinalizerName(str string) bool {
return standardFinalizers.Has(str)
}
@ -482,37 +454,6 @@ func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool
return true
}
// TolerationToleratesTaint checks if the toleration tolerates the taint.
func TolerationToleratesTaint(toleration *core.Toleration, taint *core.Taint) bool {
if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
return false
}
if toleration.Key != taint.Key {
return false
}
// TODO: Use proper defaulting when Toleration becomes a field of PodSpec
if (len(toleration.Operator) == 0 || toleration.Operator == core.TolerationOpEqual) && toleration.Value == taint.Value {
return true
}
if toleration.Operator == core.TolerationOpExists {
return true
}
return false
}
// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
func TaintToleratedByTolerations(taint *core.Taint, tolerations []core.Toleration) bool {
tolerated := false
for i := range tolerations {
if TolerationToleratesTaint(&tolerations[i], taint) {
tolerated = true
break
}
}
return tolerated
}
// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations
// and converts it to the []Taint type in core.
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) {

View file

@ -269,7 +269,7 @@ type VolumeSource struct {
Quobyte *QuobyteVolumeSource
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
// provisioned/attached using an exec based plugin.
// +optional
FlexVolume *FlexVolumeSource
@ -352,9 +352,9 @@ type PersistentVolumeSource struct {
// +optional
ISCSI *ISCSIPersistentVolumeSource
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
// provisioned/attached using an exec based plugin.
// +optional
FlexVolume *FlexVolumeSource
FlexVolume *FlexPersistentVolumeSource
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// +optional
Cinder *CinderVolumeSource
@ -475,6 +475,7 @@ type PersistentVolumeReclaimPolicy string
const (
// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
// The volume plugin must support Recycling.
// DEPRECATED: The PersistentVolumeReclaimRecycle called Recycle is being deprecated. See announcement here: https://groups.google.com/forum/#!topic/kubernetes-dev/uexugCza84I
PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
// The volume plugin must support Deletion.
@ -576,6 +577,8 @@ type PersistentVolumeClaimConditionType string
const (
// A user-triggered resize of the PVC has been started
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
)
type PersistentVolumeClaimCondition struct {
@ -867,8 +870,34 @@ type FCVolumeSource struct {
WWIDs []string
}
// FlexPersistentVolumeSource represents a generic persistent volume resource that is
// provisioned/attached using an exec based plugin.
type FlexPersistentVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string
// Optional: SecretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *SecretReference
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool
// Optional: Extra driver options if any.
// +optional
Options map[string]string
}
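
For orientation, here is a hedged sketch of how this new type might be populated through the matching external API types in k8s.io/api/core/v1 (which the conversion code later in this commit maps to and from); the driver name, secret reference and option key are made-up placeholders:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// A PersistentVolumeSource backed by the new FlexPersistentVolumeSource.
	src := v1.PersistentVolumeSource{
		FlexVolume: &v1.FlexPersistentVolumeSource{
			Driver: "example.com/lvm", // hypothetical driver name
			FSType: "ext4",            // default depends on the FlexVolume script
			SecretRef: &v1.SecretReference{ // hypothetical secret holding driver credentials
				Name:      "flex-secret",
				Namespace: "kube-system",
			},
			ReadOnly: false,
			// Driver-specific options; keys must not use the reserved
			// kubernetes.io / k8s.io namespaces (see the validation change below).
			Options: map[string]string{"volumeID": "vol-0"},
		},
	}
	fmt.Printf("%+v\n", src.FlexVolume)
}
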
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
// provisioned/attached using an exec based plugin.
type FlexVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string
@ -1589,6 +1618,12 @@ type CSIPersistentVolumeSource struct {
// Defaults to false (read/write).
// +optional
ReadOnly bool
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string
}
// ContainerPort represents a network port in a single container
@ -2338,13 +2373,13 @@ type PodAffinityTerm struct {
LabelSelector *metav1.LabelSelector
// namespaces specifies which namespaces the labelSelector applies to (matches against);
// null or empty list means "this pod's namespace"
// +optional
Namespaces []string
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
// the labelSelector in the specified namespaces, where co-located is defined as running on a node
// whose value of the label with key topologyKey matches that of any node on which any of the
// selected pods is running.
// Empty topologyKey is not allowed.
// +optional
TopologyKey string
}
@ -2542,9 +2577,10 @@ type PodSpec struct {
// file if specified. This is only valid for non-hostNetwork pods.
// +optional
HostAliases []HostAlias
// If specified, indicates the pod's priority. "SYSTEM" is a special keyword
// which indicates the highest priority. Any other name must be defined by
// creating a PriorityClass object with that name.
// If specified, indicates the pod's priority. "system-node-critical" and
// "system-cluster-critical" are two special keywords which indicate the
// highest priorities with the former being the highest priority. Any other
// name must be defined by creating a PriorityClass object with that name.
// If not specified, the pod priority will be default or zero if there is no
// default.
// +optional
@ -2692,6 +2728,13 @@ type PodStatus struct {
// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'
// +optional
Reason string
// nominatedNodeName is set when this pod preempts other pods on the node, but it cannot be
// scheduled right away as preemption victims receive their graceful termination periods.
// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
// give the resources on this node to a higher priority pod that is created after preemption.
// +optional
NominatedNodeName string
// +optional
HostIP string
@ -4022,7 +4065,7 @@ type Event struct {
type EventSeries struct {
// Number of occurrences in this series up to the last heartbeat time
Count int32
// Time of the last occurence observed
// Time of the last occurrence observed
LastObservedTime metav1.MicroTime
// State of this Series: Ongoing or Finished
State EventSeriesState
@ -4357,8 +4400,21 @@ type ConfigMap struct {
// Data contains the configuration data.
// Each key must consist of alphanumeric characters, '-', '_' or '.'.
// Values with non-UTF-8 byte sequences must use the BinaryData field.
// The keys stored in Data must not overlap with the keys in
// the BinaryData field; this is enforced during the validation process.
// +optional
Data map[string]string
// BinaryData contains the binary data.
// Each key must consist of alphanumeric characters, '-', '_' or '.'.
// BinaryData can contain byte sequences that are not in the UTF-8 range.
// The keys stored in BinaryData must not overlap with the ones in
// the Data field; this is enforced during the validation process.
// Using this field will require 1.10+ apiserver and
// kubelet.
// +optional
BinaryData map[string][]byte
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
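
A minimal sketch of a ConfigMap carrying both fields, using the external v1 types that this internal type round-trips with; the object name and keys are placeholders. Per the comments above, keys may not collide across Data and BinaryData, and BinaryData needs a 1.10+ apiserver and kubelet:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cm := v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "example-config"}, // placeholder name
		Data: map[string]string{
			"app.properties": "mode=fast", // UTF-8 only
		},
		BinaryData: map[string][]byte{
			"logo.png": {0x89, 0x50, 0x4e, 0x47}, // arbitrary bytes are allowed here
		},
	}
	fmt.Println(len(cm.Data), len(cm.BinaryData)) // 1 1
}
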

View file

@ -163,7 +163,8 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
"spec.restartPolicy",
"spec.schedulerName",
"status.phase",
"status.podIP":
"status.podIP",
"status.nominatedNodeName":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":

View file

@ -89,15 +89,6 @@ func IsServiceIPSet(service *v1.Service) bool {
return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != ""
}
// this function aims to check if the service's cluster IP is requested or not
func IsServiceIPRequested(service *v1.Service) bool {
// ExternalName services are CNAME aliases to external ones. Ignore the IP.
if service.Spec.Type == v1.ServiceTypeExternalName {
return false
}
return service.Spec.ClusterIP == ""
}
// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice,
// only if they do not already exist
func AddToNodeAddresses(addresses *[]v1.NodeAddress, addAddresses ...v1.NodeAddress) {
@ -416,20 +407,6 @@ func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string {
return ""
}
// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field.
func PersistentVolumeClaimHasClass(claim *v1.PersistentVolumeClaim) bool {
// Use beta annotation first
if _, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found {
return true
}
if claim.Spec.StorageClassName != nil {
return true
}
return false
}
// GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations
// and converts it to the NodeAffinity type in api.
// TODO: update when storage node affinity graduates to beta

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -21,6 +21,8 @@ limitations under the License.
package v1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -28,7 +30,6 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
unsafe "unsafe"
)
func init() {
@ -141,6 +142,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_core_ExecAction_To_v1_ExecAction,
Convert_v1_FCVolumeSource_To_core_FCVolumeSource,
Convert_core_FCVolumeSource_To_v1_FCVolumeSource,
Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource,
Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource,
Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource,
Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource,
Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource,
@ -616,6 +619,7 @@ func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(
out.Driver = in.Driver
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
return nil
}
@ -628,6 +632,7 @@ func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(
out.Driver = in.Driver
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
return nil
}
@ -835,6 +840,7 @@ func Convert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.Compone
func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data))
out.BinaryData = *(*map[string][]byte)(unsafe.Pointer(&in.BinaryData))
return nil
}
@ -846,6 +852,7 @@ func Convert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMa
func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data))
out.BinaryData = *(*map[string][]byte)(unsafe.Pointer(&in.BinaryData))
return nil
}
@ -1758,6 +1765,34 @@ func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, o
return autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in, out, s)
}
func autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in, out, s)
}
func autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
@ -3250,7 +3285,7 @@ func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1
out.CephFS = (*core.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS))
out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC))
out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker))
out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.FlexVolume = (*core.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.AzureFile = (*core.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile))
out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
@ -3278,7 +3313,7 @@ func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *co
out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD))
out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
out.ISCSI = (*v1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI))
out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.FlexVolume = (*v1.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder))
out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS))
out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC))
@ -3907,6 +3942,7 @@ func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodS
out.Conditions = *(*[]core.PodCondition)(unsafe.Pointer(&in.Conditions))
out.Message = in.Message
out.Reason = in.Reason
out.NominatedNodeName = in.NominatedNodeName
out.HostIP = in.HostIP
out.PodIP = in.PodIP
out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime))
@ -3926,6 +3962,7 @@ func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodS
out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions))
out.Message = in.Message
out.Reason = in.Reason
out.NominatedNodeName = in.NominatedNodeName
out.HostIP = in.HostIP
out.PodIP = in.PodIP
out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime))

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View file

@ -51,6 +51,7 @@ import (
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/security/apparmor"
)
@ -63,6 +64,8 @@ const isInvalidQuotaResource string = `must be a standard resource for quota`
const fieldImmutableErrorMsg string = apimachineryvalidation.FieldImmutableErrorMsg
const isNotIntegerErrorMsg string = `must be an integer`
const isNotPositiveErrorMsg string = `must be greater than zero`
const csiDriverNameRexpErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
const csiDriverNameRexpFmt string = `^[a-zA-Z0-9][-a-zA-Z0-9_.]{0,61}[a-zA-Z-0-9]$`
var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255)
var fileModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive"
@ -74,6 +77,8 @@ var iscsiInitiatorIqnRegex = regexp.MustCompile(`iqn\.\d{4}-\d{2}\.([[:alnum:]-.
var iscsiInitiatorEuiRegex = regexp.MustCompile(`^eui.[[:alnum:]]{16}$`)
var iscsiInitiatorNaaRegex = regexp.MustCompile(`^naa.[[:alnum:]]{32}$`)
var csiDriverNameRexp = regexp.MustCompile(csiDriverNameRexpFmt)
// ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue
func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList {
allErrs := field.ErrorList{}
@ -201,10 +206,6 @@ func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath
return allErrs
}
func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
return apimachineryvalidation.ValidateOwnerReferences(ownerReferences, fldPath)
}
// ValidateNameFunc validates that the provided name is valid for a given resource type.
// Not all resources have the same validation rules for names. Prefix is true
// if the name will have a value appended to it. If the name is not valid,
@ -212,15 +213,6 @@ func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *f
// value that were not valid. Otherwise this returns an empty list or nil.
type ValidateNameFunc apimachineryvalidation.ValidateNameFunc
// maskTrailingDash replaces the final character of a string with a subdomain safe
// value if is a dash.
func maskTrailingDash(name string) string {
if strings.HasSuffix(name, "-") {
return name[:len(name)-2] + "a"
}
return name
}
// ValidatePodName can be used to check whether the given pod name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
@ -291,11 +283,6 @@ func NameIsDNSSubdomain(name string, prefix bool) []string {
return apimachineryvalidation.NameIsDNSSubdomain(name, prefix)
}
// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label.
func NameIsDNSLabel(name string, prefix bool) []string {
return apimachineryvalidation.NameIsDNSLabel(name, prefix)
}
// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label.
func NameIsDNS1035Label(name string, prefix bool) []string {
return apimachineryvalidation.NameIsDNS1035Label(name, prefix)
@ -361,10 +348,6 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *fiel
return allErrs
}
func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList {
return apimachineryvalidation.ValidateNoNewFinalizers(newFinalizers, oldFinalizers, fldPath)
}
func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) {
allErrs := field.ErrorList{}
@ -1257,6 +1240,27 @@ func validateFlexVolumeSource(fv *core.FlexVolumeSource, fldPath *field.Path) fi
return allErrs
}
func validateFlexPersistentVolumeSource(fv *core.FlexPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(fv.Driver) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
}
// Make sure user-specified options don't use kubernetes namespaces
for k := range fv.Options {
namespace := k
if parts := strings.SplitN(k, "/", 2); len(parts) == 2 {
namespace = parts[0]
}
normalized := "." + strings.ToLower(namespace)
if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved"))
}
}
return allErrs
}
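
A self-contained restatement of the reserved-namespace rule above, with the key-checking logic pulled out into a small helper so a few sample option keys (illustrative only) can be run through it:

package main

import (
	"fmt"
	"strings"
)

// reservedOptionKey mirrors the check in validateFlexPersistentVolumeSource:
// an option key is rejected when its namespace part ends in kubernetes.io or k8s.io.
func reservedOptionKey(k string) bool {
	namespace := k
	if parts := strings.SplitN(k, "/", 2); len(parts) == 2 {
		namespace = parts[0]
	}
	normalized := "." + strings.ToLower(namespace)
	return strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io")
}

func main() {
	for _, k := range []string{"volumeID", "alpha.kubernetes.io/foo", "storage.k8s.io/bar", "example.com/baz"} {
		fmt.Println(k, reservedOptionKey(k)) // only the two reserved-namespace keys print true
	}
}
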
func validateAzureFile(azure *core.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if azure.SecretName == "" {
@ -1433,6 +1437,14 @@ func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldP
allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
}
if len(csi.Driver) > 63 {
allErrs = append(allErrs, field.TooLong(fldPath.Child("driver"), csi.Driver, 63))
}
if !csiDriverNameRexp.MatchString(csi.Driver) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("driver"), csi.Driver, validation.RegexError(csiDriverNameRexpErrMsg, csiDriverNameRexpFmt, "csi-hostpath")))
}
if len(csi.VolumeHandle) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), ""))
}
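
To make the new driver-name constraint concrete, the sketch below compiles the same pattern as csiDriverNameRexpFmt and runs a few illustrative names through it together with the 63-character length cap:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as csiDriverNameRexpFmt above.
var csiDriverName = regexp.MustCompile(`^[a-zA-Z0-9][-a-zA-Z0-9_.]{0,61}[a-zA-Z-0-9]$`)

func main() {
	for _, name := range []string{"csi-hostpath", "io.kubernetes.storage.csi.flex", "-bad-leading-dash", ""} {
		valid := len(name) <= 63 && csiDriverName.MatchString(name)
		fmt.Printf("%q valid=%v\n", name, valid) // first two true, last two false
	}
}
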
@ -1588,7 +1600,7 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList {
}
if pv.Spec.FlexVolume != nil {
numVolumes++
allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...)
allErrs = append(allErrs, validateFlexPersistentVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...)
}
if pv.Spec.AzureFile != nil {
if numVolumes > 0 {
@ -1773,27 +1785,36 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld
func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...)
newPvcClone := newPvc.DeepCopy()
oldPvcClone := oldPvc.DeepCopy()
// PVController needs to update PVC.Spec w/ VolumeName.
// Claims are immutable in order to enforce quota, range limits, etc. without gaming the system.
if len(oldPvc.Spec.VolumeName) == 0 {
// volumeName changes are allowed once.
// Reset back to empty string after equality check
oldPvc.Spec.VolumeName = newPvc.Spec.VolumeName
defer func() { oldPvc.Spec.VolumeName = "" }()
oldPvcClone.Spec.VolumeName = newPvcClone.Spec.VolumeName
}
if validateStorageClassUpgrade(oldPvcClone.Annotations, newPvcClone.Annotations,
oldPvcClone.Spec.StorageClassName, newPvcClone.Spec.StorageClassName) {
newPvcClone.Spec.StorageClassName = nil
metav1.SetMetaDataAnnotation(&newPvcClone.ObjectMeta, core.BetaStorageClassAnnotation, oldPvcClone.Annotations[core.BetaStorageClassAnnotation])
} else {
// storageclass annotation should be immutable after creation
// TODO: remove Beta when no longer needed
allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...)
}
if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
newPVCSpecCopy := newPvc.Spec.DeepCopy()
// let's make sure the storage values are the same.
if newPvc.Status.Phase == core.ClaimBound && newPVCSpecCopy.Resources.Requests != nil {
newPVCSpecCopy.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"]
if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.Resources.Requests != nil {
newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"]
}
oldSize := oldPvc.Spec.Resources.Requests["storage"]
newSize := newPvc.Spec.Resources.Requests["storage"]
if !apiequality.Semantic.DeepEqual(*newPVCSpecCopy, oldPvc.Spec) {
if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "is immutable after creation except resources.requests for bound claims"))
}
if newSize.Cmp(oldSize) < 0 {
@ -1803,23 +1824,33 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl
} else {
// changes to Spec are not allowed, but updates to labels and some annotations are OK.
// no-op updates pass validation.
if !apiequality.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) {
if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation"))
}
}
// storageclass annotation should be immutable after creation
// TODO: remove Beta when no longer needed
allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...)
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...)
}
newPvc.Status = oldPvc.Status
return allErrs
}
// Provide an upgrade path from PVC with storage class specified in beta
// annotation to storage class specified in attribute. We allow update of
// StorageClassName only if the following four conditions are met at the same time:
// 1. The old pvc's StorageClassAnnotation is set
// 2. The old pvc's StorageClassName is not set
// 3. The new pvc's StorageClassName is set and equal to the old value in annotation
// 4. If the new pvc's StorageClassAnnotation is set, it must be equal to the old pv/pvc's StorageClassAnnotation
func validateStorageClassUpgrade(oldAnnotations, newAnnotations map[string]string, oldScName, newScName *string) bool {
oldSc, oldAnnotationExist := oldAnnotations[core.BetaStorageClassAnnotation]
newScInAnnotation, newAnnotationExist := newAnnotations[core.BetaStorageClassAnnotation]
return oldAnnotationExist /* condition 1 */ &&
oldScName == nil /* condition 2*/ &&
(newScName != nil && *newScName == oldSc) /* condition 3 */ &&
(!newAnnotationExist || newScInAnnotation == oldSc) /* condition 4 */
}
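
A compact, self-contained sketch of the four conditions above, with the PVC reduced to its beta annotation map and optional StorageClassName; the annotation key is the usual volume.beta.kubernetes.io/storage-class and the class name "fast" is a placeholder:

package main

import "fmt"

const betaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"

// allowStorageClassUpgrade mirrors validateStorageClassUpgrade on simplified inputs.
func allowStorageClassUpgrade(oldAnn, newAnn map[string]string, oldScName, newScName *string) bool {
	oldSc, oldAnnotationExist := oldAnn[betaStorageClassAnnotation]
	newScInAnnotation, newAnnotationExist := newAnn[betaStorageClassAnnotation]
	return oldAnnotationExist && // 1. old annotation is set
		oldScName == nil && // 2. old StorageClassName is not set
		(newScName != nil && *newScName == oldSc) && // 3. new StorageClassName equals the old annotation
		(!newAnnotationExist || newScInAnnotation == oldSc) // 4. new annotation, if present, is unchanged
}

func main() {
	sc := "fast"
	oldAnn := map[string]string{betaStorageClassAnnotation: "fast"}
	fmt.Println(allowStorageClassUpgrade(oldAnn, nil, nil, &sc)) // true: annotation-to-field upgrade
	fmt.Println(allowStorageClassUpgrade(nil, nil, nil, &sc))    // false: condition 1 fails
}
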
// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
@ -2647,19 +2678,8 @@ func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorL
allErrs := field.ErrorList{}
if affinity != nil {
if na := affinity.NodeAffinity; na != nil {
// TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
// }
if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
if affinity.NodeAffinity != nil {
allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, fldPath.Child("nodeAffinity"))...)
}
if affinity.PodAffinity != nil {
allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...)
@ -3076,6 +3096,22 @@ func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *fie
return allErrs
}
// validateNodeAffinity tests that the specified nodeAffinity fields have valid data
func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
// }
if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
}
// validatePodAffinity tests that the specified podAffinity fields have valid data
func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
@ -3322,6 +3358,31 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod) field.ErrorList {
return allErrs
}
// ValidateContainerStateTransition tests whether any illegal container state transitions are being attempted
func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, restartPolicy core.RestartPolicy) field.ErrorList {
allErrs := field.ErrorList{}
// If we should always restart, containers are allowed to leave the terminated state
if restartPolicy == core.RestartPolicyAlways {
return allErrs
}
for i, oldStatus := range oldStatuses {
// Skip any container that is not terminated
if oldStatus.State.Terminated == nil {
continue
}
// Skip any container that failed but is allowed to restart
if oldStatus.State.Terminated.ExitCode != 0 && restartPolicy == core.RestartPolicyOnFailure {
continue
}
for _, newStatus := range newStatuses {
if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state"))
}
}
}
return allErrs
}
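
A trimmed-down sketch of the same rule, with container status reduced to a name, a terminated flag and an exit code, so the allowed and forbidden transitions can be shown directly (all names are placeholders):

package main

import "fmt"

type containerStatus struct {
	Name       string
	Terminated bool  // stand-in for State.Terminated != nil
	ExitCode   int32 // only meaningful when Terminated is true
}

// illegalTransitions mirrors ValidateContainerStateTransition on simplified inputs.
func illegalTransitions(updated, previous []containerStatus, restartPolicy string) []string {
	var errs []string
	if restartPolicy == "Always" {
		return errs // containers may always leave the terminated state
	}
	for _, old := range previous {
		if !old.Terminated {
			continue
		}
		if old.ExitCode != 0 && restartPolicy == "OnFailure" {
			continue // a failed container is still allowed to restart
		}
		for _, cur := range updated {
			if old.Name == cur.Name && !cur.Terminated {
				errs = append(errs, old.Name+": may not be transitioned to non-terminated state")
			}
		}
	}
	return errs
}

func main() {
	previous := []containerStatus{{Name: "app", Terminated: true, ExitCode: 0}}
	updated := []containerStatus{{Name: "app", Terminated: false}}
	fmt.Println(illegalTransitions(updated, previous, "Never"))  // one error
	fmt.Println(illegalTransitions(updated, previous, "Always")) // no errors
}
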
// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
// that cannot be changed.
func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList {
@ -3329,10 +3390,22 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...)
fldPath = field.NewPath("status")
if newPod.Spec.NodeName != oldPod.Spec.NodeName {
allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly"))
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeName"), "may not be changed directly"))
}
if newPod.Status.NominatedNodeName != oldPod.Status.NominatedNodeName && len(newPod.Status.NominatedNodeName) > 0 {
for _, msg := range ValidateNodeName(newPod.Status.NominatedNodeName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nominatedNodeName"), newPod.Status.NominatedNodeName, msg))
}
}
// If pod should not restart, make sure the status update does not transition
// any terminated containers to a non-terminated state.
allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...)
allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...)
// For status update we ignore changes to pod spec.
newPod.Spec = oldPod.Spec
@ -3390,9 +3463,9 @@ func ValidateService(service *core.Service) field.ErrorList {
// This is a workaround for broken cloud environments that
// over-open firewalls. Hopefully it can go away when more clouds
// understand containers better.
if port.Port == 10250 {
if port.Port == ports.KubeletPort {
portPath := specPath.Child("ports").Index(ix)
allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet"))
allErrs = append(allErrs, field.Invalid(portPath, port.Port, fmt.Sprintf("may not expose port %v externally since it is used by kubelet", ports.KubeletPort)))
}
}
if service.Spec.ClusterIP == "None" {
@ -3404,7 +3477,7 @@ func ValidateService(service *core.Service) field.ErrorList {
}
case core.ServiceTypeExternalName:
if service.Spec.ClusterIP != "" {
allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services"))
allErrs = append(allErrs, field.Forbidden(specPath.Child("clusterIP"), "must be empty for ExternalName services"))
}
if len(service.Spec.ExternalName) > 0 {
allErrs = append(allErrs, ValidateDNS1123Subdomain(service.Spec.ExternalName, specPath.Child("externalName"))...)
@ -3483,7 +3556,7 @@ func ValidateService(service *core.Service) field.ErrorList {
for i := range service.Spec.Ports {
portPath := portsPath.Index(i)
if service.Spec.Ports[i].NodePort != 0 {
allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'"))
allErrs = append(allErrs, field.Forbidden(portPath.Child("nodePort"), "may not be used when `type` is 'ClusterIP'"))
}
}
}
@ -3533,7 +3606,7 @@ func ValidateService(service *core.Service) field.ErrorList {
val = service.Annotations[core.AnnotationLoadBalancerSourceRangesKey]
}
if service.Spec.Type != core.ServiceTypeLoadBalancer {
allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'"))
allErrs = append(allErrs, field.Forbidden(fieldPath, "may only be used when `type` is 'LoadBalancer'"))
}
_, err := apiservice.GetLoadBalancerSourceRanges(service)
if err != nil {
@ -4310,10 +4383,22 @@ func ValidateConfigMap(cfg *core.ConfigMap) field.ErrorList {
for _, msg := range validation.IsConfigMapKey(key) {
allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
}
// check if we have a duplicate key in the other bag
if _, isValue := cfg.BinaryData[key]; isValue {
msg := "duplicate of key present in binaryData"
allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
}
totalSize += len(value)
}
for key, value := range cfg.BinaryData {
for _, msg := range validation.IsConfigMapKey(key) {
allErrs = append(allErrs, field.Invalid(field.NewPath("binaryData").Key(key), key, msg))
}
totalSize += len(value)
}
if totalSize > core.MaxSecretSize {
allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", core.MaxSecretSize))
// pass back "" to indicate that the error refers to the whole object.
allErrs = append(allErrs, field.TooLong(field.NewPath(""), cfg, core.MaxSecretSize))
}
return allErrs
@ -4340,7 +4425,13 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
allErrs := field.ErrorList{}
limPath := fldPath.Child("limits")
reqPath := fldPath.Child("requests")
limContainsCpuOrMemory := false
reqContainsCpuOrMemory := false
limContainsHugePages := false
reqContainsHugePages := false
supportedQoSComputeResources := sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
for resourceName, quantity := range requirements.Limits {
fldPath := limPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
@ -4351,10 +4442,17 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
if resourceName == core.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
allErrs = append(allErrs, field.Forbidden(limPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceRequirements"))
}
if helper.IsHugePageResourceName(resourceName) && !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) {
allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName)))
if helper.IsHugePageResourceName(resourceName) {
if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) {
allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName)))
} else {
limContainsHugePages = true
}
}
if supportedQoSComputeResources.Has(string(resourceName)) {
limContainsCpuOrMemory = true
}
}
for resourceName, quantity := range requirements.Requests {
fldPath := reqPath.Key(string(resourceName))
@ -4366,15 +4464,25 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
// Check that request <= limit.
limitQuantity, exists := requirements.Limits[resourceName]
if exists {
// For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal.
// For non overcommitable resources, not only requests can't exceed limits, they also can't be lower, i.e. must be equal.
if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName)))
} else if quantity.Cmp(limitQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName)))
}
} else if resourceName == core.ResourceNvidiaGPU {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", core.ResourceNvidiaGPU)))
} else if !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Required(limPath, "Limit must be set for non overcommitable resources"))
}
if helper.IsHugePageResourceName(resourceName) {
reqContainsHugePages = true
}
if supportedQoSComputeResources.Has(string(resourceName)) {
reqContainsCpuOrMemory = true
}
}
if !limContainsCpuOrMemory && !reqContainsCpuOrMemory && (reqContainsHugePages || limContainsHugePages) {
allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("HugePages require cpu or memory")))
}
return allErrs
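
The new hugepages rule above can be restated in isolation: hugepages-* limits or requests are only accepted when cpu or memory is also present. A minimal sketch, using plain string maps in place of the real ResourceList type:

package main

import (
	"fmt"
	"strings"
)

// hugePagesWithoutCPUOrMemory reports whether the combined limits and requests
// would trip the "HugePages require cpu or memory" check above.
func hugePagesWithoutCPUOrMemory(limits, requests map[string]string) bool {
	hasCPUOrMem, hasHugePages := false, false
	for _, resources := range []map[string]string{limits, requests} {
		for name := range resources {
			if name == "cpu" || name == "memory" {
				hasCPUOrMem = true
			}
			if strings.HasPrefix(name, "hugepages-") {
				hasHugePages = true
			}
		}
	}
	return hasHugePages && !hasCPUOrMem
}

func main() {
	fmt.Println(hugePagesWithoutCPUOrMemory(map[string]string{"hugepages-2Mi": "100Mi"}, nil))                  // true: rejected
	fmt.Println(hugePagesWithoutCPUOrMemory(map[string]string{"hugepages-2Mi": "100Mi", "memory": "1Gi"}, nil)) // false: allowed
}
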
@ -4744,8 +4852,8 @@ func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) fiel
}
if sc.RunAsUser != nil {
if *sc.RunAsUser < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, isNegativeErrorMsg))
for _, msg := range validation.IsValidUserID(*sc.RunAsUser) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, msg))
}
}
@ -4858,3 +4966,13 @@ func ValidateCIDR(cidr string) (*net.IPNet, error) {
}
return net, nil
}
func IsDecremented(update, old *int32) bool {
if update == nil && old != nil {
return true
}
if update == nil || old == nil {
return false
}
return *update < *old
}
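
A short usage sketch for the new IsDecremented helper (same logic, copied locally so it runs standalone):

package main

import "fmt"

// isDecremented is a local copy of IsDecremented above.
func isDecremented(update, old *int32) bool {
	if update == nil && old != nil {
		return true
	}
	if update == nil || old == nil {
		return false
	}
	return *update < *old
}

func main() {
	three, five := int32(3), int32(5)
	fmt.Println(isDecremented(&three, &five)) // true
	fmt.Println(isDecremented(&five, &three)) // false
	fmt.Println(isDecremented(nil, &five))    // true: clearing the value counts as a decrement
}
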

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -483,6 +483,18 @@ func (in *ConfigMap) DeepCopyInto(out *ConfigMap) {
(*out)[key] = val
}
}
if in.BinaryData != nil {
in, out := &in.BinaryData, &out.BinaryData
*out = make(map[string][]byte, len(*in))
for key, val := range *in {
if val == nil {
(*out)[key] = nil
} else {
(*out)[key] = make([]byte, len(val))
copy((*out)[key], val)
}
}
}
return
}
@ -1547,6 +1559,38 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
if in.Options != nil {
in, out := &in.Options, &out.Options
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource.
func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource {
if in == nil {
return nil
}
out := new(FlexPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) {
*out = *in
@ -3152,7 +3196,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
if *in == nil {
*out = nil
} else {
*out = new(FlexVolumeSource)
*out = new(FlexPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
}
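
The generated deepcopy for BinaryData allocates fresh byte slices, so mutating a copy never touches the original. A minimal sketch of that guarantee, with the copying loop reproduced on a plain map:

package main

import "fmt"

// deepCopyBinaryData reproduces the BinaryData copying loop from DeepCopyInto above.
func deepCopyBinaryData(in map[string][]byte) map[string][]byte {
	if in == nil {
		return nil
	}
	out := make(map[string][]byte, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		out[key] = make([]byte, len(val))
		copy(out[key], val)
	}
	return out
}

func main() {
	original := map[string][]byte{"blob": {1, 2, 3}}
	copied := deepCopyBinaryData(original)
	copied["blob"][0] = 9
	fmt.Println(original["blob"][0], copied["blob"][0]) // 1 9
}
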

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.