Vendor in latest k8s.io changes

These changes allow the container's PID namespace to be set to that of the
pod infra container when the PID namespace mode is set to POD.
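
For illustration, a minimal sketch of that mapping. The `NamespaceMode` type and `pidNamespacePath` helper below are hypothetical stand-ins, not the vendored CRI-O code:

```go
package main

import "fmt"

// NamespaceMode mirrors the CRI-style PID namespace modes referenced above;
// the type and helper here are illustrative only.
type NamespaceMode int

const (
	CONTAINER NamespaceMode = iota // each container gets its own PID namespace
	POD                            // containers join the pod infra container's namespace
)

// pidNamespacePath returns the namespace path a container should join,
// given the host PID of the pod infra (sandbox) container.
func pidNamespacePath(mode NamespaceMode, infraPID int) string {
	if mode == POD {
		// Sharing the infra container's PID namespace lets all containers
		// in the pod see and signal each other's processes.
		return fmt.Sprintf("/proc/%d/ns/pid", infraPID)
	}
	return "" // empty: the runtime creates a fresh PID namespace
}

func main() {
	fmt.Println(pidNamespacePath(POD, 4242)) // /proc/4242/ns/pid
}
```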

Signed-off-by: umohnani8 <umohnani@redhat.com>
Author: umohnani8
Date:   2018-03-01 15:50:10 -05:00
Parent: 0b736bb43f
Commit: e5fdb6bc9e
113 changed files with 11290 additions and 5289 deletions


@ -111,6 +111,12 @@ var (
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics (the "pods" source).
ResourceMetricSourceType MetricSourceType = "Resource"
// ExternalMetricSourceType is a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
ExternalMetricSourceType MetricSourceType = "External"
)
// MetricSpec specifies how to scale based on a single metric
@ -136,6 +142,13 @@ type MetricSpec struct {
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricSource
// External refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricSource
}
// ObjectMetricSource indicates how to scale on a metric describing a
@ -184,6 +197,26 @@ type ResourceMetricSource struct {
TargetAverageValue *resource.Quantity
}
// ExternalMetricSource indicates how to scale on a metric not associated with
// any Kubernetes object (for example length of queue in cloud
// messaging service, or QPS from loadbalancer running outside of cluster).
type ExternalMetricSource struct {
// metricName is the name of the metric in question.
MetricName string
// MetricSelector is used to identify a specific time series
// within a given metric.
// +optional
MetricSelector *metav1.LabelSelector
// TargetValue is the target value of the metric (as a quantity).
// Mutually exclusive with TargetAverageValue.
// +optional
TargetValue *resource.Quantity
// TargetAverageValue is the target per-pod value of global metric (as a quantity).
// Mutually exclusive with TargetValue.
// +optional
TargetAverageValue *resource.Quantity
}
// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
type HorizontalPodAutoscalerStatus struct {
// ObservedGeneration is the most recent generation observed by this autoscaler.
@ -282,6 +315,13 @@ type MetricStatus struct {
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricStatus
// External refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
External *ExternalMetricStatus
}
// ObjectMetricStatus indicates the current value of a metric describing a
@ -328,6 +368,23 @@ type ResourceMetricStatus struct {
CurrentAverageValue resource.Quantity
}
// ExternalMetricStatus indicates the current value of a global metric
// not associated with any Kubernetes object.
type ExternalMetricStatus struct {
// MetricName is the name of a metric used for autoscaling in
// metric system.
MetricName string
// MetricSelector is used to identify a specific time series
// within a given metric.
// +optional
MetricSelector *metav1.LabelSelector
// CurrentValue is the current value of the metric (as a quantity)
CurrentValue resource.Quantity
// CurrentAverageValue is the current value of metric averaged over autoscaled pods.
// +optional
CurrentAverageValue *resource.Quantity
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
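
To show where the new source type plugs in, here is a minimal sketch (not part of the diff) of a MetricSpec using the External source; the metric name, selector, and target are illustrative, and the import paths assume the vendored tree:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

func main() {
	target := resource.MustParse("100")
	spec := autoscaling.MetricSpec{
		Type: autoscaling.ExternalMetricSourceType,
		External: &autoscaling.ExternalMetricSource{
			// Illustrative: depth of a queue in an external messaging service.
			MetricName: "queue_messages_ready",
			MetricSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"queue": "worker_tasks"},
			},
			// TargetValue and TargetAverageValue are mutually exclusive.
			TargetValue: &target,
		},
	}
	fmt.Printf("%+v\n", spec)
}
```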


@ -16,12 +16,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package autoscaling
import (
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -42,6 +41,84 @@ func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
*out = *in
if in.MetricSelector != nil {
in, out := &in.MetricSelector, &out.MetricSelector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
if in.TargetValue != nil {
in, out := &in.TargetValue, &out.TargetValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
if in.TargetAverageValue != nil {
in, out := &in.TargetAverageValue, &out.TargetAverageValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
if in == nil {
return nil
}
out := new(ExternalMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
*out = *in
if in.MetricSelector != nil {
in, out := &in.MetricSelector, &out.MetricSelector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
out.CurrentValue = in.CurrentValue.DeepCopy()
if in.CurrentAverageValue != nil {
in, out := &in.CurrentAverageValue, &out.CurrentAverageValue
if *in == nil {
*out = nil
} else {
x := (*in).DeepCopy()
*out = &x
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
if in == nil {
return nil
}
out := new(ExternalMetricStatus)
in.DeepCopyInto(out)
return out
}
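
A small usage sketch (not part of the diff) of the generated functions: DeepCopy yields a fully independent copy, so mutating the copy's pointer fields leaves the original untouched. Values are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

func main() {
	target := resource.MustParse("100")
	src := &autoscaling.ExternalMetricSource{
		MetricName:  "qps",
		TargetValue: &target,
	}
	cp := src.DeepCopy()
	cp.TargetValue.Set(500) // mutates only the copy's Quantity
	fmt.Println(src.TargetValue.String(), cp.TargetValue.String()) // "100" "500"
}
```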
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
*out = *in
@ -66,9 +143,8 @@ func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -117,9 +193,8 @@ func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -172,8 +247,7 @@ func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscal
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.CurrentMetrics != nil {
@ -233,6 +307,15 @@ func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
(*in).DeepCopyInto(*out)
}
}
if in.External != nil {
in, out := &in.External, &out.External
if *in == nil {
*out = nil
} else {
*out = new(ExternalMetricSource)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -276,6 +359,15 @@ func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
(*in).DeepCopyInto(*out)
}
}
if in.External != nil {
in, out := &in.External, &out.External
if *in == nil {
*out = nil
} else {
*out = new(ExternalMetricStatus)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -376,8 +468,8 @@ func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
if *in == nil {
*out = nil
} else {
*out = new(resource.Quantity)
**out = (*in).DeepCopy()
x := (*in).DeepCopy()
*out = &x
}
}
return
@ -443,9 +535,8 @@ func (in *Scale) DeepCopy() *Scale {
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core"
)
@ -146,10 +147,21 @@ func IsStandardContainerResourceName(str string) bool {
return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str))
}
// IsExtendedResourceName returns true if the resource name is not in the
// default namespace.
// IsExtendedResourceName returns true if:
// 1. the resource name is not in the default namespace;
// 2. resource name does not have "requests." prefix,
// to avoid confusion with the convention in quota
// 3. it satisfies the rules in IsQualifiedName() after converted into quota resource name
func IsExtendedResourceName(name core.ResourceName) bool {
return !IsDefaultNamespaceResource(name)
if IsDefaultNamespaceResource(name) || strings.HasPrefix(string(name), core.DefaultResourceRequestsPrefix) {
return false
}
// Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name
nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
return false
}
return true
}
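
Illustrative inputs for the tightened check, assuming the vendored helper package; the outcomes follow the three rules in the comment above (the resource names are made up):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	for _, name := range []core.ResourceName{
		"example.com/gpu",      // true: namespaced, no "requests." prefix
		"cpu",                  // false: default (kubernetes.io) namespace
		"requests.example/foo", // false: clashes with the quota convention
	} {
		fmt.Println(name, helper.IsExtendedResourceName(name))
	}
}
```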
// IsDefaultNamespaceResource returns true if the resource name is in the


@ -391,7 +391,7 @@ type PersistentVolumeSource struct {
// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource
// CSI (Container Storage Interface) represents storage that handled by an external CSI driver
// CSI (Container Storage Interface) represents storage that handled by an external CSI driver (Beta feature).
// +optional
CSI *CSIPersistentVolumeSource
}
@ -467,6 +467,16 @@ type PersistentVolumeSpec struct {
// This is an alpha feature and may change in the future.
// +optional
VolumeMode *PersistentVolumeMode
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
// This field influences the scheduling of pods that use this volume.
// +optional
NodeAffinity *VolumeNodeAffinity
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
type VolumeNodeAffinity struct {
// Required specifies hard node constraints that must be met.
Required *NodeSelector
}
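
A hedged sketch (not part of the diff) of what the new field expresses: a volume restricted to a single node by hostname. The label value is illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	spec := core.PersistentVolumeSpec{
		NodeAffinity: &core.VolumeNodeAffinity{
			Required: &core.NodeSelector{
				NodeSelectorTerms: []core.NodeSelectorTerm{{
					MatchExpressions: []core.NodeSelectorRequirement{{
						Key:      "kubernetes.io/hostname",
						Operator: core.NodeSelectorOpIn,
						Values:   []string{"node-1"},
					}},
				}},
			},
		},
	}
	fmt.Printf("%+v\n", spec.NodeAffinity)
}
```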
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes
@ -1603,7 +1613,7 @@ type LocalVolumeSource struct {
Path string
}
// Represents storage that is managed by an external CSI volume driver
// Represents storage that is managed by an external CSI volume driver (Beta feature)
type CSIPersistentVolumeSource struct {
// Driver is the name of the driver to use for this volume.
// Required.
@ -1624,6 +1634,34 @@ type CSIPersistentVolumeSource struct {
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string
// Attributes of the volume to publish.
// +optional
VolumeAttributes map[string]string
// ControllerPublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerPublishVolume and ControllerUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
ControllerPublishSecretRef *SecretReference
// NodeStageSecretRef is a reference to the secret object containing sensitive
// information to pass to the CSI driver to complete the CSI NodeStageVolume
// and NodeUnstageVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodeStageSecretRef *SecretReference
// NodePublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodePublishVolume and NodeUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodePublishSecretRef *SecretReference
}
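
For orientation, a sketch (not part of the diff) of the expanded source with the new fields populated; the driver name, handle, and secret names are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	csi := core.CSIPersistentVolumeSource{
		Driver:           "com.example.csi-driver",
		VolumeHandle:     "vol-0123456789",
		FSType:           "ext4",
		VolumeAttributes: map[string]string{"tier": "fast"},
		NodeStageSecretRef: &core.SecretReference{
			Name:      "csi-node-secret",
			Namespace: "kube-system",
		},
	}
	fmt.Printf("%+v\n", csi)
}
```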
// ContainerPort represents a network port in a single container
@ -2634,6 +2672,15 @@ type PodSecurityContext struct {
// +k8s:conversion-gen=false
// +optional
HostIPC bool
// Share a single process namespace between all of the containers in a pod.
// When this is set containers will be able to view and signal processes from other containers
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
ShareProcessNamespace *bool
// The SELinux context to be applied to all containers.
// If unspecified, the container runtime will allocate a random SELinux context for each
// container. May also be set in SecurityContext. If set in
@ -3533,8 +3580,8 @@ const (
NodeDiskPressure NodeConditionType = "DiskPressure"
// NodeNetworkUnavailable means that network for the node is not correctly configured.
NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
// NodeConfigOK indicates whether the kubelet is correctly configured
NodeConfigOK NodeConditionType = "ConfigOK"
// NodeKubeletConfigOk indicates whether the kubelet is correctly configured
NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk"
)
type NodeCondition struct {
@ -4202,6 +4249,8 @@ const (
// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
ResourceRequestsHugePagesPrefix = "requests.hugepages-"
// Default resource requests prefix
DefaultResourceRequestsPrefix = "requests."
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota


@ -350,6 +350,10 @@ func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec
return err
}
// drop init container annotations so they don't take effect on legacy kubelets.
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
@ -358,6 +362,9 @@ func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec,
return err
}
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
@ -377,6 +384,7 @@ func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s con
out.HostPID = in.SecurityContext.HostPID
out.HostNetwork = in.SecurityContext.HostNetwork
out.HostIPC = in.SecurityContext.HostIPC
out.ShareProcessNamespace = in.SecurityContext.ShareProcessNamespace
}
return nil
@ -401,6 +409,18 @@ func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s con
out.SecurityContext.HostNetwork = in.HostNetwork
out.SecurityContext.HostPID = in.HostPID
out.SecurityContext.HostIPC = in.HostIPC
out.SecurityContext.ShareProcessNamespace = in.ShareProcessNamespace
return nil
}
func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error {
if err := autoConvert_v1_Pod_To_core_Pod(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
@ -412,17 +432,7 @@ func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) e
// drop init container annotations so they don't take effect on legacy kubelets.
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
if len(out.Annotations) > 0 {
old := out.Annotations
out.Annotations = make(map[string]string, len(old))
for k, v := range old {
out.Annotations[k] = v
}
delete(out.Annotations, "pod.beta.kubernetes.io/init-containers")
delete(out.Annotations, "pod.alpha.kubernetes.io/init-containers")
delete(out.Annotations, "pod.beta.kubernetes.io/init-container-statuses")
delete(out.Annotations, "pod.alpha.kubernetes.io/init-container-statuses")
}
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
@ -570,3 +580,40 @@ func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error {
}
})
}
var initContainerAnnotations = map[string]bool{
"pod.beta.kubernetes.io/init-containers": true,
"pod.alpha.kubernetes.io/init-containers": true,
"pod.beta.kubernetes.io/init-container-statuses": true,
"pod.alpha.kubernetes.io/init-container-statuses": true,
}
// dropInitContainerAnnotations returns a copy of the annotations with init container annotations removed,
// or the original annotations if no init container annotations were present.
//
// this can be removed once no clients prior to 1.8 are supported, and no kubelets prior to 1.8 can be run
// (we don't support kubelets older than 2 versions skewed from the apiserver, but we don't prevent them, either)
func dropInitContainerAnnotations(oldAnnotations map[string]string) map[string]string {
if len(oldAnnotations) == 0 {
return oldAnnotations
}
found := false
for k := range initContainerAnnotations {
if _, ok := oldAnnotations[k]; ok {
found = true
break
}
}
if !found {
return oldAnnotations
}
newAnnotations := make(map[string]string, len(oldAnnotations))
for k, v := range oldAnnotations {
if !initContainerAnnotations[k] {
newAnnotations[k] = v
}
}
return newAnnotations
}
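
A minimal test-style sketch of the copy-on-write behavior; since the helper is unexported, this assumes it sits in the same package. Maps containing a legacy key are copied with those keys removed; all other maps are returned as-is, without copying:

```go
package v1

import (
	"reflect"
	"testing"
)

func TestDropInitContainerAnnotations(t *testing.T) {
	in := map[string]string{
		"pod.beta.kubernetes.io/init-containers": "[]",
		"keep-me":                                "v",
	}
	out := dropInitContainerAnnotations(in)
	want := map[string]string{"keep-me": "v"}
	if !reflect.DeepEqual(out, want) {
		t.Fatalf("got %v, want %v", out, want)
	}
	// The input map must not be mutated.
	if _, ok := in["pod.beta.kubernetes.io/init-containers"]; !ok {
		t.Fatal("input map was mutated")
	}
}
```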


@ -26,13 +26,25 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
// IsExtendedResourceName returns true if the resource name is not in the
// default namespace.
// IsExtendedResourceName returns true if:
// 1. the resource name is not in the default namespace;
// 2. resource name does not have "requests." prefix,
// to avoid confusion with the convention in quota
// 3. it satisfies the rules in IsQualifiedName() after converted into quota resource name
func IsExtendedResourceName(name v1.ResourceName) bool {
return !IsDefaultNamespaceResource(name)
if IsDefaultNamespaceResource(name) || strings.HasPrefix(string(name), v1.DefaultResourceRequestsPrefix) {
return false
}
// Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name
nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
return false
}
return true
}
// IsDefaultNamespaceResource returns true if the resource name is in the


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by conversion-gen. Do not edit it manually!
// Code generated by conversion-gen. DO NOT EDIT.
package v1
@ -406,6 +406,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_core_VolumeDevice_To_v1_VolumeDevice,
Convert_v1_VolumeMount_To_core_VolumeMount,
Convert_core_VolumeMount_To_v1_VolumeMount,
Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity,
Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity,
Convert_v1_VolumeProjection_To_core_VolumeProjection,
Convert_core_VolumeProjection_To_v1_VolumeProjection,
Convert_v1_VolumeSource_To_core_VolumeSource,
@ -620,6 +622,10 @@ func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
out.ControllerPublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef))
out.NodeStageSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef))
out.NodePublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef))
return nil
}
@ -633,6 +639,10 @@ func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
out.ControllerPublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef))
out.NodeStageSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef))
out.NodePublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef))
return nil
}
@ -3346,6 +3356,7 @@ func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.Per
out.StorageClassName = in.StorageClassName
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.NodeAffinity = (*core.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
return nil
}
@ -3365,6 +3376,7 @@ func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.P
out.StorageClassName = in.StorageClassName
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.NodeAffinity = (*v1.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
return nil
}
@ -3430,11 +3442,6 @@ func autoConvert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scop
return nil
}
// Convert_v1_Pod_To_core_Pod is an autogenerated conversion function.
func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error {
return autoConvert_v1_Pod_To_core_Pod(in, out, s)
}
func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil {
@ -3777,6 +3784,7 @@ func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSe
// INFO: in.HostNetwork opted out of conversion generation
// INFO: in.HostPID opted out of conversion generation
// INFO: in.HostIPC opted out of conversion generation
// INFO: in.ShareProcessNamespace opted out of conversion generation
out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser))
out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot))
@ -3851,6 +3859,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s
// INFO: in.HostNetwork opted out of conversion generation
// INFO: in.HostPID opted out of conversion generation
// INFO: in.HostIPC opted out of conversion generation
// INFO: in.ShareProcessNamespace opted out of conversion generation
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(core.PodSecurityContext)
@ -5508,6 +5517,26 @@ func Convert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.Vo
return autoConvert_core_VolumeMount_To_v1_VolumeMount(in, out, s)
}
func autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *v1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error {
out.Required = (*core.NodeSelector)(unsafe.Pointer(in.Required))
return nil
}
// Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity is an autogenerated conversion function.
func Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *v1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error {
return autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in, out, s)
}
func autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *v1.VolumeNodeAffinity, s conversion.Scope) error {
out.Required = (*v1.NodeSelector)(unsafe.Pointer(in.Required))
return nil
}
// Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity is an autogenerated conversion function.
func Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *v1.VolumeNodeAffinity, s conversion.Scope) error {
return autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in, out, s)
}
func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error {
out.Secret = (*core.SecretProjection)(unsafe.Pointer(in.Secret))
out.DownwardAPI = (*core.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI))


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by defaulter-gen. Do not edit it manually!
// Code generated by defaulter-gen. DO NOT EDIT.
package v1


@ -1383,6 +1383,9 @@ func validateLocalVolumeSource(ls *core.LocalVolumeSource, fldPath *field.Path)
return allErrs
}
if !path.IsAbs(ls.Path) {
allErrs = append(allErrs, field.Invalid(fldPath, ls.Path, "must be an absolute path"))
}
allErrs = append(allErrs, validatePathNoBacksteps(ls.Path, fldPath.Child("path"))...)
return allErrs
}
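
Illustrative inputs for the new absolute-path requirement (paths hypothetical; validateLocalVolumeSource above would flag the first):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	bad := core.LocalVolumeSource{Path: "mnt/disks/ssd0"} // rejected: not absolute
	good := core.LocalVolumeSource{Path: "/mnt/disks/ssd0"}
	fmt.Println(bad.Path, good.Path)
}
```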
@ -1449,6 +1452,45 @@ func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldP
allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), ""))
}
if csi.ControllerPublishSecretRef != nil {
if len(csi.ControllerPublishSecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("controllerPublishSecretRef", "name"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerPublishSecretRef.Name, fldPath.Child("name"))...)
}
if len(csi.ControllerPublishSecretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("controllerPublishSecretRef", "namespace"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerPublishSecretRef.Namespace, fldPath.Child("namespace"))...)
}
}
if csi.NodePublishSecretRef != nil {
if len(csi.NodePublishSecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef ", "name"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.NodePublishSecretRef.Name, fldPath.Child("name"))...)
}
if len(csi.NodePublishSecretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef ", "namespace"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.NodePublishSecretRef.Namespace, fldPath.Child("namespace"))...)
}
}
if csi.NodeStageSecretRef != nil {
if len(csi.NodeStageSecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("nodeStageSecretRef", "name"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.NodeStageSecretRef.Name, fldPath.Child("name"))...)
}
if len(csi.NodeStageSecretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("nodeStageSecretRef", "namespace"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(csi.NodeStageSecretRef.Namespace, fldPath.Child("namespace"))...)
}
}
return allErrs
}
@ -1497,6 +1539,15 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList {
nodeAffinitySpecified, errs := validateStorageNodeAffinityAnnotation(pv.ObjectMeta.Annotations, metaPath.Child("annotations"))
allErrs = append(allErrs, errs...)
volumeNodeAffinitySpecified, errs := validateVolumeNodeAffinity(pv.Spec.NodeAffinity, specPath.Child("nodeAffinity"))
allErrs = append(allErrs, errs...)
if nodeAffinitySpecified && volumeNodeAffinitySpecified {
allErrs = append(allErrs, field.Forbidden(specPath.Child("nodeAffinity"), "may not specify both alpha nodeAffinity annotation and nodeAffinity field"))
}
nodeAffinitySpecified = nodeAffinitySpecified || volumeNodeAffinitySpecified
numVolumes := 0
if pv.Spec.HostPath != nil {
if numVolumes > 0 {
@ -1725,6 +1776,13 @@ func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume) field.E
allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.VolumeMode, oldPv.Spec.VolumeMode, field.NewPath("volumeMode"))...)
}
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
// Allow setting NodeAffinity if oldPv NodeAffinity was not set
if oldPv.Spec.NodeAffinity != nil {
allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...)
}
}
return allErrs
}
@ -2917,19 +2975,13 @@ func ValidatePodSpec(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
}
if len(spec.PriorityClassName) > 0 {
if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("priorityClassName"), "Pod priority is disabled by feature-gate"))
} else {
if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
for _, msg := range ValidatePriorityClassName(spec.PriorityClassName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("priorityClassName"), spec.PriorityClassName, msg))
}
}
}
if spec.Priority != nil && !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("priority"), "Pod priority is disabled by feature-gate"))
}
return allErrs
}
@ -3251,6 +3303,13 @@ func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *
allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg))
}
}
if securityContext.ShareProcessNamespace != nil {
if !utilfeature.DefaultFeatureGate.Enabled(features.PodShareProcessNamespace) {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("shareProcessNamespace"), "Process Namespace Sharing is disabled by PodShareProcessNamespace feature-gate"))
} else if securityContext.HostPID && *securityContext.ShareProcessNamespace {
allErrs = append(allErrs, field.Invalid(fldPath.Child("shareProcessNamespace"), *securityContext.ShareProcessNamespace, "ShareProcessNamespace and HostPID cannot both be enabled"))
}
}
}
return allErrs
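
The constraint being enforced, as a construction-only sketch (not part of the diff; validation wiring needs the full PodSpec):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	share := true
	sc := core.PodSecurityContext{
		HostPID:               true,
		ShareProcessNamespace: &share,
	}
	// ValidatePodSecurityContext (above) rejects this combination: a pod
	// cannot both join the host PID namespace and share a pod-level one.
	fmt.Printf("%+v\n", sc)
}
```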
@ -4071,6 +4130,10 @@ func validateContainerResourceName(value string, fldPath *field.Path) field.Erro
if !helper.IsStandardContainerResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
}
} else if !helper.IsDefaultNamespaceResource(core.ResourceName(value)) {
if !helper.IsExtendedResourceName(core.ResourceName(value)) {
return append(allErrs, field.Invalid(fldPath, value, "doesn't follow extended resource name standard"))
}
}
return allErrs
}
@ -4252,7 +4315,8 @@ func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList {
}
}
// for GPU and hugepages, the default value and defaultRequest value must match if both are specified
// for GPU, hugepages and other resources that are not allowed to overcommit,
// the default value and defaultRequest value must match if both are specified
if !helper.IsOvercommitAllowed(core.ResourceName(k)) && defaultQuantityFound && defaultRequestQuantityFound && defaultQuantity.Cmp(defaultRequestQuantity) != 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default value %s must equal to defaultRequest value %s in %s", defaultQuantity.String(), defaultRequestQuantity.String(), k)))
}
@ -4942,7 +5006,7 @@ func validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPat
return false, allErrs
}
if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) {
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
allErrs = append(allErrs, field.Forbidden(fldPath, "Storage node affinity is disabled by feature-gate"))
}
@ -4958,6 +5022,30 @@ func validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPat
return policySpecified, allErrs
}
// validateVolumeNodeAffinity tests that the PersistentVolume.NodeAffinity has valid data
// returns:
// - true if volumeNodeAffinity is set
// - errorList if there are validation errors
func validateVolumeNodeAffinity(nodeAffinity *core.VolumeNodeAffinity, fldPath *field.Path) (bool, field.ErrorList) {
allErrs := field.ErrorList{}
if nodeAffinity == nil {
return false, allErrs
}
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
allErrs = append(allErrs, field.Forbidden(fldPath, "Volume node affinity is disabled by feature-gate"))
}
if nodeAffinity.Required != nil {
allErrs = append(allErrs, ValidateNodeSelector(nodeAffinity.Required, fldPath.Child("required"))...)
} else {
allErrs = append(allErrs, field.Required(fldPath.Child("required"), "must specify required node constraints"))
}
return true, allErrs
}
// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR
func ValidateCIDR(cidr string) (*net.IPNet, error) {
_, net, err := net.ParseCIDR(cidr)


@ -16,12 +16,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package core
import (
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
@ -241,14 +240,47 @@ func (in *Binding) DeepCopy() *Binding {
func (in *Binding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) {
*out = *in
if in.VolumeAttributes != nil {
in, out := &in.VolumeAttributes, &out.VolumeAttributes
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ControllerPublishSecretRef != nil {
in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
if in.NodeStageSecretRef != nil {
in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
if in.NodePublishSecretRef != nil {
in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
return
}
@ -432,9 +464,8 @@ func (in *ComponentStatus) DeepCopy() *ComponentStatus {
func (in *ComponentStatus) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -466,9 +497,8 @@ func (in *ComponentStatusList) DeepCopy() *ComponentStatusList {
func (in *ComponentStatusList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -512,9 +542,8 @@ func (in *ConfigMap) DeepCopy() *ConfigMap {
func (in *ConfigMap) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -598,9 +627,8 @@ func (in *ConfigMapList) DeepCopy() *ConfigMapList {
func (in *ConfigMapList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -994,9 +1022,8 @@ func (in *DeleteOptions) DeepCopy() *DeleteOptions {
func (in *DeleteOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1105,8 +1132,8 @@ func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) {
if *in == nil {
*out = nil
} else {
*out = new(resource.Quantity)
**out = (*in).DeepCopy()
x := (*in).DeepCopy()
*out = &x
}
}
return
@ -1236,9 +1263,8 @@ func (in *Endpoints) DeepCopy() *Endpoints {
func (in *Endpoints) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1270,9 +1296,8 @@ func (in *EndpointsList) DeepCopy() *EndpointsList {
func (in *EndpointsList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1431,9 +1456,8 @@ func (in *Event) DeepCopy() *Event {
func (in *Event) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1465,9 +1489,8 @@ func (in *EventList) DeepCopy() *EventList {
func (in *EventList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1974,9 +1997,8 @@ func (in *LimitRange) DeepCopy() *LimitRange {
func (in *LimitRange) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2059,9 +2081,8 @@ func (in *LimitRangeList) DeepCopy() *LimitRangeList {
func (in *LimitRangeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2120,9 +2141,8 @@ func (in *List) DeepCopy() *List {
func (in *List) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2165,9 +2185,8 @@ func (in *ListOptions) DeepCopy() *ListOptions {
func (in *ListOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2279,9 +2298,8 @@ func (in *Namespace) DeepCopy() *Namespace {
func (in *Namespace) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2313,9 +2331,8 @@ func (in *NamespaceList) DeepCopy() *NamespaceList {
func (in *NamespaceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2379,9 +2396,8 @@ func (in *Node) DeepCopy() *Node {
func (in *Node) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2480,9 +2496,8 @@ func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
func (in *NodeConfigSource) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2531,9 +2546,8 @@ func (in *NodeList) DeepCopy() *NodeList {
func (in *NodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2557,9 +2571,8 @@ func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2786,8 +2799,7 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.DeletionGracePeriodSeconds != nil {
@ -2867,9 +2879,8 @@ func (in *ObjectReference) DeepCopy() *ObjectReference {
func (in *ObjectReference) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2896,9 +2907,8 @@ func (in *PersistentVolume) DeepCopy() *PersistentVolume {
func (in *PersistentVolume) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2925,9 +2935,8 @@ func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -2977,9 +2986,8 @@ func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3111,9 +3119,8 @@ func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3314,7 +3321,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
*out = nil
} else {
*out = new(CSIPersistentVolumeSource)
**out = **in
(*in).DeepCopyInto(*out)
}
}
return
@ -3369,6 +3376,15 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
**out = **in
}
}
if in.NodeAffinity != nil {
in, out := &in.NodeAffinity, &out.NodeAffinity
if *in == nil {
*out = nil
} else {
*out = new(VolumeNodeAffinity)
(*in).DeepCopyInto(*out)
}
}
return
}
@ -3438,9 +3454,8 @@ func (in *Pod) DeepCopy() *Pod {
func (in *Pod) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3554,9 +3569,8 @@ func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3661,9 +3675,8 @@ func (in *PodExecOptions) DeepCopy() *PodExecOptions {
func (in *PodExecOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3695,9 +3708,8 @@ func (in *PodList) DeepCopy() *PodList {
func (in *PodList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3718,8 +3730,7 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.TailLines != nil {
@ -3757,9 +3768,8 @@ func (in *PodLogOptions) DeepCopy() *PodLogOptions {
func (in *PodLogOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3788,9 +3798,8 @@ func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -3814,14 +3823,22 @@ func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
*out = *in
if in.ShareProcessNamespace != nil {
in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
if *in == nil {
@ -4042,8 +4059,7 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
if in.InitContainerStatuses != nil {
@ -4096,9 +4112,8 @@ func (in *PodStatusResult) DeepCopy() *PodStatusResult {
func (in *PodStatusResult) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4124,9 +4139,8 @@ func (in *PodTemplate) DeepCopy() *PodTemplate {
func (in *PodTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4158,9 +4172,8 @@ func (in *PodTemplateList) DeepCopy() *PodTemplateList {
func (in *PodTemplateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4409,9 +4422,8 @@ func (in *RangeAllocation) DeepCopy() *RangeAllocation {
func (in *RangeAllocation) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4438,9 +4450,8 @@ func (in *ReplicationController) DeepCopy() *ReplicationController {
func (in *ReplicationController) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4489,9 +4500,8 @@ func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList {
func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4566,6 +4576,28 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceList) DeepCopyInto(out *ResourceList) {
{
in := &in
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
func (in ResourceList) DeepCopy() ResourceList {
if in == nil {
return nil
}
out := new(ResourceList)
in.DeepCopyInto(out)
return *out
}
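
Usage sketch (not part of the diff): ResourceList is a map type, so its generated DeepCopy takes a value receiver and deep-copies each Quantity element by element. Values are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	rl := core.ResourceList{
		core.ResourceCPU: resource.MustParse("500m"),
	}
	cp := rl.DeepCopy()
	q := cp[core.ResourceCPU]
	q.Set(2) // two whole CPUs; mutates only the copied Quantity
	cp[core.ResourceCPU] = q
	orig := rl[core.ResourceCPU]
	fmt.Println(orig.String(), q.String()) // "500m" "2"
}
```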
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
*out = *in
@ -4590,9 +4622,8 @@ func (in *ResourceQuota) DeepCopy() *ResourceQuota {
func (in *ResourceQuota) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4624,9 +4655,8 @@ func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4817,9 +4847,8 @@ func (in *Secret) DeepCopy() *Secret {
func (in *Secret) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -4903,9 +4932,8 @@ func (in *SecretList) DeepCopy() *SecretList {
func (in *SecretList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5099,9 +5127,8 @@ func (in *SerializedReference) DeepCopy() *SerializedReference {
func (in *SerializedReference) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5128,9 +5155,8 @@ func (in *Service) DeepCopy() *Service {
func (in *Service) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5174,9 +5200,8 @@ func (in *ServiceAccount) DeepCopy() *ServiceAccount {
func (in *ServiceAccount) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5208,9 +5233,8 @@ func (in *ServiceAccountList) DeepCopy() *ServiceAccountList {
func (in *ServiceAccountList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5242,9 +5266,8 @@ func (in *ServiceList) DeepCopy() *ServiceList {
func (in *ServiceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5285,9 +5308,8 @@ func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions {
func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -5470,8 +5492,7 @@ func (in *Taint) DeepCopyInto(out *Taint) {
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
}
return
@ -5570,6 +5591,31 @@ func (in *VolumeMount) DeepCopy() *VolumeMount {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) {
*out = *in
if in.Required != nil {
in, out := &in.Required, &out.Required
if *in == nil {
*out = nil
} else {
*out = new(NodeSelector)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity.
func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity {
if in == nil {
return nil
}
out := new(VolumeNodeAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
*out = *in


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package extensions
@ -162,9 +162,8 @@ func (in *DaemonSet) DeepCopy() *DaemonSet {
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -213,9 +212,8 @@ func (in *DaemonSetList) DeepCopy() *DaemonSetList {
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -335,9 +333,8 @@ func (in *Deployment) DeepCopy() *Deployment {
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -387,9 +384,8 @@ func (in *DeploymentList) DeepCopy() *DeploymentList {
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -421,9 +417,8 @@ func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -652,9 +647,8 @@ func (in *Ingress) DeepCopy() *Ingress {
func (in *Ingress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -703,9 +697,8 @@ func (in *IngressList) DeepCopy() *IngressList {
func (in *IngressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -850,9 +843,8 @@ func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -884,9 +876,8 @@ func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -977,9 +968,8 @@ func (in *ReplicaSet) DeepCopy() *ReplicaSet {
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1028,9 +1018,8 @@ func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -1103,9 +1092,8 @@ func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy {
func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package networking
@ -71,9 +71,8 @@ func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -165,9 +164,8 @@ func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@ -21,7 +21,7 @@ import (
"sync"
"github.com/golang/glog"
appsv1beta1 "k8s.io/api/apps/v1beta1"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
@ -436,18 +436,18 @@ func NewControllerRevisionControllerRefManager(
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ControllerRevisions that you now own is
// returned.
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*appsv1beta1.ControllerRevision) ([]*appsv1beta1.ControllerRevision, error) {
var claimed []*appsv1beta1.ControllerRevision
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
var claimed []*apps.ControllerRevision
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptControllerRevision(obj.(*appsv1beta1.ControllerRevision))
return m.AdoptControllerRevision(obj.(*apps.ControllerRevision))
}
release := func(obj metav1.Object) error {
return m.ReleaseControllerRevision(obj.(*appsv1beta1.ControllerRevision))
return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision))
}
for _, h := range histories {
@ -465,7 +465,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor
// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *appsv1beta1.ControllerRevision) error {
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
}
@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history
// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *appsv1beta1.ControllerRevision) error {
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error {
glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID)
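The adopt/release mechanics above are driven entirely through ClaimControllerRevisions. A minimal caller-side sketch, assuming a manager already built by NewControllerRevisionControllerRefManager (its parameters are not shown in this hunk) and the vendored import path k8s.io/kubernetes/pkg/controller:

package main

import (
	apps "k8s.io/api/apps/v1"
	"k8s.io/kubernetes/pkg/controller"
)

// claimHistories adopts matching revisions, releases mismatched ones we own,
// and returns only the revisions we end up owning.
func claimHistories(m *controller.ControllerRevisionControllerRefManager, histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
	claimed, err := m.ClaimControllerRevisions(histories)
	if err != nil {
		// Aggregated error: some adopt/release patches failed; callers retry on the next sync.
		return nil, err
	}
	return claimed, nil
}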


@ -64,10 +64,9 @@ const (
Accelerators utilfeature.Feature = "Accelerators"
// owner: @jiayingz
// alpha: v1.8
// beta: v1.10
//
// Enables support for Device Plugins
// Only Nvidia GPUs are tested as of v1.8.
DevicePlugins utilfeature.Feature = "DevicePlugins"
// owner: @gmarek
@ -191,7 +190,7 @@ const (
CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume"
// owner @MrHohn
// alpha: v1.9
// beta: v1.10
//
// Support configurable pod DNS parameters.
CustomPodDNS utilfeature.Feature = "CustomPodDNS"
@ -203,10 +202,10 @@ const (
BlockVolume utilfeature.Feature = "BlockVolume"
// owner: @pospispa
// alpha: v1.9
// beta: v1.10
//
// Postpone deletion of a PV or a PVC when they are being used
StorageProtection utilfeature.Feature = "StorageProtection"
StorageObjectInUseProtection utilfeature.Feature = "StorageObjectInUseProtection"
// owner: @aveshagarwal
// alpha: v1.9
@ -238,6 +237,30 @@ const (
// Mount secret, configMap, downwardAPI and projected volumes ReadOnly. Note: this feature
// gate is present only for backward compatibility, it will be removed in the 1.11 release.
ReadOnlyAPIDataVolumes utilfeature.Feature = "ReadOnlyAPIDataVolumes"
// owner: @k82cn
// alpha: v1.10
//
// Schedule DaemonSet Pods by default scheduler instead of DaemonSet controller
NoDaemonSetScheduler utilfeature.Feature = "NoDaemonSetScheduler"
// owner: @mikedanese
// alpha: v1.10
//
// Implement TokenRequest endpoint on service account resources.
TokenRequest utilfeature.Feature = "TokenRequest"
// owner: @Random-Liu
// alpha: v1.10
//
// Enable container log rotation for cri container runtime
CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation"
// owner: @verult
// beta: v1.10
//
// Enables the regional PD feature on GCE.
GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk"
)
func init() {
@ -253,11 +276,11 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta},
ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha},
Accelerators: {Default: false, PreRelease: utilfeature.Alpha},
DevicePlugins: {Default: false, PreRelease: utilfeature.Alpha},
DevicePlugins: {Default: true, PreRelease: utilfeature.Beta},
TaintBasedEvictions: {Default: false, PreRelease: utilfeature.Alpha},
RotateKubeletServerCertificate: {Default: false, PreRelease: utilfeature.Alpha},
RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta},
PersistentLocalVolumes: {Default: false, PreRelease: utilfeature.Alpha},
PersistentLocalVolumes: {Default: true, PreRelease: utilfeature.Beta},
LocalStorageCapacityIsolation: {Default: false, PreRelease: utilfeature.Alpha},
HugePages: {Default: true, PreRelease: utilfeature.Beta},
DebugContainers: {Default: false, PreRelease: utilfeature.Alpha},
@ -270,15 +293,19 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
CPUManager: {Default: true, PreRelease: utilfeature.Beta},
ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha},
MountContainers: {Default: false, PreRelease: utilfeature.Alpha},
VolumeScheduling: {Default: false, PreRelease: utilfeature.Alpha},
VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta},
CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta},
CustomPodDNS: {Default: false, PreRelease: utilfeature.Alpha},
CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta},
BlockVolume: {Default: false, PreRelease: utilfeature.Alpha},
StorageProtection: {Default: false, PreRelease: utilfeature.Alpha},
StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.Beta},
ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha},
SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Beta},
SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.Beta},
SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha},
HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha},
NoDaemonSetScheduler: {Default: false, PreRelease: utilfeature.Alpha},
TokenRequest: {Default: false, PreRelease: utilfeature.Alpha},
CRIContainerLogRotation: {Default: false, PreRelease: utilfeature.Alpha},
GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
@ -290,7 +317,8 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
apiextensionsfeatures.CustomResourceSubresources: {Default: false, PreRelease: utilfeature.Alpha},
// features that enable backwards compatibility but are scheduled to be removed
ServiceProxyAllowExternalIPs: {Default: false, PreRelease: utilfeature.Deprecated},
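Since several gates graduate to beta here, it is worth seeing how a component consults them. A minimal sketch, assuming the standard utilfeature helpers this file already builds on (Set mirrors what a --feature-gates=... flag does):

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func main() {
	// Flip an alpha gate on, as an operator would via --feature-gates=...
	if err := utilfeature.DefaultFeatureGate.Set("CRIContainerLogRotation=true"); err != nil {
		panic(err)
	}
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled(features.CRIContainerLogRotation)) // true (overridden above)
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled(features.DevicePlugins))           // true (new beta default)
}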


@ -1066,7 +1066,7 @@ func (m *PodSandboxStatus) GetAnnotations() map[string]string {
type PodSandboxStatusResponse struct {
// Status of the PodSandbox.
Status *PodSandboxStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
// Info is extra information of the PodSandbox. The key could be abitrary string, and
// Info is extra information of the PodSandbox. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. network namespace for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -2401,7 +2401,7 @@ func (m *ContainerStatus) GetLogPath() string {
type ContainerStatusResponse struct {
// Status of the container.
Status *ContainerStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
// Info is extra information of the Container. The key could be abitrary string, and
// Info is extra information of the Container. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. pid for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -2870,7 +2870,7 @@ func (m *ImageStatusRequest) GetVerbose() bool {
type ImageStatusResponse struct {
// Status of the image.
Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"`
// Info is extra information of the Image. The key could be abitrary string, and
// Info is extra information of the Image. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful
// for debug, e.g. image config for oci image based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -3173,7 +3173,7 @@ func (m *StatusRequest) GetVerbose() bool {
type StatusResponse struct {
// Status of the Runtime.
Status *RuntimeStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
// Info is extra information of the Runtime. The key could be abitrary string, and
// Info is extra information of the Runtime. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. plugins used by the container runtime.
// It should only be returned non-empty when Verbose is true.
@ -3739,7 +3739,9 @@ type RuntimeServiceClient interface {
UpdateContainerResources(ctx context.Context, in *UpdateContainerResourcesRequest, opts ...grpc.CallOption) (*UpdateContainerResourcesResponse, error)
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. This is often called after the log file has been
// rotated.
// rotated. If the container is not running, container runtime can choose
// to either create a new log file and return nil, or return an error.
// Once it returns error, new container log file MUST NOT be created.
ReopenContainerLog(ctx context.Context, in *ReopenContainerLogRequest, opts ...grpc.CallOption) (*ReopenContainerLogResponse, error)
// ExecSync runs a command in a container synchronously.
ExecSync(ctx context.Context, in *ExecSyncRequest, opts ...grpc.CallOption) (*ExecSyncResponse, error)
@ -4017,7 +4019,9 @@ type RuntimeServiceServer interface {
UpdateContainerResources(context.Context, *UpdateContainerResourcesRequest) (*UpdateContainerResourcesResponse, error)
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. This is often called after the log file has been
// rotated.
// rotated. If the container is not running, container runtime can choose
// to either create a new log file and return nil, or return an error.
// Once it returns error, new container log file MUST NOT be created.
ReopenContainerLog(context.Context, *ReopenContainerLogRequest) (*ReopenContainerLogResponse, error)
// ExecSync runs a command in a container synchronously.
ExecSync(context.Context, *ExecSyncRequest) (*ExecSyncResponse, error)


@ -66,7 +66,9 @@ service RuntimeService {
rpc UpdateContainerResources(UpdateContainerResourcesRequest) returns (UpdateContainerResourcesResponse) {}
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. This is often called after the log file has been
// rotated.
// rotated. If the container is not running, container runtime can choose
// to either create a new log file and return nil, or return an error.
// Once it returns error, new container log file MUST NOT be created.
rpc ReopenContainerLog(ReopenContainerLogRequest) returns (ReopenContainerLogResponse) {}
// ExecSync runs a command in a container synchronously.
@ -414,7 +416,7 @@ message PodSandboxStatus {
message PodSandboxStatusResponse {
// Status of the PodSandbox.
PodSandboxStatus status = 1;
// Info is extra information of the PodSandbox. The key could be abitrary string, and
// Info is extra information of the PodSandbox. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. network namespace for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -852,7 +854,7 @@ message ContainerStatus {
message ContainerStatusResponse {
// Status of the container.
ContainerStatus status = 1;
// Info is extra information of the Container. The key could be abitrary string, and
// Info is extra information of the Container. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. pid for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -993,7 +995,7 @@ message ImageStatusRequest {
message ImageStatusResponse {
// Status of the image.
Image image = 1;
// Info is extra information of the Image. The key could be abitrary string, and
// Info is extra information of the Image. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful
// for debug, e.g. image config for oci image based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -1088,7 +1090,7 @@ message StatusRequest {
message StatusResponse {
// Status of the Runtime.
RuntimeStatus status = 1;
// Info is extra information of the Runtime. The key could be abitrary string, and
// Info is extra information of the Runtime. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. plugins used by the container runtime.
// It should only be returned non-empty when Verbose is true.
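A hedged sketch of a runtime-side implementation honoring the ReopenContainerLog contract above; isRunning and reopenLog are hypothetical helpers, and the import path for the generated CRI types is an assumption (the real layout is pkg/kubelet/apis/cri/{version}):

package main

import (
	"fmt"

	context "golang.org/x/net/context"

	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" // assumed version path
)

type runtimeServer struct{}

// Hypothetical helpers, assumptions for this sketch only.
func (s *runtimeServer) isRunning(id string) bool  { return false }
func (s *runtimeServer) reopenLog(id string) error { return nil }

func (s *runtimeServer) ReopenContainerLog(ctx context.Context, req *runtimeapi.ReopenContainerLogRequest) (*runtimeapi.ReopenContainerLogResponse, error) {
	if !s.isRunning(req.ContainerId) {
		// Per the contract: once an error is returned, a new container log
		// file MUST NOT be created.
		return nil, fmt.Errorf("container %s is not running", req.ContainerId)
	}
	if err := s.reopenLog(req.ContainerId); err != nil {
		return nil, err
	}
	return &runtimeapi.ReopenContainerLogResponse{}, nil
}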


@ -1,25 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
const (
// When kubelet is started with the "external" cloud provider, then
// it sets this annotation on the node to denote an ip address set from the
// cmd line flag. This ip is verified with the cloudprovider as valid by
// the cloud-controller-manager
AnnotationProvidedIPAddr = "alpha.kubernetes.io/provided-node-ip"
)


@ -1,33 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
const (
LabelHostname = "kubernetes.io/hostname"
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
LabelMultiZoneDelimiter = "__"
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
LabelInstanceType = "beta.kubernetes.io/instance-type"
LabelOS = "beta.kubernetes.io/os"
LabelArch = "beta.kubernetes.io/arch"
)
// When the --failure-domains scheduler flag is not specified,
// DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
var DefaultFailureDomains string = LabelHostname + "," + LabelZoneFailureDomain + "," + LabelZoneRegion


@ -19,6 +19,8 @@ package container
import (
"fmt"
"time"
"github.com/golang/glog"
)
// Specifies a policy for garbage collecting containers.
@ -80,5 +82,6 @@ func (cgc *realContainerGC) GarbageCollect() error {
}
func (cgc *realContainerGC) DeleteAllUnusedContainers() error {
glog.Infof("attempting to delete unused containers")
return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
}


@ -26,9 +26,13 @@ import (
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/util/conntrack"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
"k8s.io/utils/exec"
)
// HostPortManager is an interface for adding and removing hostport for a given pod sandbox.
@ -44,18 +48,26 @@ type HostPortManager interface {
}
type hostportManager struct {
hostPortMap map[hostport]closeable
iptables utiliptables.Interface
portOpener hostportOpener
mu sync.Mutex
hostPortMap map[hostport]closeable
execer exec.Interface
conntrackFound bool
iptables utiliptables.Interface
portOpener hostportOpener
mu sync.Mutex
}
func NewHostportManager(iptables utiliptables.Interface) HostPortManager {
return &hostportManager{
h := &hostportManager{
hostPortMap: make(map[hostport]closeable),
execer: exec.New(),
iptables: iptables,
portOpener: openLocalPort,
}
h.conntrackFound = conntrack.Exists(h.execer)
if !h.conntrackFound {
glog.Warningf("The binary conntrack is not installed, this can cause failures in network connection cleanup.")
}
return h
}
func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInterfaceName string) (err error) {
@ -103,10 +115,14 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
}
newChains := []utiliptables.Chain{}
conntrackPortsToRemove := []int{}
for _, pm := range hostportMappings {
protocol := strings.ToLower(string(pm.Protocol))
chain := getHostportChain(id, pm)
newChains = append(newChains, chain)
if pm.Protocol == v1.ProtocolUDP {
conntrackPortsToRemove = append(conntrackPortsToRemove, int(pm.HostPort))
}
// Add new hostport chain
writeLine(natChains, utiliptables.MakeChainLine(chain))
@ -150,6 +166,21 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
// clean up opened host port if encounter any error
return utilerrors.NewAggregate([]error{err, hm.closeHostports(hostportMappings)})
}
isIpv6 := utilnet.IsIPv6(podPortMapping.IP)
// Remove conntrack entries just after adding the new iptables rules. If the conntrack entry is removed along with
// the IP tables rule, it can be the case that the packets received by the node after iptables rule removal will
// create a new conntrack entry without any DNAT. That will result in blackhole of the traffic even after correct
// iptables rules have been added back.
if hm.execer != nil && hm.conntrackFound {
glog.Infof("Starting to delete udp conntrack entries: %v, isIPv6 - %v", conntrackPortsToRemove, isIpv6)
for _, port := range conntrackPortsToRemove {
err = conntrack.ClearEntriesForPort(hm.execer, port, isIpv6, v1.ProtocolUDP)
if err != nil {
glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", port, err)
}
}
}
return nil
}
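The comment above captures the crucial ordering: conntrack entries are flushed only after the replacement iptables rules are installed, since flushing first would let packets arriving in the gap install fresh DNAT-less entries and blackhole the traffic. A minimal sketch of the same cleanup pattern, using only the calls visible above:

package main

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/util/conntrack"
	"k8s.io/utils/exec"
)

// clearUDPHostports clears UDP conntrack entries for the given host ports.
// Call it only after the new iptables rules are in place.
func clearUDPHostports(ports []int, isIPv6 bool) {
	execer := exec.New()
	if !conntrack.Exists(execer) {
		// Without the conntrack binary there is nothing to clean up with.
		return
	}
	for _, port := range ports {
		// Errors are non-fatal here; the proxier logs and moves on.
		_ = conntrack.ClearEntriesForPort(execer, port, isIPv6, v1.ProtocolUDP)
	}
}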


@ -45,7 +45,7 @@ const (
// options contains details about which streams are required for
// port forwarding.
// All fields incldued in V4Options need to be expressed explicitly in the
// All fields included in V4Options need to be expressed explicitly in the
// CRI (pkg/kubelet/apis/cri/{version}/api.proto) PortForwardRequest.
type V4Options struct {
Ports []int32


@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeapi "k8s.io/kubernetes/pkg/apis/core"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)
const (
@ -139,15 +140,16 @@ func (sp SyncPodType) String() string {
}
}
// IsCriticalPod returns true if the pod bears the critical pod annotation
// key. Both the rescheduler and the kubelet use this key to make admission
// and scheduling decisions.
// IsCriticalPod returns true if the pod bears the critical pod annotation key or if the pod's priority is greater than
// or equal to SystemCriticalPriority. Both the rescheduler (deprecated in 1.10) and the kubelet use this function
// to make admission and scheduling decisions.
func IsCriticalPod(pod *v1.Pod) bool {
return IsCritical(pod.Namespace, pod.Annotations)
return IsCritical(pod.Namespace, pod.Annotations) || (pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(pod.Namespace, *pod.Spec.Priority))
}
// IsCritical returns true if parameters bear the critical pod annotation
// key. The DaemonSetController uses this key directly to make scheduling decisions.
// TODO: @ravig - Deprecated. Remove this when we move to resolving critical pods based on priorityClassName.
func IsCritical(ns string, annotations map[string]string) bool {
// Critical pods are restricted to "kube-system" namespace as of now.
if ns != kubeapi.NamespaceSystem {
@ -159,3 +161,15 @@ func IsCritical(ns string, annotations map[string]string) bool {
}
return false
}
// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
func IsCriticalPodBasedOnPriority(ns string, priority int32) bool {
// Critical pods are restricted to "kube-system" namespace as of now.
if ns != kubeapi.NamespaceSystem {
return false
}
if priority >= schedulerapi.SystemCriticalPriority {
return true
}
return false
}
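A minimal sketch of the new priority-based path, reusing the vendored scheduler API constant; the pod name is illustrative only:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

func main() {
	prio := schedulerapi.SystemCriticalPriority
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "example-dns", Namespace: "kube-system"},
		Spec:       v1.PodSpec{Priority: &prio},
	}
	// Critical via priority alone, with no annotation; the same pod outside
	// kube-system would not be considered critical.
	fmt.Println(kubetypes.IsCriticalPod(pod)) // true
}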


@ -26,12 +26,12 @@ const (
// SchedulerPort is the default port for the scheduler status server.
// May be overridden by a flag at startup.
SchedulerPort = 10251
// ControllerManagerPort is the default port for the controller manager status server.
// InsecureKubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
ControllerManagerPort = 10252
// CloudControllerManagerPort is the default port for the cloud controller manager server.
InsecureKubeControllerManagerPort = 10252
// InsecureCloudControllerManagerPort is the default port for the cloud controller manager server.
// This value may be overridden by a flag at startup.
CloudControllerManagerPort = 10253
InsecureCloudControllerManagerPort = 10253
// KubeletReadOnlyPort exposes basic read-only services from the kubelet.
// May be overridden by a flag at startup.
// This is necessary for heapster to collect monitoring stats from the kubelet

vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go (new generated vendored file, 314 lines)

@ -0,0 +1,314 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"net"
"reflect"
"strconv"
"sync"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
utilnet "k8s.io/kubernetes/pkg/util/net"
)
// BaseEndpointInfo contains base information that defines an endpoint.
// This could be used directly by proxier while processing endpoints,
// or can be used for constructing a more specific EndpointInfo struct
// defined by the proxier if needed.
type BaseEndpointInfo struct {
Endpoint string // TODO: should be an endpointString type
// IsLocal indicates whether the endpoint is running in same host as kube-proxy.
IsLocal bool
}
var _ Endpoint = &BaseEndpointInfo{}
// String is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) String() string {
return info.Endpoint
}
// GetIsLocal is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) GetIsLocal() bool {
return info.IsLocal
}
// IP returns just the IP part of the endpoint; it's part of the proxy.Endpoint interface.
func (info *BaseEndpointInfo) IP() string {
return utilproxy.IPPart(info.Endpoint)
}
// Port returns just the Port part of the endpoint.
func (info *BaseEndpointInfo) Port() (int, error) {
return utilproxy.PortPart(info.Endpoint)
}
// Equal is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) Equal(other Endpoint) bool {
return info.String() == other.String() && info.GetIsLocal() == other.GetIsLocal()
}
func newBaseEndpointInfo(IP string, port int, isLocal bool) *BaseEndpointInfo {
return &BaseEndpointInfo{
Endpoint: net.JoinHostPort(IP, strconv.Itoa(port)),
IsLocal: isLocal,
}
}
type makeEndpointFunc func(info *BaseEndpointInfo) Endpoint
// EndpointChangeTracker carries state about uncommitted changes to an arbitrary number of
// Endpoints, keyed by their namespace and name.
type EndpointChangeTracker struct {
// lock protects items.
lock sync.Mutex
// hostname is the host where kube-proxy is running.
hostname string
// items maps a service to its endpointsChange.
items map[types.NamespacedName]*endpointsChange
// makeEndpointInfo allows proxier to inject customized information when processing endpoint.
makeEndpointInfo makeEndpointFunc
// isIPv6Mode indicates if change tracker is under IPv6/IPv4 mode. Nil means not applicable.
isIPv6Mode *bool
recorder record.EventRecorder
}
// NewEndpointChangeTracker initializes an EndpointChangeTracker
func NewEndpointChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc, isIPv6Mode *bool, recorder record.EventRecorder) *EndpointChangeTracker {
return &EndpointChangeTracker{
hostname: hostname,
items: make(map[types.NamespacedName]*endpointsChange),
makeEndpointInfo: makeEndpointInfo,
isIPv6Mode: isIPv6Mode,
recorder: recorder,
}
}
// Update updates the given service's endpoints change map based on the <previous, current> endpoints pair. It returns true
// if items changed, otherwise returns false. Update can be used to add/update/delete items of EndpointsChangeMap. For example,
// Add item
// - pass <nil, endpoints> as the <previous, current> pair.
// Update item
// - pass <oldEndpoints, endpoints> as the <previous, current> pair.
// Delete item
// - pass <endpoints, nil> as the <previous, current> pair.
func (ect *EndpointChangeTracker) Update(previous, current *api.Endpoints) bool {
endpoints := current
if endpoints == nil {
endpoints = previous
}
// previous == nil && current == nil is unexpected, we should return false directly.
if endpoints == nil {
return false
}
namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}
ect.lock.Lock()
defer ect.lock.Unlock()
change, exists := ect.items[namespacedName]
if !exists {
change = &endpointsChange{}
change.previous = ect.endpointsToEndpointsMap(previous)
ect.items[namespacedName] = change
}
change.current = ect.endpointsToEndpointsMap(current)
// if change.previous equal to change.current, it means no change
if reflect.DeepEqual(change.previous, change.current) {
delete(ect.items, namespacedName)
}
return len(ect.items) > 0
}
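// Usage sketch for the <previous, current> convention (comments only; a
// tracker `ect` and Endpoints objects `oldEps`/`curEps` are assumed):
//   ect.Update(nil, curEps)    // add: begin tracking curEps
//   ect.Update(oldEps, curEps) // update: accumulate oldEps -> curEps
//   ect.Update(curEps, nil)    // delete: curEps is going away
// The returned bool reports whether any uncommitted change is still pending.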
// endpointsChange contains all changes to endpoints that happened since proxy rules were synced. For a single object,
// changes are accumulated, i.e. previous is state from before applying the changes,
// current is state after applying the changes.
type endpointsChange struct {
previous EndpointsMap
current EndpointsMap
}
// UpdateEndpointMapResult is the updated results after applying endpoints changes.
type UpdateEndpointMapResult struct {
// HCEndpointsLocalIPSize maps an endpoints name to the length of its local IPs.
HCEndpointsLocalIPSize map[types.NamespacedName]int
// StaleEndpoints identifies if an endpoints service pair is stale.
StaleEndpoints []ServiceEndpoint
// StaleServiceNames identifies if a service is stale.
StaleServiceNames []ServicePortName
}
// UpdateEndpointsMap updates endpointsMap based on the given changes.
func UpdateEndpointsMap(endpointsMap EndpointsMap, changes *EndpointChangeTracker) (result UpdateEndpointMapResult) {
result.StaleEndpoints = make([]ServiceEndpoint, 0)
result.StaleServiceNames = make([]ServicePortName, 0)
endpointsMap.apply(changes, &result.StaleEndpoints, &result.StaleServiceNames)
// TODO: If this will appear to be computationally expensive, consider
// computing this incrementally similarly to endpointsMap.
result.HCEndpointsLocalIPSize = make(map[types.NamespacedName]int)
localIPs := GetLocalEndpointIPs(endpointsMap)
for nsn, ips := range localIPs {
result.HCEndpointsLocalIPSize[nsn] = len(ips)
}
return result
}
// EndpointsMap maps a service name to a list of all its Endpoints.
type EndpointsMap map[ServicePortName][]Endpoint
// endpointsToEndpointsMap translates a single Endpoints object to an EndpointsMap.
// This function is used for incremental updates of endpointsMap.
//
// NOTE: endpoints object should NOT be modified.
func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *api.Endpoints) EndpointsMap {
if endpoints == nil {
return nil
}
endpointsMap := make(EndpointsMap)
// We need to build a map of portname -> all ip:ports for that
// portname. Explode Endpoints.Subsets[*] into this structure.
for i := range endpoints.Subsets {
ss := &endpoints.Subsets[i]
for i := range ss.Ports {
port := &ss.Ports[i]
if port.Port == 0 {
glog.Warningf("ignoring invalid endpoint port %s", port.Name)
continue
}
svcPortName := ServicePortName{
NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name},
Port: port.Name,
}
for i := range ss.Addresses {
addr := &ss.Addresses[i]
if addr.IP == "" {
glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
continue
}
// Filter out the incorrect IP version case.
// Any endpoint port that contains incorrect IP version will be ignored.
if ect.isIPv6Mode != nil && utilnet.IsIPv6String(addr.IP) != *ect.isIPv6Mode {
// Emit event on the corresponding service which had a different
// IP version than the endpoint.
utilproxy.LogAndEmitIncorrectIPVersionEvent(ect.recorder, "endpoints", addr.IP, endpoints.Name, endpoints.Namespace, "")
continue
}
isLocal := addr.NodeName != nil && *addr.NodeName == ect.hostname
baseEndpointInfo := newBaseEndpointInfo(addr.IP, int(port.Port), isLocal)
if ect.makeEndpointInfo != nil {
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], ect.makeEndpointInfo(baseEndpointInfo))
} else {
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], baseEndpointInfo)
}
}
if glog.V(3) {
newEPList := []string{}
for _, ep := range endpointsMap[svcPortName] {
newEPList = append(newEPList, ep.String())
}
glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
}
}
}
return endpointsMap
}
// apply the changes to EndpointsMap and update stale endpoints and service-endpoints pairs. The `staleEndpoints` argument
// is passed in to store the stale udp endpoints and the `staleServiceNames` argument is passed in to store the stale udp services.
// The changes map is cleared after applying them.
func (endpointsMap EndpointsMap) apply(changes *EndpointChangeTracker, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) {
if changes == nil {
return
}
changes.lock.Lock()
defer changes.lock.Unlock()
for _, change := range changes.items {
endpointsMap.Unmerge(change.previous)
endpointsMap.Merge(change.current)
detectStaleConnections(change.previous, change.current, staleEndpoints, staleServiceNames)
}
changes.items = make(map[types.NamespacedName]*endpointsChange)
}
// Merge ensures that the current EndpointsMap contains all <service, endpoints> pairs from the EndpointsMap passed in.
func (em EndpointsMap) Merge(other EndpointsMap) {
for svcPortName := range other {
em[svcPortName] = other[svcPortName]
}
}
// Unmerge removes the <service, endpoints> pairs from the current EndpointsMap which are contained in the EndpointsMap passed in.
func (em EndpointsMap) Unmerge(other EndpointsMap) {
for svcPortName := range other {
delete(em, svcPortName)
}
}
// GetLocalEndpointIPs returns endpoint IPs if the given endpoint is local - local means the endpoint is running on the same host as kube-proxy.
func GetLocalEndpointIPs(endpointsMap EndpointsMap) map[types.NamespacedName]sets.String {
localIPs := make(map[types.NamespacedName]sets.String)
for svcPortName, epList := range endpointsMap {
for _, ep := range epList {
if ep.GetIsLocal() {
nsn := svcPortName.NamespacedName
if localIPs[nsn] == nil {
localIPs[nsn] = sets.NewString()
}
localIPs[nsn].Insert(ep.IP())
}
}
}
return localIPs
}
// detectStaleConnections modifies <staleEndpoints> and <staleServices> with detected stale connections. <staleServiceNames>
// is used to store stale udp service in order to clear udp conntrack later.
func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) {
for svcPortName, epList := range oldEndpointsMap {
for _, ep := range epList {
stale := true
for i := range newEndpointsMap[svcPortName] {
if newEndpointsMap[svcPortName][i].Equal(ep) {
stale = false
break
}
}
if stale {
glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String())
*staleEndpoints = append(*staleEndpoints, ServiceEndpoint{Endpoint: ep.String(), ServicePortName: svcPortName})
}
}
}
for svcPortName, epList := range newEndpointsMap {
// For udp service, if its backend changes from 0 to non-0. There may exist a conntrack entry that could blackhole traffic to the service.
if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 {
*staleServiceNames = append(*staleServiceNames, svcPortName)
}
}
}
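Putting this new file together, a hedged end-to-end sketch; the hostname is illustrative, and passing nil for makeEndpointInfo, isIPv6Mode, and the recorder selects the BaseEndpointInfo defaults:

package main

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
)

// syncEndpointsOnce folds one <previous, current> Endpoints pair into an
// EndpointsMap and prints the stale UDP state a proxier must clean up.
func syncEndpointsOnce(em proxy.EndpointsMap, oldEps, curEps *api.Endpoints) {
	ect := proxy.NewEndpointChangeTracker("node-1", nil, nil, nil)
	if ect.Update(oldEps, curEps) {
		result := proxy.UpdateEndpointsMap(em, ect)
		fmt.Println("stale endpoints:", result.StaleEndpoints)
		fmt.Println("stale services:", result.StaleServiceNames)
	}
}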

File diff suppressed because it is too large

vendor/k8s.io/kubernetes/pkg/proxy/service.go (new generated vendored file, 344 lines)

@ -0,0 +1,344 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"fmt"
"net"
"reflect"
"strings"
"sync"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
apiservice "k8s.io/kubernetes/pkg/api/service"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
utilnet "k8s.io/kubernetes/pkg/util/net"
)
// BaseServiceInfo contains base information that defines a service.
// This could be used directly by proxier while processing services,
// or can be used for constructing a more specific ServiceInfo struct
// defined by the proxier if needed.
type BaseServiceInfo struct {
ClusterIP net.IP
Port int
Protocol api.Protocol
NodePort int
LoadBalancerStatus api.LoadBalancerStatus
SessionAffinityType api.ServiceAffinity
StickyMaxAgeSeconds int
ExternalIPs []string
LoadBalancerSourceRanges []string
HealthCheckNodePort int
OnlyNodeLocalEndpoints bool
}
var _ ServicePort = &BaseServiceInfo{}
// String is part of ServicePort interface.
func (info *BaseServiceInfo) String() string {
return fmt.Sprintf("%s:%d/%s", info.ClusterIP, info.Port, info.Protocol)
}
// ClusterIPString is part of ServicePort interface.
func (info *BaseServiceInfo) ClusterIPString() string {
return info.ClusterIP.String()
}
// GetProtocol is part of ServicePort interface.
func (info *BaseServiceInfo) GetProtocol() api.Protocol {
return info.Protocol
}
// GetHealthCheckNodePort is part of ServicePort interface.
func (info *BaseServiceInfo) GetHealthCheckNodePort() int {
return info.HealthCheckNodePort
}
func (sct *ServiceChangeTracker) newBaseServiceInfo(port *api.ServicePort, service *api.Service) *BaseServiceInfo {
onlyNodeLocalEndpoints := false
if apiservice.RequestsOnlyLocalTraffic(service) {
onlyNodeLocalEndpoints = true
}
var stickyMaxAgeSeconds int
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
info := &BaseServiceInfo{
ClusterIP: net.ParseIP(service.Spec.ClusterIP),
Port: int(port.Port),
Protocol: port.Protocol,
NodePort: int(port.NodePort),
// Deep-copy in case the service instance changes
LoadBalancerStatus: *helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer),
SessionAffinityType: service.Spec.SessionAffinity,
StickyMaxAgeSeconds: stickyMaxAgeSeconds,
OnlyNodeLocalEndpoints: onlyNodeLocalEndpoints,
}
if sct.isIPv6Mode == nil {
info.ExternalIPs = make([]string, len(service.Spec.ExternalIPs))
info.LoadBalancerSourceRanges = make([]string, len(service.Spec.LoadBalancerSourceRanges))
copy(info.LoadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
copy(info.ExternalIPs, service.Spec.ExternalIPs)
} else {
// Filter out the incorrect IP version case.
// If ExternalIPs and LoadBalancerSourceRanges on service contains incorrect IP versions,
// only filter out the incorrect ones.
var incorrectIPs []string
info.ExternalIPs, incorrectIPs = utilnet.FilterIncorrectIPVersion(service.Spec.ExternalIPs, *sct.isIPv6Mode)
if len(incorrectIPs) > 0 {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "externalIPs", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
}
info.LoadBalancerSourceRanges, incorrectIPs = utilnet.FilterIncorrectCIDRVersion(service.Spec.LoadBalancerSourceRanges, *sct.isIPv6Mode)
if len(incorrectIPs) > 0 {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "loadBalancerSourceRanges", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
}
}
if apiservice.NeedsHealthCheck(service) {
p := service.Spec.HealthCheckNodePort
if p == 0 {
glog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name)
} else {
info.HealthCheckNodePort = int(p)
}
}
return info
}
type makeServicePortFunc func(*api.ServicePort, *api.Service, *BaseServiceInfo) ServicePort
// serviceChange contains all changes to services that happened since proxy rules were synced. For a single object,
// changes are accumulated, i.e. previous is state from before applying the changes,
// current is state after applying all of the changes.
type serviceChange struct {
previous ServiceMap
current ServiceMap
}
// ServiceChangeTracker carries state about uncommitted changes to an arbitrary number of
// Services, keyed by their namespace and name.
type ServiceChangeTracker struct {
// lock protects items.
lock sync.Mutex
// items maps a service to its serviceChange.
items map[types.NamespacedName]*serviceChange
// makeServiceInfo allows proxier to inject customized information when processing service.
makeServiceInfo makeServicePortFunc
// isIPv6Mode indicates if change tracker is under IPv6/IPv4 mode. Nil means not applicable.
isIPv6Mode *bool
recorder record.EventRecorder
}
// NewServiceChangeTracker initializes a ServiceChangeTracker
func NewServiceChangeTracker(makeServiceInfo makeServicePortFunc, isIPv6Mode *bool, recorder record.EventRecorder) *ServiceChangeTracker {
return &ServiceChangeTracker{
items: make(map[types.NamespacedName]*serviceChange),
makeServiceInfo: makeServiceInfo,
isIPv6Mode: isIPv6Mode,
recorder: recorder,
}
}
// Update updates the given service's change map based on the <previous, current> service pair. It returns true if items changed,
// otherwise returns false. Update can be used to add/update/delete items of ServiceChangeMap. For example,
// Add item
// - pass <nil, service> as the <previous, current> pair.
// Update item
// - pass <oldService, service> as the <previous, current> pair.
// Delete item
// - pass <service, nil> as the <previous, current> pair.
func (sct *ServiceChangeTracker) Update(previous, current *api.Service) bool {
svc := current
if svc == nil {
svc = previous
}
// previous == nil && current == nil is unexpected, we should return false directly.
if svc == nil {
return false
}
namespacedName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}
sct.lock.Lock()
defer sct.lock.Unlock()
change, exists := sct.items[namespacedName]
if !exists {
change = &serviceChange{}
change.previous = sct.serviceToServiceMap(previous)
sct.items[namespacedName] = change
}
change.current = sct.serviceToServiceMap(current)
// if change.previous equal to change.current, it means no change
if reflect.DeepEqual(change.previous, change.current) {
delete(sct.items, namespacedName)
}
return len(sct.items) > 0
}
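// Usage sketch, mirroring EndpointChangeTracker.Update (comments only; a
// tracker `sct` and Service objects `oldSvc`/`curSvc` are assumed):
//   sct.Update(nil, curSvc)    // add
//   sct.Update(oldSvc, curSvc) // update
//   sct.Update(curSvc, nil)    // delete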
// UpdateServiceMapResult is the updated results after applying service changes.
type UpdateServiceMapResult struct {
// HCServiceNodePorts is a map of Service names to node port numbers which indicate the health of that Service on this Node.
// The value(uint16) of HCServices map is the service health check node port.
HCServiceNodePorts map[types.NamespacedName]uint16
// UDPStaleClusterIP holds stale (no longer assigned to a Service) Service IPs that had UDP ports.
// Callers can use this to abort timeout-waits or clear connection-tracking information.
UDPStaleClusterIP sets.String
}
// UpdateServiceMap updates ServiceMap based on the given changes.
func UpdateServiceMap(serviceMap ServiceMap, changes *ServiceChangeTracker) (result UpdateServiceMapResult) {
result.UDPStaleClusterIP = sets.NewString()
serviceMap.apply(changes, result.UDPStaleClusterIP)
// TODO: If this will appear to be computationally expensive, consider
// computing this incrementally similarly to serviceMap.
result.HCServiceNodePorts = make(map[types.NamespacedName]uint16)
for svcPortName, info := range serviceMap {
if info.GetHealthCheckNodePort() != 0 {
result.HCServiceNodePorts[svcPortName.NamespacedName] = uint16(info.GetHealthCheckNodePort())
}
}
return result
}
// ServiceMap maps a service to its ServicePort.
type ServiceMap map[ServicePortName]ServicePort
// serviceToServiceMap translates a single Service object to a ServiceMap.
//
// NOTE: service object should NOT be modified.
func (sct *ServiceChangeTracker) serviceToServiceMap(service *api.Service) ServiceMap {
if service == nil {
return nil
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if utilproxy.ShouldSkipService(svcName, service) {
return nil
}
if len(service.Spec.ClusterIP) != 0 {
// Filter out the incorrect IP version case.
// If ClusterIP on service has incorrect IP version, service itself will be ignored.
if sct.isIPv6Mode != nil && utilnet.IsIPv6String(service.Spec.ClusterIP) != *sct.isIPv6Mode {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "clusterIP", service.Spec.ClusterIP, service.Namespace, service.Name, service.UID)
return nil
}
}
serviceMap := make(ServiceMap)
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
svcPortName := ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
baseSvcInfo := sct.newBaseServiceInfo(servicePort, service)
if sct.makeServiceInfo != nil {
serviceMap[svcPortName] = sct.makeServiceInfo(servicePort, service, baseSvcInfo)
} else {
serviceMap[svcPortName] = baseSvcInfo
}
}
return serviceMap
}
// apply the changes to ServiceMap and update the stale udp cluster IP set. The UDPStaleClusterIP argument is passed in to store the
// udp protocol service cluster ip when service is deleted from the ServiceMap.
func (serviceMap *ServiceMap) apply(changes *ServiceChangeTracker, UDPStaleClusterIP sets.String) {
changes.lock.Lock()
defer changes.lock.Unlock()
for _, change := range changes.items {
serviceMap.merge(change.current)
// filter out the Update event of current changes from previous changes before calling unmerge() so that we can
// skip deleting the Update events.
change.previous.filter(change.current)
serviceMap.unmerge(change.previous, UDPStaleClusterIP)
}
// clear changes after applying them to ServiceMap.
changes.items = make(map[types.NamespacedName]*serviceChange)
return
}
// merge adds other ServiceMap's elements to current ServiceMap.
// If collision, other ALWAYS win. Otherwise add the other to current.
// In other words, if some elements in current collide with other, current is updated by other.
// It returns a string set which stores all the newly merged services' identifiers, ServicePortName.String(), to help users
// tell if a service is deleted or updated.
// The returned value is one of the arguments of ServiceMap.unmerge().
// ServiceMap A Merge ServiceMap B will do the following 2 things:
// * update ServiceMap A.
// * produce a string set which stores all other ServiceMap's ServicePortName.String().
// For example,
// - A{}
// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - produce string set {"ns/cluster-ip:http"}
// - A{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 345, "UDP"}}
// - B{{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - A updated to be {{"ns", "cluster-ip", "http"}: {"172.16.55.10", 1234, "TCP"}}
// - produce string set {"ns/cluster-ip:http"}
func (sm *ServiceMap) merge(other ServiceMap) sets.String {
// existingPorts is going to store all identifiers of all services in `other` ServiceMap.
existingPorts := sets.NewString()
for svcPortName, info := range other {
// Take ServicePortName.String() as the newly merged service's identifier and put it into existingPorts.
existingPorts.Insert(svcPortName.String())
_, exists := (*sm)[svcPortName]
if !exists {
glog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String())
} else {
glog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String())
}
(*sm)[svcPortName] = info
}
return existingPorts
}
// filter filters out elements from ServiceMap based on given ports string sets.
func (sm *ServiceMap) filter(other ServiceMap) {
for svcPortName := range *sm {
// skip the delete for Update event.
if _, ok := other[svcPortName]; ok {
delete(*sm, svcPortName)
}
}
}
// unmerge deletes all other ServiceMap's elements from the current ServiceMap. We pass in the UDPStaleClusterIP string set
// for storing the stale udp service cluster IPs. We will clear stale udp connections based on UDPStaleClusterIP later.
func (sm *ServiceMap) unmerge(other ServiceMap, UDPStaleClusterIP sets.String) {
for svcPortName := range other {
info, exists := (*sm)[svcPortName]
if exists {
glog.V(1).Infof("Removing service port %q", svcPortName)
if info.GetProtocol() == api.ProtocolUDP {
UDPStaleClusterIP.Insert(info.ClusterIPString())
}
delete(*sm, svcPortName)
} else {
glog.Errorf("Service port %q doesn't exist", svcPortName)
}
}
}
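As with endpoints, a hedged sketch of consuming the results, assuming oldSvc and newSvc come from a Service informer:

package main

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/proxy"
)

// applyServiceChanges folds one service change into the map and surfaces the
// cleanup work: stale UDP cluster IPs and health-check node ports.
func applyServiceChanges(sct *proxy.ServiceChangeTracker, sm proxy.ServiceMap, oldSvc, newSvc *api.Service) {
	if sct.Update(oldSvc, newSvc) {
		result := proxy.UpdateServiceMap(sm, sct)
		fmt.Println("stale UDP cluster IPs:", result.UDPStaleClusterIP.List())
		fmt.Println("health-check node ports:", result.HCServiceNodePorts)
	}
}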


@ -20,11 +20,12 @@ import (
"fmt"
"k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/core"
)
// ProxyProvider is the interface provided by proxier implementations.
type ProxyProvider interface {
// Sync immediately synchronizes the ProxyProvider's current state to iptables.
// Sync immediately synchronizes the ProxyProvider's current state to proxy rules.
Sync()
// SyncLoop runs periodic work.
// This is expected to run as a goroutine or as the main loop of the app.
@ -33,7 +34,7 @@ type ProxyProvider interface {
}
// ServicePortName carries a namespace + name + portname. This is the unique
// identfier for a load-balanced service.
// identifier for a load-balanced service.
type ServicePortName struct {
types.NamespacedName
Port string
@ -42,3 +43,37 @@ type ServicePortName struct {
func (spn ServicePortName) String() string {
return fmt.Sprintf("%s:%s", spn.NamespacedName.String(), spn.Port)
}
// ServicePort is an interface which abstracts information about a service.
type ServicePort interface {
// String returns service string. An example format can be: `IP:Port/Protocol`.
String() string
// ClusterIPString returns service cluster IP in string format.
ClusterIPString() string
// GetProtocol returns service protocol.
GetProtocol() api.Protocol
// GetHealthCheckNodePort returns the service health check node port if present. If it returns 0, it means not present.
GetHealthCheckNodePort() int
}
// Endpoint is an interface which abstracts information about an endpoint.
// TODO: Rename functions to be consistent with ServicePort.
type Endpoint interface {
// String returns endpoint string. An example format can be: `IP:Port`.
// We take the returned value as ServiceEndpoint.Endpoint.
String() string
// GetIsLocal returns true if the endpoint is running on the same host as kube-proxy, otherwise returns false.
GetIsLocal() bool
// IP returns IP part of the endpoint.
IP() string
// Port returns the Port part of the endpoint.
Port() (int, error)
// Equal checks if two endpoints are equal.
Equal(Endpoint) bool
}
// ServiceEndpoint is used to identify a service and one of its endpoint pair.
type ServiceEndpoint struct {
Endpoint string
ServicePortName ServicePortName
}


@ -47,6 +47,7 @@ func IPPart(s string) string {
return ""
}
// PortPart returns just the port part of an endpoint string.
func PortPart(s string) (int, error) {
// Must be IP:port
_, port, err := net.SplitHostPort(s)

vendor/k8s.io/kubernetes/pkg/proxy/util/network.go (new generated vendored file, 45 lines)

@ -0,0 +1,45 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net"
)
// NetworkInterfacer defines an interface for several net library functions. Production
// code will forward to net library functions, and unit tests will override the methods
// for testing purposes.
type NetworkInterfacer interface {
Addrs(intf *net.Interface) ([]net.Addr, error)
Interfaces() ([]net.Interface, error)
}
// RealNetwork implements the NetworkInterfacer interface for production code, just
// wrapping the underlying net library function calls.
type RealNetwork struct{}
// Addrs wraps net.Interface.Addrs(); it's part of the NetworkInterfacer interface.
func (_ RealNetwork) Addrs(intf *net.Interface) ([]net.Addr, error) {
return intf.Addrs()
}
// Interfaces wraps net.Interfaces(); it's part of the NetworkInterfacer interface.
func (_ RealNetwork) Interfaces() ([]net.Interface, error) {
return net.Interfaces()
}
var _ NetworkInterfacer = &RealNetwork{}
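The interface exists precisely so tests can substitute canned data for the real net calls. A minimal sketch of such a fake (field names are assumptions for illustration):

package main

import "net"

// fakeNetwork returns canned interfaces and addresses instead of touching the
// host's real network stack.
type fakeNetwork struct {
	itfs  []net.Interface
	addrs map[string][]net.Addr // keyed by interface name
}

func (f fakeNetwork) Interfaces() ([]net.Interface, error) { return f.itfs, nil }

func (f fakeNetwork) Addrs(intf *net.Interface) ([]net.Addr, error) {
	return f.addrs[intf.Name], nil
}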


@ -17,15 +17,32 @@ limitations under the License.
package util
import (
"fmt"
"net"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
utilnet "k8s.io/kubernetes/pkg/util/net"
"github.com/golang/glog"
)
const (
IPv4ZeroCIDR = "0.0.0.0/0"
IPv6ZeroCIDR = "::/0"
)
func IsZeroCIDR(cidr string) bool {
if cidr == IPv4ZeroCIDR || cidr == IPv6ZeroCIDR {
return true
}
return false
}
func IsLocalIP(ip string) (bool, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
@ -56,3 +73,76 @@ func ShouldSkipService(svcName types.NamespacedName, service *api.Service) bool
}
return false
}
// GetNodeAddresses returns all matched node IP addresses based on the given cidr slice.
// Some callers, e.g. the IPVS proxier, need concrete IPs, not ranges, which is why this exists.
// NetworkInterfacer is injected for test purposes.
// We expect the cidrs passed in to be already validated.
// Given an empty input `[]`, it will return `0.0.0.0/0` and `::/0` directly.
// If multiple cidrs are given, it will return the minimal IP set, e.g. given input `[1.2.0.0/16, 0.0.0.0/0]`, it will
// only return `0.0.0.0/0`.
// NOTE: GetNodeAddresses only accepts CIDRs; if you want concrete IPs, e.g. 1.2.3.4, then the input should be 1.2.3.4/32.
func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error) {
uniqueAddressList := sets.NewString()
if len(cidrs) == 0 {
uniqueAddressList.Insert(IPv4ZeroCIDR)
uniqueAddressList.Insert(IPv6ZeroCIDR)
return uniqueAddressList, nil
}
// First round of iteration to pick out `0.0.0.0/0` or `::/0` for the sake of excluding non-zero IPs.
for _, cidr := range cidrs {
if IsZeroCIDR(cidr) {
uniqueAddressList.Insert(cidr)
}
}
// Second round of iteration to parse IPs based on cidr.
for _, cidr := range cidrs {
if IsZeroCIDR(cidr) {
continue
}
_, ipNet, _ := net.ParseCIDR(cidr)
itfs, err := nw.Interfaces()
if err != nil {
return nil, fmt.Errorf("error listing all interfaces from host, error: %v", err)
}
for _, itf := range itfs {
addrs, err := nw.Addrs(&itf)
if err != nil {
return nil, fmt.Errorf("error getting address from interface %s, error: %v", itf.Name, err)
}
for _, addr := range addrs {
if addr == nil {
continue
}
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
return nil, fmt.Errorf("error parsing CIDR for interface %s, error: %v", itf.Name, err)
}
if ipNet.Contains(ip) {
if utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
if !utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
}
}
}
}
return uniqueAddressList, nil
}
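For illustration, a caller such as the IPVS proxier could resolve node IPs like this (a sketch in the same package; the CIDR is an arbitrary example):

func printNodeAddresses() error {
    // An empty slice would return the zero CIDRs 0.0.0.0/0 and ::/0 instead.
    addrs, err := GetNodeAddresses([]string{"10.0.0.0/8"}, RealNetwork{})
    if err != nil {
        return err
    }
    for _, addr := range addrs.List() {
        fmt.Println(addr)
    }
    return nil
}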
// LogAndEmitIncorrectIPVersionEvent logs and emits an incorrect-IP-version event.
func LogAndEmitIncorrectIPVersionEvent(recorder record.EventRecorder, fieldName, fieldValue, svcNamespace, svcName string, svcUID types.UID) {
errMsg := fmt.Sprintf("%s in %s has incorrect IP version", fieldValue, fieldName)
glog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName)
if recorder != nil {
recorder.Eventf(
&v1.ObjectReference{
Kind: "Service",
Name: svcName,
Namespace: svcNamespace,
UID: svcUID,
}, v1.EventTypeWarning, "KubeProxyIncorrectIPVersion", errMsg)
}
}
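In a test, client-go's FakeRecorder can capture the emitted warning; a sketch with made-up service values:

func exampleEmitEvent() {
    recorder := record.NewFakeRecorder(1)
    LogAndEmitIncorrectIPVersionEvent(recorder, "externalIPs", "2001:db8::1", "default", "my-svc", types.UID("1234"))
    fmt.Println(<-recorder.Events) // Warning KubeProxyIncorrectIPVersion ...
}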

View file

@ -1,5 +1,5 @@
/*
Copyright 2015 The Kubernetes Authors.
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Contains utility code for use by volume plugins.
package util // import "k8s.io/kubernetes/pkg/volume/util"
// +k8s:deepcopy-gen=package
// Package api contains scheduler API objects.
package api // import "k8s.io/kubernetes/pkg/scheduler/api"

vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go generated vendored Normal file
View file

@ -0,0 +1,55 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
// TODO: remove this, scheduler should not have its own scheme.
var Scheme = runtime.NewScheme()
// SchemeGroupVersion is group version used to register these objects
// TODO this should be in the "scheduler" group
var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}
var (
// SchemeBuilder defines a SchemeBuilder object.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is used to add stored functions to scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func init() {
if err := addKnownTypes(Scheme); err != nil {
// Programmer error.
panic(err)
}
}
func addKnownTypes(scheme *runtime.Scheme) error {
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
return err
}
scheme.AddKnownTypes(SchemeGroupVersion,
&Policy{},
)
return nil
}

vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go generated vendored Normal file
View file

@ -0,0 +1,281 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
restclient "k8s.io/client-go/rest"
)
const (
// MaxUint defines the max unsigned int value.
MaxUint = ^uint(0)
// MaxInt defines the max signed int value.
MaxInt = int(MaxUint >> 1)
// MaxTotalPriority defines the max total priority value.
MaxTotalPriority = MaxInt
// MaxPriority defines the max priority value.
MaxPriority = 10
// MaxWeight defines the max weight value.
MaxWeight = MaxInt / MaxPriority
// HighestUserDefinablePriority is the highest priority for user defined priority classes. Priority values larger than 1 billion are reserved for Kubernetes system use.
HighestUserDefinablePriority = int32(1000000000)
// SystemCriticalPriority is the beginning of the range of priority values for critical system components.
SystemCriticalPriority = 2 * HighestUserDefinablePriority
// NOTE: In order to avoid conflict of names with user-defined priority classes, all the names must
// start with scheduling.SystemPriorityClassPrefix which is by default "system-".
SystemClusterCritical = "system-cluster-critical"
SystemNodeCritical = "system-node-critical"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Policy describes a struct of a policy resource in api.
type Policy struct {
metav1.TypeMeta
// Holds the information to configure the fit predicate functions.
// If unspecified, the default predicate functions will be applied.
// If empty list, all predicates (except the mandatory ones) will be
// bypassed.
Predicates []PredicatePolicy
// Holds the information to configure the priority functions.
// If unspecified, the default priority functions will be applied.
// If empty list, all priority functions will be bypassed.
Priorities []PriorityPolicy
// Holds the information to communicate with the extender(s)
ExtenderConfigs []ExtenderConfig
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
// corresponding to every RequiredDuringScheduling affinity rule.
// HardPodAffinitySymmetricWeight represents the weight of the implicit PreferredDuringScheduling affinity rule, in the range 1-100.
HardPodAffinitySymmetricWeight int32
// When AlwaysCheckAllPredicates is set to true, scheduler checks all
// the configured predicates even after one or more of them fails.
// When the flag is set to false, scheduler skips checking the rest
// of the predicates after it finds one predicate that failed.
AlwaysCheckAllPredicates bool
}
// PredicatePolicy describes a struct of a predicate policy.
type PredicatePolicy struct {
// Identifier of the predicate policy
// For a custom predicate, the name can be user-defined
// For the Kubernetes provided predicates, the name is the identifier of the pre-defined predicate
Name string
// Holds the parameters to configure the given predicate
Argument *PredicateArgument
}
// PriorityPolicy describes a struct of a priority policy.
type PriorityPolicy struct {
// Identifier of the priority policy
// For a custom priority, the name can be user-defined
// For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function
Name string
// The numeric multiplier for the node scores that the priority function generates
// The weight should be a positive integer
Weight int
// Holds the parameters to configure the given priority function
Argument *PriorityArgument
}
// PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration.
// Only one of its members may be specified
type PredicateArgument struct {
// The predicate that provides affinity for pods belonging to a service
// It uses a label to identify nodes that belong to the same "group"
ServiceAffinity *ServiceAffinity
// The predicate that checks whether a particular node has a certain label
// defined or not, regardless of value
LabelsPresence *LabelsPresence
}
// PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration.
// Only one of its members may be specified
type PriorityArgument struct {
// The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
// It uses a label to identify nodes that belong to the same "group"
ServiceAntiAffinity *ServiceAntiAffinity
// The priority function that checks whether a particular node has a certain label
// defined or not, regardless of value
LabelPreference *LabelPreference
}
// ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type ServiceAffinity struct {
// The list of labels that identify node "groups"
// All of the labels should match for the node to be considered a fit for hosting the pod
Labels []string
}
// LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type LabelsPresence struct {
// The list of labels that identify node "groups"
// All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod
Labels []string
// The boolean flag that indicates whether the labels should be present or absent from the node
Presence bool
}
// ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function
type ServiceAntiAffinity struct {
// Used to identify node "groups"
Label string
}
// LabelPreference holds the parameters that are used to configure the corresponding priority function
type LabelPreference struct {
// Used to identify node "groups"
Label string
// This is a boolean flag
// If true, higher priority is given to nodes that have the label
// If false, higher priority is given to nodes that do not have the label
Presence bool
}
// ExtenderManagedResource describes the arguments of extended resources
// managed by an extender.
type ExtenderManagedResource struct {
// Name is the extended resource name.
Name v1.ResourceName
// IgnoredByScheduler indicates whether kube-scheduler should ignore this
// resource when applying predicates.
IgnoredByScheduler bool
}
// ExtenderConfig holds the parameters used to communicate with the extender. If a verb is unspecified/empty,
// it is assumed that the extender chose not to provide that extension.
type ExtenderConfig struct {
// URLPrefix at which the extender is available
URLPrefix string
// Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
FilterVerb string
// Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
PrioritizeVerb string
// The numeric multiplier for the node scores that the prioritize call generates.
// The weight should be a positive integer
Weight int
// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
// can implement this function.
BindVerb string
// EnableHTTPS specifies whether https should be used to communicate with the extender
EnableHTTPS bool
// TLSConfig specifies the transport layer security config
TLSConfig *restclient.TLSClientConfig
// HTTPTimeout specifies the timeout duration for a call to the extender. A filter timeout fails the scheduling of the pod. A prioritize
// timeout is ignored; k8s/other extenders' priorities are used to select the node.
HTTPTimeout time.Duration
// NodeCacheCapable specifies that the extender is capable of caching node information,
// so the scheduler should only send minimal information about the eligible nodes
// assuming that the extender already cached full details of all nodes in the cluster
NodeCacheCapable bool
// ManagedResources is a list of extended resources that are managed by
// this extender.
// - A pod will be sent to the extender on the Filter, Prioritize and Bind
// (if the extender is the binder) phases iff the pod requests at least
// one of the extended resources in this list. If empty or unspecified,
// all pods will be sent to this extender.
// - If IgnoredByScheduler is set to true for a resource, kube-scheduler
// will skip checking the resource in predicates.
// +optional
ManagedResources []ExtenderManagedResource
}
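For illustration, a policy wiring a single HTTP extender might be built like the sketch below (every value is a made-up example):

var examplePolicy = Policy{
    HardPodAffinitySymmetricWeight: 1,
    ExtenderConfigs: []ExtenderConfig{{
        URLPrefix:      "http://127.0.0.1:8888/scheduler", // hypothetical endpoint
        FilterVerb:     "filter",     // calls <URLPrefix>/filter
        PrioritizeVerb: "prioritize", // calls <URLPrefix>/prioritize
        Weight:         1,
        HTTPTimeout:    30 * time.Second,
    }},
}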
// ExtenderArgs represents the arguments needed by the extender to filter/prioritize
// nodes for a pod.
type ExtenderArgs struct {
// Pod being scheduled
Pod v1.Pod
// List of candidate nodes where the pod can be scheduled; to be populated
// only if ExtenderConfig.NodeCacheCapable == false
Nodes *v1.NodeList
// List of candidate node names where the pod can be scheduled; to be
// populated only if ExtenderConfig.NodeCacheCapable == true
NodeNames *[]string
}
// FailedNodesMap represents the filtered out nodes, with node names and failure messages
type FailedNodesMap map[string]string
// ExtenderFilterResult represents the results of a filter call to an extender
type ExtenderFilterResult struct {
// Filtered set of nodes where the pod can be scheduled; to be populated
// only if ExtenderConfig.NodeCacheCapable == false
Nodes *v1.NodeList
// Filtered set of nodes where the pod can be scheduled; to be populated
// only if ExtenderConfig.NodeCacheCapable == true
NodeNames *[]string
// Filtered out nodes where the pod can't be scheduled and the failure messages
FailedNodes FailedNodesMap
// Error message indicating failure
Error string
}
// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node.
type ExtenderBindingArgs struct {
// PodName is the name of the pod being bound
PodName string
// PodNamespace is the namespace of the pod being bound
PodNamespace string
// PodUID is the UID of the pod being bound
PodUID types.UID
// Node selected by the scheduler
Node string
}
// ExtenderBindingResult represents the result of binding of a pod to a node from an extender.
type ExtenderBindingResult struct {
// Error message indicating failure
Error string
}
// HostPriority represents the priority of scheduling to a particular host, higher priority is better.
type HostPriority struct {
// Name of the host
Host string
// Score associated with the host
Score int
}
// HostPriorityList declares a []HostPriority type.
type HostPriorityList []HostPriority
// SystemPriorityClasses defines special priority classes which are used by system critical pods that should not be preempted by workload pods.
var SystemPriorityClasses = map[string]int32{
SystemClusterCritical: SystemCriticalPriority,
SystemNodeCritical: SystemCriticalPriority + 1000,
}
func (h HostPriorityList) Len() int {
return len(h)
}
func (h HostPriorityList) Less(i, j int) bool {
if h[i].Score == h[j].Score {
return h[i].Host < h[j].Host
}
return h[i].Score < h[j].Score
}
func (h HostPriorityList) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
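Since HostPriorityList implements sort.Interface, candidate hosts can be ordered directly; a sketch with example nodes (assumes an extra import of the standard library "sort" package):

func exampleSortHosts() {
    hosts := HostPriorityList{
        {Host: "node-b", Score: 5},
        {Host: "node-a", Score: 5},
        {Host: "node-c", Score: 9},
    }
    sort.Sort(hosts) // ties on Score fall back to host name: node-a, node-b, node-c
}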

View file

@ -0,0 +1,485 @@
// +build !ignore_autogenerated
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package api
import (
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
rest "k8s.io/client-go/rest"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) {
*out = *in
in.Pod.DeepCopyInto(&out.Pod)
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
if *in == nil {
*out = nil
} else {
*out = new(v1.NodeList)
(*in).DeepCopyInto(*out)
}
}
if in.NodeNames != nil {
in, out := &in.NodeNames, &out.NodeNames
if *in == nil {
*out = nil
} else {
*out = new([]string)
if **in != nil {
in, out := *in, *out
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderArgs.
func (in *ExtenderArgs) DeepCopy() *ExtenderArgs {
if in == nil {
return nil
}
out := new(ExtenderArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderBindingArgs) DeepCopyInto(out *ExtenderBindingArgs) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingArgs.
func (in *ExtenderBindingArgs) DeepCopy() *ExtenderBindingArgs {
if in == nil {
return nil
}
out := new(ExtenderBindingArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderBindingResult) DeepCopyInto(out *ExtenderBindingResult) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingResult.
func (in *ExtenderBindingResult) DeepCopy() *ExtenderBindingResult {
if in == nil {
return nil
}
out := new(ExtenderBindingResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) {
*out = *in
if in.TLSConfig != nil {
in, out := &in.TLSConfig, &out.TLSConfig
if *in == nil {
*out = nil
} else {
*out = new(rest.TLSClientConfig)
(*in).DeepCopyInto(*out)
}
}
if in.ManagedResources != nil {
in, out := &in.ManagedResources, &out.ManagedResources
*out = make([]ExtenderManagedResource, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderConfig.
func (in *ExtenderConfig) DeepCopy() *ExtenderConfig {
if in == nil {
return nil
}
out := new(ExtenderConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderFilterResult) DeepCopyInto(out *ExtenderFilterResult) {
*out = *in
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
if *in == nil {
*out = nil
} else {
*out = new(v1.NodeList)
(*in).DeepCopyInto(*out)
}
}
if in.NodeNames != nil {
in, out := &in.NodeNames, &out.NodeNames
if *in == nil {
*out = nil
} else {
*out = new([]string)
if **in != nil {
in, out := *in, *out
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
if in.FailedNodes != nil {
in, out := &in.FailedNodes, &out.FailedNodes
*out = make(FailedNodesMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderFilterResult.
func (in *ExtenderFilterResult) DeepCopy() *ExtenderFilterResult {
if in == nil {
return nil
}
out := new(ExtenderFilterResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource.
func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource {
if in == nil {
return nil
}
out := new(ExtenderManagedResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in FailedNodesMap) DeepCopyInto(out *FailedNodesMap) {
{
in := &in
*out = make(FailedNodesMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedNodesMap.
func (in FailedNodesMap) DeepCopy() FailedNodesMap {
if in == nil {
return nil
}
out := new(FailedNodesMap)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPriority) DeepCopyInto(out *HostPriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriority.
func (in *HostPriority) DeepCopy() *HostPriority {
if in == nil {
return nil
}
out := new(HostPriority)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in HostPriorityList) DeepCopyInto(out *HostPriorityList) {
{
in := &in
*out = make(HostPriorityList, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriorityList.
func (in HostPriorityList) DeepCopy() HostPriorityList {
if in == nil {
return nil
}
out := new(HostPriorityList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelPreference) DeepCopyInto(out *LabelPreference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelPreference.
func (in *LabelPreference) DeepCopy() *LabelPreference {
if in == nil {
return nil
}
out := new(LabelPreference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelsPresence) DeepCopyInto(out *LabelsPresence) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsPresence.
func (in *LabelsPresence) DeepCopy() *LabelsPresence {
if in == nil {
return nil
}
out := new(LabelsPresence)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Policy) DeepCopyInto(out *Policy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Predicates != nil {
in, out := &in.Predicates, &out.Predicates
*out = make([]PredicatePolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Priorities != nil {
in, out := &in.Priorities, &out.Priorities
*out = make([]PriorityPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtenderConfigs != nil {
in, out := &in.ExtenderConfigs, &out.ExtenderConfigs
*out = make([]ExtenderConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
func (in *Policy) DeepCopy() *Policy {
if in == nil {
return nil
}
out := new(Policy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Policy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) {
*out = *in
if in.ServiceAffinity != nil {
in, out := &in.ServiceAffinity, &out.ServiceAffinity
if *in == nil {
*out = nil
} else {
*out = new(ServiceAffinity)
(*in).DeepCopyInto(*out)
}
}
if in.LabelsPresence != nil {
in, out := &in.LabelsPresence, &out.LabelsPresence
if *in == nil {
*out = nil
} else {
*out = new(LabelsPresence)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateArgument.
func (in *PredicateArgument) DeepCopy() *PredicateArgument {
if in == nil {
return nil
}
out := new(PredicateArgument)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PredicatePolicy) DeepCopyInto(out *PredicatePolicy) {
*out = *in
if in.Argument != nil {
in, out := &in.Argument, &out.Argument
if *in == nil {
*out = nil
} else {
*out = new(PredicateArgument)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicatePolicy.
func (in *PredicatePolicy) DeepCopy() *PredicatePolicy {
if in == nil {
return nil
}
out := new(PredicatePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) {
*out = *in
if in.ServiceAntiAffinity != nil {
in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity
if *in == nil {
*out = nil
} else {
*out = new(ServiceAntiAffinity)
**out = **in
}
}
if in.LabelPreference != nil {
in, out := &in.LabelPreference, &out.LabelPreference
if *in == nil {
*out = nil
} else {
*out = new(LabelPreference)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityArgument.
func (in *PriorityArgument) DeepCopy() *PriorityArgument {
if in == nil {
return nil
}
out := new(PriorityArgument)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityPolicy) DeepCopyInto(out *PriorityPolicy) {
*out = *in
if in.Argument != nil {
in, out := &in.Argument, &out.Argument
if *in == nil {
*out = nil
} else {
*out = new(PriorityArgument)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityPolicy.
func (in *PriorityPolicy) DeepCopy() *PriorityPolicy {
if in == nil {
return nil
}
out := new(PriorityPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinity.
func (in *ServiceAffinity) DeepCopy() *ServiceAffinity {
if in == nil {
return nil
}
out := new(ServiceAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAntiAffinity) DeepCopyInto(out *ServiceAntiAffinity) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAntiAffinity.
func (in *ServiceAntiAffinity) DeepCopy() *ServiceAntiAffinity {
if in == nil {
return nil
}
out := new(ServiceAntiAffinity)
in.DeepCopyInto(out)
return out
}

vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go generated vendored Normal file
View file

@ -0,0 +1,186 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package serviceaccount
import (
"errors"
"fmt"
"time"
"github.com/golang/glog"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/pkg/apis/core"
"gopkg.in/square/go-jose.v2/jwt"
)
// time.Now stubbed out to allow testing
var now = time.Now
type privateClaims struct {
Kubernetes kubernetes `json:"kubernetes.io,omitempty"`
}
type kubernetes struct {
Namespace string `json:"namespace,omitempty"`
Svcacct ref `json:"serviceaccount,omitempty"`
Pod *ref `json:"pod,omitempty"`
Secret *ref `json:"secret,omitempty"`
}
type ref struct {
Name string `json:"name,omitempty"`
UID string `json:"uid,omitempty"`
}
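// Claims returns the standard public JWT claims and the "kubernetes.io"
// private claims for the given service account, optionally bound to a pod or
// secret.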
func Claims(sa core.ServiceAccount, pod *core.Pod, secret *core.Secret, expirationSeconds int64, audience []string) (*jwt.Claims, interface{}) {
now := now()
sc := &jwt.Claims{
Subject: apiserverserviceaccount.MakeUsername(sa.Namespace, sa.Name),
Audience: jwt.Audience(audience),
IssuedAt: jwt.NewNumericDate(now),
NotBefore: jwt.NewNumericDate(now),
Expiry: jwt.NewNumericDate(now.Add(time.Duration(expirationSeconds) * time.Second)),
}
pc := &privateClaims{
Kubernetes: kubernetes{
Namespace: sa.Namespace,
Svcacct: ref{
Name: sa.Name,
UID: string(sa.UID),
},
},
}
switch {
case pod != nil:
pc.Kubernetes.Pod = &ref{
Name: pod.Name,
UID: string(pod.UID),
}
case secret != nil:
pc.Kubernetes.Secret = &ref{
Name: secret.Name,
UID: string(secret.UID),
}
}
return sc, pc
}
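A sketch of how an issuer might build claims for a pod-bound, one-hour token (the ServiceAccount, Pod, and audience values are placeholders; metav1 is an assumed import of k8s.io/apimachinery/pkg/apis/meta/v1):

func exampleClaims() {
    sa := core.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "builder", UID: "sa-uid"}}
    pod := &core.Pod{ObjectMeta: metav1.ObjectMeta{Name: "builder-0", UID: "pod-uid"}}
    public, private := Claims(sa, pod, nil, 3600, []string{"https://kubernetes.default.svc"})
    _, _ = public, private // public: sub/aud/iat/nbf/exp; private: the kubernetes.io claim block
}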
func NewValidator(audiences []string, getter ServiceAccountTokenGetter) Validator {
return &validator{
auds: audiences,
getter: getter,
}
}
type validator struct {
auds []string
getter ServiceAccountTokenGetter
}
var _ = Validator(&validator{})
func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{}) (string, string, string, error) {
private, ok := privateObj.(*privateClaims)
if !ok {
glog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj)
return "", "", "", errors.New("Token could not be validated.")
}
err := public.Validate(jwt.Expected{
Time: now(),
})
switch {
case err == nil:
case err == jwt.ErrExpired:
return "", "", "", errors.New("Token has expired.")
default:
glog.Errorf("unexpected validation error: %T", err)
return "", "", "", errors.New("Token could not be validated.")
}
var audValid bool
for _, aud := range v.auds {
audValid = public.Audience.Contains(aud)
if audValid {
break
}
}
if !audValid {
return "", "", "", errors.New("Token is invalid for this audience.")
}
namespace := private.Kubernetes.Namespace
saref := private.Kubernetes.Svcacct
podref := private.Kubernetes.Pod
secref := private.Kubernetes.Secret
// Make sure service account still exists (name and UID)
serviceAccount, err := v.getter.GetServiceAccount(namespace, saref.Name)
if err != nil {
glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err)
return "", "", "", err
}
if serviceAccount.DeletionTimestamp != nil {
glog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name)
return "", "", "", fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, saref.Name)
}
if string(serviceAccount.UID) != saref.UID {
glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID)
return "", "", "", fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, saref.UID)
}
if secref != nil {
// Make sure token hasn't been invalidated by deletion of the secret
secret, err := v.getter.GetSecret(namespace, secref.Name)
if err != nil {
glog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err)
return "", "", "", errors.New("Token has been invalidated")
}
if secret.DeletionTimestamp != nil {
glog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name)
return "", "", "", errors.New("Token has been invalidated")
}
if string(secret.UID) != secref.UID {
glog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID)
return "", "", "", fmt.Errorf("Secret UID (%s) does not match claim (%s)", secret.UID, secref.UID)
}
}
if podref != nil {
// Make sure token hasn't been invalidated by deletion of the pod
pod, err := v.getter.GetPod(namespace, podref.Name)
if err != nil {
glog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err)
return "", "", "", errors.New("Token has been invalidated")
}
if pod.DeletionTimestamp != nil {
glog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name)
return "", "", "", errors.New("Token has been invalidated")
}
if string(pod.UID) != podref.UID {
glog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID)
return "", "", "", fmt.Errorf("Pod UID (%s) does not match claim (%s)", pod.UID, podref.UID)
}
}
return private.Kubernetes.Namespace, private.Kubernetes.Svcacct.Name, private.Kubernetes.Svcacct.UID, nil
}
func (v *validator) NewPrivateClaims() interface{} {
return &privateClaims{}
}

View file

@ -17,7 +17,6 @@ limitations under the License.
package serviceaccount
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
@ -30,10 +29,8 @@ import (
"k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/authentication/authenticator"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
"github.com/golang/glog"
jose "gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
)
@ -41,6 +38,7 @@ import (
// ServiceAccountTokenGetter defines functions to retrieve a named service account and secret
type ServiceAccountTokenGetter interface {
GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error)
GetPod(namespace, name string) (*v1.Pod, error)
GetSecret(namespace, name string) (*v1.Secret, error)
}
@ -113,14 +111,11 @@ func (j *jwtTokenGenerator) GenerateToken(claims *jwt.Claims, privateClaims inte
// JWTTokenAuthenticator authenticates tokens as JWT tokens produced by JWTTokenGenerator
// Token signatures are verified using each of the given public keys until one works (allowing key rotation)
// If lookup is true, the service account and secret referenced as claims inside the token are retrieved and verified with the provided ServiceAccountTokenGetter
func JWTTokenAuthenticator(iss string, keys []interface{}, lookup bool, getter ServiceAccountTokenGetter) authenticator.Token {
func JWTTokenAuthenticator(iss string, keys []interface{}, validator Validator) authenticator.Token {
return &jwtTokenAuthenticator{
iss: iss,
keys: keys,
validator: &legacyValidator{
lookup: lookup,
getter: getter,
},
iss: iss,
keys: keys,
validator: validator,
}
}
@ -130,8 +125,19 @@ type jwtTokenAuthenticator struct {
validator Validator
}
// Validator is called by the JWT token authenticator to apply domain-specific
// validation to a token and extract user information.
type Validator interface {
Validate(tokenData string, public *jwt.Claims, private *legacyPrivateClaims) error
// Validate validates a token and returns user information or an error.
// Validator can assume that the issuer and signature of a token are already
// verified when this function is called.
Validate(tokenData string, public *jwt.Claims, private interface{}) (namespace, name, uid string, err error)
// NewPrivateClaims returns a struct that the authenticator should
// deserialize the JWT payload into. The authenticator may then pass this
// struct back to the Validator as the 'private' argument to a Validate()
// call. This struct should contain fields for any private claims that the
// Validator requires to validate the JWT.
NewPrivateClaims() interface{}
}
var errMismatchedSigningMethod = errors.New("invalid signing method")
@ -147,7 +153,7 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info,
}
public := &jwt.Claims{}
private := &legacyPrivateClaims{}
private := j.validator.NewPrivateClaims()
var (
found bool
@ -168,12 +174,12 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info,
// If we get here, we have a token with a recognized signature and
// issuer string.
if err := j.validator.Validate(tokenData, public, private); err != nil {
ns, name, uid, err := j.validator.Validate(tokenData, public, private)
if err != nil {
return nil, false, err
}
return UserInfo(private.Namespace, private.ServiceAccountName, private.ServiceAccountUID), true, nil
return UserInfo(ns, name, uid), true, nil
}
// hasCorrectIssuer returns true if tokenData is a valid JWT in compact
@ -204,71 +210,3 @@ func (j *jwtTokenAuthenticator) hasCorrectIssuer(tokenData string) bool {
return true
}
type legacyValidator struct {
lookup bool
getter ServiceAccountTokenGetter
}
func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, private *legacyPrivateClaims) error {
// Make sure the claims we need exist
if len(public.Subject) == 0 {
return errors.New("sub claim is missing")
}
namespace := private.Namespace
if len(namespace) == 0 {
return errors.New("namespace claim is missing")
}
secretName := private.SecretName
if len(secretName) == 0 {
return errors.New("secretName claim is missing")
}
serviceAccountName := private.ServiceAccountName
if len(serviceAccountName) == 0 {
return errors.New("serviceAccountName claim is missing")
}
serviceAccountUID := private.ServiceAccountUID
if len(serviceAccountUID) == 0 {
return errors.New("serviceAccountUID claim is missing")
}
subjectNamespace, subjectName, err := apiserverserviceaccount.SplitUsername(public.Subject)
if err != nil || subjectNamespace != namespace || subjectName != serviceAccountName {
return errors.New("sub claim is invalid")
}
if v.lookup {
// Make sure token hasn't been invalidated by deletion of the secret
secret, err := v.getter.GetSecret(namespace, secretName)
if err != nil {
glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err)
return errors.New("Token has been invalidated")
}
if secret.DeletionTimestamp != nil {
glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return errors.New("Token has been invalidated")
}
if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 {
glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return errors.New("Token does not match server's copy")
}
// Make sure service account still exists (name and UID)
serviceAccount, err := v.getter.GetServiceAccount(namespace, serviceAccountName)
if err != nil {
glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err)
return err
}
if serviceAccount.DeletionTimestamp != nil {
glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName)
return fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName)
}
if string(serviceAccount.UID) != serviceAccountUID {
glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID)
return fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID)
}
}
return nil
}
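With the new signature, callers supply a Validator instead of the lookup flag and getter; a migration sketch (iss, keys, getter, and audiences are placeholders):

// Before this change:
//   auth := JWTTokenAuthenticator(iss, keys, true /* lookup */, getter)
// After, for legacy secret-based tokens:
//   auth := JWTTokenAuthenticator(iss, keys, NewLegacyValidator(true, getter))
// And for the new bound service account tokens:
//   auth := JWTTokenAuthenticator(iss, keys, NewValidator(audiences, getter))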

View file

@ -17,9 +17,14 @@ limitations under the License.
package serviceaccount
import (
"bytes"
"errors"
"fmt"
"k8s.io/api/core/v1"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"github.com/golang/glog"
"gopkg.in/square/go-jose.v2/jwt"
)
@ -42,3 +47,89 @@ type legacyPrivateClaims struct {
SecretName string `json:"kubernetes.io/serviceaccount/secret.name"`
Namespace string `json:"kubernetes.io/serviceaccount/namespace"`
}
func NewLegacyValidator(lookup bool, getter ServiceAccountTokenGetter) Validator {
return &legacyValidator{
lookup: lookup,
getter: getter,
}
}
type legacyValidator struct {
lookup bool
getter ServiceAccountTokenGetter
}
var _ = Validator(&legacyValidator{})
func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, privateObj interface{}) (string, string, string, error) {
private, ok := privateObj.(*legacyPrivateClaims)
if !ok {
glog.Errorf("jwt validator expected private claim of type *legacyPrivateClaims but got: %T", privateObj)
return "", "", "", errors.New("Token could not be validated.")
}
// Make sure the claims we need exist
if len(public.Subject) == 0 {
return "", "", "", errors.New("sub claim is missing")
}
namespace := private.Namespace
if len(namespace) == 0 {
return "", "", "", errors.New("namespace claim is missing")
}
secretName := private.SecretName
if len(secretName) == 0 {
return "", "", "", errors.New("secretName claim is missing")
}
serviceAccountName := private.ServiceAccountName
if len(serviceAccountName) == 0 {
return "", "", "", errors.New("serviceAccountName claim is missing")
}
serviceAccountUID := private.ServiceAccountUID
if len(serviceAccountUID) == 0 {
return "", "", "", errors.New("serviceAccountUID claim is missing")
}
subjectNamespace, subjectName, err := apiserverserviceaccount.SplitUsername(public.Subject)
if err != nil || subjectNamespace != namespace || subjectName != serviceAccountName {
return "", "", "", errors.New("sub claim is invalid")
}
if v.lookup {
// Make sure token hasn't been invalidated by deletion of the secret
secret, err := v.getter.GetSecret(namespace, secretName)
if err != nil {
glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err)
return "", "", "", errors.New("Token has been invalidated")
}
if secret.DeletionTimestamp != nil {
glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return "", "", "", errors.New("Token has been invalidated")
}
if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 {
glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return "", "", "", errors.New("Token does not match server's copy")
}
// Make sure service account still exists (name and UID)
serviceAccount, err := v.getter.GetServiceAccount(namespace, serviceAccountName)
if err != nil {
glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err)
return "", "", "", err
}
if serviceAccount.DeletionTimestamp != nil {
glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName)
return "", "", "", fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName)
}
if string(serviceAccount.UID) != serviceAccountUID {
glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID)
return "", "", "", fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID)
}
}
return private.Namespace, private.ServiceAccountName, private.ServiceAccountUID, nil
}
func (v *legacyValidator) NewPrivateClaims() interface{} {
return &legacyPrivateClaims{}
}

View file

@ -14,28 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package util
package conntrack
import (
"fmt"
"net"
"strconv"
"strings"
"k8s.io/api/core/v1"
utilnet "k8s.io/kubernetes/pkg/util/net"
"k8s.io/utils/exec"
)
// Utilities for dealing with conntrack
// NoConnectionToDelete is the error string returned by conntrack when no matching connections are found
const NoConnectionToDelete = "0 flow entries have been deleted"
func IsIPv6(netIP net.IP) bool {
return netIP != nil && netIP.To4() == nil
}
func IsIPv6String(ip string) bool {
netIP := net.ParseIP(ip)
return IsIPv6(netIP)
func protoStr(proto v1.Protocol) string {
return strings.ToLower(string(proto))
}
func parametersWithFamily(isIPv6 bool, parameters ...string) []string {
@ -45,11 +42,11 @@ func parametersWithFamily(isIPv6 bool, parameters ...string) []string {
return parameters
}
// ClearUDPConntrackForIP uses the conntrack tool to delete the conntrack entries
// ClearEntriesForIP uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the given service IP
func ClearUDPConntrackForIP(execer exec.Interface, ip string) error {
parameters := parametersWithFamily(IsIPv6String(ip), "-D", "--orig-dst", ip, "-p", "udp")
err := ExecConntrackTool(execer, parameters...)
func ClearEntriesForIP(execer exec.Interface, ip string, protocol v1.Protocol) error {
parameters := parametersWithFamily(utilnet.IsIPv6String(ip), "-D", "--orig-dst", ip, "-p", protoStr(protocol))
err := Exec(execer, parameters...)
if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
// TODO: Better handling for deletion failure. When failures occur, stale UDP connections may not get flushed.
// These stale connections will keep black-holing traffic. Making this a best-effort operation for now, since it
@ -59,8 +56,8 @@ func ClearUDPConntrackForIP(execer exec.Interface, ip string) error {
return nil
}
// ExecConntrackTool executes the conntrack tool using the given parameters
func ExecConntrackTool(execer exec.Interface, parameters ...string) error {
// Exec executes the conntrack tool using the given parameters
func Exec(execer exec.Interface, parameters ...string) error {
conntrackPath, err := execer.LookPath("conntrack")
if err != nil {
return fmt.Errorf("error looking for path of conntrack: %v", err)
@ -72,29 +69,36 @@ func ExecConntrackTool(execer exec.Interface, parameters ...string) error {
return nil
}
// ClearUDPConntrackForPort uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the port.
// Exists returns true if conntrack binary is installed.
func Exists(execer exec.Interface) bool {
_, err := execer.LookPath("conntrack")
return err == nil
}
// ClearEntriesForPort uses the conntrack tool to delete the conntrack entries
// for connections specified by the port.
// When a packet arrives, it will not go through the NAT table again, because it is not "the first" packet.
// The solution is clearing the conntrack. Known issues:
// https://github.com/docker/docker/issues/8795
// https://github.com/kubernetes/kubernetes/issues/31983
func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) error {
func ClearEntriesForPort(execer exec.Interface, port int, isIPv6 bool, protocol v1.Protocol) error {
if port <= 0 {
return fmt.Errorf("Wrong port number. The port number must be greater than zero")
}
parameters := parametersWithFamily(isIPv6, "-D", "-p", "udp", "--dport", strconv.Itoa(port))
err := ExecConntrackTool(execer, parameters...)
parameters := parametersWithFamily(isIPv6, "-D", "-p", protoStr(protocol), "--dport", strconv.Itoa(port))
err := Exec(execer, parameters...)
if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
return fmt.Errorf("error deleting conntrack entries for UDP port: %d, error: %v", port, err)
}
return nil
}
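A sketch of how kube-proxy might flush stale entries for a UDP NodePort with the renamed helpers (the port is a made-up example):

func exampleFlushNodePort(execer exec.Interface) error {
    if !Exists(execer) {
        return nil // conntrack binary not installed; nothing to flush
    }
    // Clear IPv4 conntrack entries for UDP port 30053 (hypothetical NodePort).
    return ClearEntriesForPort(execer, 30053, false, v1.ProtocolUDP)
}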
// ClearUDPConntrackForPeers uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the {origin, dest} IP pair.
func ClearUDPConntrackForPeers(execer exec.Interface, origin, dest string) error {
parameters := parametersWithFamily(IsIPv6String(origin), "-D", "--orig-dst", origin, "--dst-nat", dest, "-p", "udp")
err := ExecConntrackTool(execer, parameters...)
// ClearEntriesForNAT uses the conntrack tool to delete the conntrack entries
// for connections specified by the {origin, dest} IP pair.
func ClearEntriesForNAT(execer exec.Interface, origin, dest string, protocol v1.Protocol) error {
parameters := parametersWithFamily(utilnet.IsIPv6String(origin), "-D", "--orig-dst", origin, "--dst-nat", dest,
"-p", protoStr(protocol))
err := Exec(execer, parameters...)
if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
// TODO: Better handling for deletion failure. When failures occur, stale UDP connections may not get flushed.
// These stale connections will keep black-holing traffic. Making this a best-effort operation for now, since it

View file

@ -21,7 +21,8 @@ import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"k8s.io/kubernetes/pkg/util/nsenter"
"github.com/golang/glog"
)
@ -54,25 +55,20 @@ type NsenterWriter struct{}
// WriteFile calls 'nsenter cat - > <the file>' and 'nsenter chmod' to create a
// file on the host.
func (writer *NsenterWriter) WriteFile(filename string, data []byte, perm os.FileMode) error {
cmd := "nsenter"
baseArgs := []string{
"--mount=/rootfs/proc/1/ns/mnt",
"--",
}
echoArgs := append(baseArgs, "sh", "-c", fmt.Sprintf("cat > %s", filename))
glog.V(5).Infof("Command to write data to file: %v %v", cmd, echoArgs)
command := exec.Command(cmd, echoArgs...)
command.Stdin = bytes.NewBuffer(data)
ne := nsenter.NewNsenter()
echoArgs := []string{"-c", fmt.Sprintf("cat > %s", filename)}
glog.V(5).Infof("nsenter: write data to file %s by nsenter", filename)
command := ne.Exec("sh", echoArgs)
command.SetStdin(bytes.NewBuffer(data))
outputBytes, err := command.CombinedOutput()
if err != nil {
glog.Errorf("Output from writing to %q: %v", filename, string(outputBytes))
return err
}
chmodArgs := append(baseArgs, "chmod", fmt.Sprintf("%o", perm), filename)
glog.V(5).Infof("Command to change permissions to file: %v %v", cmd, chmodArgs)
outputBytes, err = exec.Command(cmd, chmodArgs...).CombinedOutput()
chmodArgs := []string{fmt.Sprintf("%o", perm), filename}
glog.V(5).Infof("nsenter: change permissions of file %s to %s", filename, chmodArgs[0])
outputBytes, err = ne.Exec("chmod", chmodArgs).CombinedOutput()
if err != nil {
glog.Errorf("Output from chmod command: %v", string(outputBytes))
return err

View file

@ -266,7 +266,7 @@ func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
if err != nil {
return true, err
}
rootStat, err := os.Lstat(file + "/..")
rootStat, err := os.Lstat(filepath.Dir(strings.TrimSuffix(file, "/")))
if err != nil {
return true, err
}
@ -432,7 +432,7 @@ func (mounter *Mounter) GetFileType(pathname string) (FileType, error) {
case syscall.S_IFBLK:
return FileTypeBlockDev, nil
case syscall.S_IFCHR:
return FileTypeBlockDev, nil
return FileTypeCharDev, nil
case syscall.S_IFDIR:
return FileTypeDirectory, nil
case syscall.S_IFREG:

vendor/k8s.io/kubernetes/pkg/util/net/net.go generated vendored Normal file
View file

@ -0,0 +1,61 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package net
import (
"net"
)
// IsIPv6 returns whether netIP is an IPv6 address.
func IsIPv6(netIP net.IP) bool {
return netIP != nil && netIP.To4() == nil
}
// IsIPv6String returns whether ip is an IPv6 address.
func IsIPv6String(ip string) bool {
netIP := net.ParseIP(ip)
return IsIPv6(netIP)
}
// IsIPv6CIDR returns whether cidr is an IPv6 CIDR.
// It assumes cidr is a valid CIDR.
func IsIPv6CIDR(cidr string) bool {
ip, _, _ := net.ParseCIDR(cidr)
return IsIPv6(ip)
}
// FilterIncorrectIPVersion splits a slice of IP strings into those that match the desired IP version (isIPv6Mode) and those that do not.
func FilterIncorrectIPVersion(ipStrings []string, isIPv6Mode bool) ([]string, []string) {
return filterWithCondition(ipStrings, isIPv6Mode, IsIPv6String)
}
// FilterIncorrectCIDRVersion splits a slice of CIDR strings into those that match the desired IP version (isIPv6Mode) and those that do not.
func FilterIncorrectCIDRVersion(ipStrings []string, isIPv6Mode bool) ([]string, []string) {
return filterWithCondition(ipStrings, isIPv6Mode, IsIPv6CIDR)
}
func filterWithCondition(strs []string, expectedCondition bool, conditionFunc func(string) bool) ([]string, []string) {
var corrects, incorrects []string
for _, str := range strs {
if conditionFunc(str) != expectedCondition {
incorrects = append(incorrects, str)
} else {
corrects = append(corrects, str)
}
}
return corrects, incorrects
}
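For example, in IPv4 mode a dual-stack input splits as in this sketch:

func exampleFilter() {
    ips := []string{"10.0.0.1", "2001:db8::1", "192.168.1.1"}
    correct, incorrect := FilterIncorrectIPVersion(ips, false)
    _, _ = correct, incorrect
    // correct:   [10.0.0.1 192.168.1.1]
    // incorrect: [2001:db8::1]
}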

View file

@ -35,7 +35,7 @@ func NewNsenter() *Nsenter {
}
// Exec executes nsenter commands in hostProcMountNsPath mount namespace
func (ne *Nsenter) Exec(args ...string) exec.Cmd {
func (ne *Nsenter) Exec(cmd string, args []string) exec.Cmd {
return nil
}

View file

@ -19,7 +19,7 @@ package volume
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/fs"
)
var _ MetricsProvider = &metricsDu{}
@ -66,7 +66,7 @@ func (md *metricsDu) GetMetrics() (*Metrics, error) {
// runDu executes the "du" command and writes the results to metrics.Used
func (md *metricsDu) runDu(metrics *Metrics) error {
used, err := util.Du(md.path)
used, err := fs.Du(md.path)
if err != nil {
return err
}
@ -76,7 +76,7 @@ func (md *metricsDu) runDu(metrics *Metrics) error {
// runFind executes the "find" command and writes the results to metrics.InodesUsed
func (md *metricsDu) runFind(metrics *Metrics) error {
inodesUsed, err := util.Find(md.path)
inodesUsed, err := fs.Find(md.path)
if err != nil {
return err
}
@ -87,7 +87,7 @@ func (md *metricsDu) runFind(metrics *Metrics) error {
// getFsInfo writes metrics.Capacity and metrics.Available from the filesystem
// info
func (md *metricsDu) getFsInfo(metrics *Metrics) error {
available, capacity, _, inodes, inodesFree, _, err := util.FsInfo(md.path)
available, capacity, _, inodes, inodesFree, _, err := fs.FsInfo(md.path)
if err != nil {
return NewFsInfoFailedError(err)
}

View file

@ -19,7 +19,7 @@ package volume
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/fs"
)
var _ MetricsProvider = &metricsStatFS{}
@ -55,7 +55,7 @@ func (md *metricsStatFS) GetMetrics() (*Metrics, error) {
// getFsInfo writes metrics.Capacity, metrics.Used and metrics.Available from the filesystem info
func (md *metricsStatFS) getFsInfo(metrics *Metrics) error {
available, capacity, usage, inodes, inodesFree, inodesUsed, err := util.FsInfo(md.path)
available, capacity, usage, inodes, inodesFree, inodesUsed, err := fs.FsInfo(md.path)
if err != nil {
return NewFsInfoFailedError(err)
}

View file

@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
)
const (
@ -161,7 +162,7 @@ type RecyclableVolumePlugin interface {
// Recycle will use the provided recorder to write any events that might be
// interesting to user. It's expected that caller will pass these events to
// the PV being recycled.
Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error
Recycle(pvName string, spec *Spec, eventRecorder recyclerclient.RecycleEventRecorder) error
}
// DeletableVolumePlugin is an extended interface of VolumePlugin and is used

View file

@ -1,523 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"reflect"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"hash/fnv"
"math/rand"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
// GB - GigaByte size
GB = 1000 * 1000 * 1000
// GIB - GibiByte size
GIB = 1024 * 1024 * 1024
)
type RecycleEventRecorder func(eventtype, message string)
// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
// Recyclers. This function will save the given Pod to the API and watch it
// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,
// whichever comes first. Deletion of the recycler pod is always
// attempted before returning.
//
// In case there is a pod with the same namespace+name already running, this
// function deletes it as it is not able to judge if it is an old recycler
// or the user has forged a fake recycler to block Kubernetes from recycling.
//
// pod - the pod designed by a volume plugin to recycle the volume. pod.Name
// will be overwritten with unique name based on PV.Name.
// client - kube client for API operations.
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
}
// same as above func comments, except 'recyclerClient' is a narrower pod API
// interface to ease testing
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
// Generate unique name for the recycler pod - we need to get "already
// exists" error when a previous controller has already started recycling
// the volume. Here we assume that pv.Name is already unique.
pod.Name = "recycler-for-" + pvName
pod.GenerateName = ""
stopChannel := make(chan struct{})
defer close(stopChannel)
podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
if err != nil {
glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return err
}
// Start the pod
_, err = recyclerClient.CreatePod(pod)
if err != nil {
if errors.IsAlreadyExists(err) {
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
}
// Recycler will try again and the old pod will be hopefully deleted
// at that time.
return fmt.Errorf("old recycler pod found, will retry later")
}
return fmt.Errorf("unexpected error creating recycler pod: %+v", err)
}
err = waitForPod(pod, recyclerClient, podCh)
// In all cases delete the recycler pod and log its result.
glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Returning recycler error is preferred, the pod will be deleted again on
// the next retry.
if err != nil {
return fmt.Errorf("failed to recycle volume: %s", err)
}
// Recycle succeeded but we failed to delete the recycler pod. Report it,
// the controller will re-try recycling the PV again shortly.
if deleteErr != nil {
return fmt.Errorf("failed to delete recycler pod: %s", deleteErr)
}
return nil
}
// waitForPod watches the pod until it finishes and sends all events on the
// pod to the PV.
func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error {
for {
event, ok := <-podCh
if !ok {
return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name)
}
switch event.Object.(type) {
case *v1.Pod:
// POD changed
pod := event.Object.(*v1.Pod)
glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
switch event.Type {
case watch.Added, watch.Modified:
if pod.Status.Phase == v1.PodSucceeded {
// Recycle succeeded.
return nil
}
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Message != "" {
return fmt.Errorf(pod.Status.Message)
} else {
return fmt.Errorf("pod failed, pod.Status.Message unknown.")
}
}
case watch.Deleted:
return fmt.Errorf("recycler pod was deleted")
case watch.Error:
return fmt.Errorf("recycler pod watcher failed")
}
case *v1.Event:
// Event received
podEvent := event.Object.(*v1.Event)
glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
if event.Type == watch.Added {
recyclerClient.Event(podEvent.Type, podEvent.Message)
}
}
}
}
// recyclerClient abstracts access to a Pod by providing a narrower interface.
// This makes it easier to mock a client for testing.
type recyclerClient interface {
CreatePod(pod *v1.Pod) (*v1.Pod, error)
GetPod(name, namespace string) (*v1.Pod, error)
DeletePod(name, namespace string) error
// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
// to close the reflector backing the watch. The caller is responsible for
// deferring a close on the channel to stop the reflector.
WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
// Event sends an event to the volume that is being recycled.
Event(eventtype, message string)
}
func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
return &realRecyclerClient{
client,
recorder,
}
}
type realRecyclerClient struct {
client clientset.Interface
recorder RecycleEventRecorder
}
func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
return c.client.CoreV1().Pods(pod.Namespace).Create(pod)
}
func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
}
func (c *realRecyclerClient) DeletePod(name, namespace string) error {
return c.client.CoreV1().Pods(namespace).Delete(name, nil)
}
func (c *realRecyclerClient) Event(eventtype, message string) {
c.recorder(eventtype, message)
}
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
podSelector, err := fields.ParseSelector("metadata.name=" + name)
if err != nil {
return nil, err
}
options := metav1.ListOptions{
FieldSelector: podSelector.String(),
Watch: true,
}
podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options)
if err != nil {
return nil, err
}
eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{
FieldSelector: eventSelector.String(),
Watch: true,
})
if err != nil {
podWatch.Stop()
return nil, err
}
eventCh := make(chan watch.Event, 30)
go func() {
defer eventWatch.Stop()
defer podWatch.Stop()
defer close(eventCh)
var podWatchChannelClosed bool
var eventWatchChannelClosed bool
for {
select {
case _ = <-stopChannel:
return
case podEvent, ok := <-podWatch.ResultChan():
if !ok {
podWatchChannelClosed = true
} else {
eventCh <- podEvent
}
case eventEvent, ok := <-eventWatch.ResultChan():
if !ok {
eventWatchChannelClosed = true
} else {
eventCh <- eventEvent
}
}
if podWatchChannelClosed && eventWatchChannelClosed {
break
}
}
}()
return eventCh, nil
}
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The calculation and return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
// greater.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
giQty := resource.MustParse("1Gi")
pvQty := pv.Spec.Capacity[v1.ResourceStorage]
giSize := giQty.Value()
pvSize := pvQty.Value()
timeout := (pvSize / giSize) * int64(timeoutIncrement)
if timeout < int64(minimumTimeout) {
return int64(minimumTimeout)
}
return timeout
}
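
To make the formula concrete: with a minimumTimeout of 300 and a timeoutIncrement of 30, a 100Gi PV gets 3000 seconds while a 1Gi PV falls back to the minimum. A self-contained sketch of the same arithmetic (illustrative only, with the PV size pre-converted to whole Gi units):

package main

import "fmt"

// calcTimeout mirrors the calculation in CalculateTimeoutForVolume above.
func calcTimeout(minimumTimeout, timeoutIncrement int, pvSizeGi int64) int64 {
	timeout := pvSizeGi * int64(timeoutIncrement)
	if timeout < int64(minimumTimeout) {
		return int64(minimumTimeout)
	}
	return timeout
}

func main() {
	fmt.Println(calcTimeout(300, 30, 100)) // 3000: the per-Gi increment wins
	fmt.Println(calcTimeout(300, 30, 1))   // 300: the minimum wins
}
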
// RoundUpSize calculates how many allocation units are needed to accommodate
// a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS
// allocates volumes in gibibyte-sized chunks,
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}
// RoundUpToGB rounds up the given quantity to chunks of GB
func RoundUpToGB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GB)
}
// RoundUpToGiB rounds up the given quantity to chunks of GiB
func RoundUpToGiB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GIB)
}
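
The doc comment's 1500MiB example is easy to check mechanically. A self-contained sketch (roundUpSize is a local copy of RoundUpSize above):

package main

import "fmt"

const gib = 1024 * 1024 * 1024

// roundUpSize is a local copy of RoundUpSize above.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	// 1500MiB does not fit in one 1GiB unit, so two units are needed
	fmt.Println(roundUpSize(1500*1024*1024, gib)) // 2
	// exact multiples are not rounded up
	fmt.Println(roundUpSize(2*gib, gib)) // 2
}
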
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
// string fits given length and cuts "dynamic" if not.
func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
prefix := clusterName + "-dynamic"
pvLen := len(pvName)
// cut the "<clusterName>-dynamic" to fit full pvName into maxLength
// +1 for the '-' dash
if pvLen+1+len(prefix) > maxLength {
prefix = prefix[:maxLength-pvLen-1]
}
return prefix + "-" + pvName
}
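
The truncation rule is easiest to see with a tight length budget. A self-contained sketch (hypothetical cluster and PV names; generateVolumeName is a local copy of the function above):

package main

import "fmt"

// generateVolumeName is a local copy of GenerateVolumeName above.
func generateVolumeName(clusterName, pvName string, maxLength int) string {
	prefix := clusterName + "-dynamic"
	pvLen := len(pvName)
	if pvLen+1+len(prefix) > maxLength {
		prefix = prefix[:maxLength-pvLen-1]
	}
	return prefix + "-" + pvName
}

func main() {
	// plenty of room: the full "-dynamic" infix is kept
	fmt.Println(generateVolumeName("mycluster", "pvc-1234", 63)) // mycluster-dynamic-pvc-1234
	// tight budget: the prefix is cut so the full PV name still fits
	fmt.Println(generateVolumeName("mycluster", "pvc-1234", 20)) // mycluster-d-pvc-1234
}
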
// GetPath checks if the path from the mounter is empty.
func GetPath(mounter Mounter) (string, error) {
path := mounter.GetPath()
if path == "" {
return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String())
}
return path, nil
}
// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name
// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name.
// However, if the PVCName ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
// Zones.List returns zones in a consistent order (sorted)
// We do have a potential failure case where volumes will not be properly spread,
// if the set of zones changes during StatefulSet volume creation. However, this is
// probably relatively unlikely because we expect the set of zones to be essentially
// static for clusters.
// Hopefully we can address this problem if/when we do full scheduler integration of
// PVC placement (which could also e.g. avoid putting volumes in overloaded or
// unhealthy zones)
zoneSlice := zones.List()
zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
return zone
}
// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects a multiple zones, for multi-zone disks.
func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String {
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
// Zones.List returns zones in a consistent order (sorted)
// We do have a potential failure case where volumes will not be properly spread,
// if the set of zones changes during StatefulSet volume creation. However, this is
// probably relatively unlikely because we expect the set of zones to be essentially
// static for clusters.
// Hopefully we can address this problem if/when we do full scheduler integration of
// PVC placement (which could also e.g. avoid putting volumes in overloaded or
// unhealthy zones)
zoneSlice := zones.List()
replicaZones := sets.NewString()
startingIndex := index * numZones
for index = startingIndex; index < startingIndex+numZones; index++ {
zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
replicaZones.Insert(zone)
}
glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q",
pvcName, replicaZones.UnsortedList(), zoneSlice)
return replicaZones
}
func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
if pvcName == "" {
// We should always be called with a name; this shouldn't happen
glog.Warningf("No name defined during volume create; choosing random zone")
hash = rand.Uint32()
} else {
hashString := pvcName
// Heuristic to make sure that volumes in a StatefulSet are spread across zones
// StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id,
// where Id is an integer index.
// Note though that if a StatefulSet pod has multiple claims, we need them to be
// in the same zone, because otherwise the pod will be unable to mount both volumes,
// and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when
// it looks like `ClaimName-StatefulSetName-Id`.
// We continue to round-robin volume names that look like `Name-Id` also; this is a useful
// feature for users that are creating statefulset-like functionality without using statefulsets.
lastDash := strings.LastIndexByte(pvcName, '-')
if lastDash != -1 {
statefulsetIDString := pvcName[lastDash+1:]
statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32)
if err == nil {
// Offset by the statefulsetID, so we round-robin across zones
index = uint32(statefulsetID)
// We still hash the volume name, but only the prefix
hashString = pvcName[:lastDash]
// In the special case where it looks like `ClaimName-StatefulSetName-Id`,
// hash only the StatefulSetName, so that different claims on the same StatefulSet
// member end up in the same zone.
// Note that StatefulSetName (and ClaimName) might themselves both have dashes.
// We actually just take the portion after the final - of ClaimName-StatefulSetName.
// For our purposes it doesn't much matter (just suboptimal spreading).
lastDash := strings.LastIndexByte(hashString, '-')
if lastDash != -1 {
hashString = hashString[lastDash+1:]
}
glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
}
}
// We hash the (base) volume name, so we don't bias towards the first N zones
h := fnv.New32()
h.Write([]byte(hashString))
hash = h.Sum32()
}
return hash, index
}
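
For example, the StatefulSet claims www-web-2 and data-web-2 both reduce to hashing "web" with an index offset of 2, so every volume of pod web-2 lands in the same zone. A self-contained sketch of just the suffix parsing (the empty-name and logging branches of the real function are omitted):

package main

import (
	"fmt"
	"hash/fnv"
	"strconv"
	"strings"
)

// hashAndIndex mirrors the suffix handling in getPVCNameHashAndIndexOffset above.
func hashAndIndex(pvcName string) (uint32, uint32) {
	var index uint32
	hashString := pvcName
	if lastDash := strings.LastIndexByte(pvcName, '-'); lastDash != -1 {
		if id, err := strconv.ParseUint(pvcName[lastDash+1:], 10, 32); err == nil {
			index = uint32(id)
			hashString = pvcName[:lastDash]
			// for ClaimName-StatefulSetName-Id, hash only the StatefulSetName
			if d := strings.LastIndexByte(hashString, '-'); d != -1 {
				hashString = hashString[d+1:]
			}
		}
	}
	h := fnv.New32()
	h.Write([]byte(hashString))
	return h.Sum32(), index
}

func main() {
	// both claims of pod web-2 hash "web" and share index 2,
	// so they are assigned the same zone
	fmt.Println(hashAndIndex("www-web-2"))
	fmt.Println(hashAndIndex("data-web-2"))
}
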
// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
// to empty_dir
func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error {
glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
// Wrap EmptyDir, let it do the teardown.
wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
if err != nil {
return err
}
return wrapped.TearDownAt(dir)
}
// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options
func MountOptionFromSpec(spec *Spec, options ...string) []string {
pv := spec.PersistentVolume
if pv != nil {
// Use beta annotation first
if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
moList := strings.Split(mo, ",")
return JoinMountOptions(moList, options)
}
if len(pv.Spec.MountOptions) > 0 {
return JoinMountOptions(pv.Spec.MountOptions, options)
}
}
return options
}
// JoinMountOptions joins mount options eliminating duplicates
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
allMountOptions := sets.NewString()
for _, mountOption := range userOptions {
if len(mountOption) > 0 {
allMountOptions.Insert(mountOption)
}
}
for _, mountOption := range systemOptions {
allMountOptions.Insert(mountOption)
}
return allMountOptions.UnsortedList()
}
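
A self-contained sketch of the deduplication (hypothetical option lists; joinMountOptions is a local copy of JoinMountOptions above, using the same sets package):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// joinMountOptions is a local copy of JoinMountOptions above.
func joinMountOptions(userOptions, systemOptions []string) []string {
	allMountOptions := sets.NewString()
	for _, o := range userOptions {
		if len(o) > 0 {
			allMountOptions.Insert(o)
		}
	}
	for _, o := range systemOptions {
		allMountOptions.Insert(o)
	}
	return allMountOptions.UnsortedList()
}

func main() {
	// "ro" appears in both lists but is emitted only once
	fmt.Println(joinMountOptions([]string{"ro", "noatime"}, []string{"ro", "bind"}))
}
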
// ValidateZone returns:
// - an error in case zone is an empty string or contains only any combination of spaces and tab characters
// - nil otherwise
func ValidateZone(zone string) error {
if strings.TrimSpace(zone) == "" {
return fmt.Errorf("the provided %q zone is not valid, it's an empty string or contains only spaces and tab characters", zone)
}
return nil
}
// AccessModesContains returns whether the requested mode is contained by modes
func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
// AccessModesContainedInAll returns whether all of the requested modes are contained by modes
func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
for _, mode := range requestedModes {
if !AccessModesContains(indexedModes, mode) {
return false
}
}
return true
}
// GetWindowsPath returns the Windows form of the given path, prefixing c: when the path starts with a separator
func GetWindowsPath(path string) string {
windowsPath := strings.Replace(path, "/", "\\", -1)
if strings.HasPrefix(windowsPath, "\\") {
windowsPath = "c:" + windowsPath
}
return windowsPath
}
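
A self-contained sketch of the conversion (hypothetical paths; getWindowsPath is a local copy of the function above):

package main

import (
	"fmt"
	"strings"
)

// getWindowsPath is a local copy of GetWindowsPath above.
func getWindowsPath(path string) string {
	windowsPath := strings.Replace(path, "/", "\\", -1)
	if strings.HasPrefix(windowsPath, "\\") {
		windowsPath = "c:" + windowsPath
	}
	return windowsPath
}

func main() {
	fmt.Println(getWindowsPath("/var/lib/kubelet")) // c:\var\lib\kubelet
	fmt.Println(getWindowsPath("d:\\data"))         // d:\data (drive-qualified paths pass through)
}
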

View file

@@ -1,454 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
maxFileNameLength = 255
maxPathLength = 4096
)
// AtomicWriter handles atomically projecting content for a set of files into
// a target directory.
//
// Note:
//
// 1. AtomicWriter reserves the set of pathnames starting with `..`.
// 2. AtomicWriter offers no concurrency guarantees and must be synchronized
// by the caller.
//
// The visible files in this volume are symlinks to files in the writer's data
// directory. Actual files are stored in a hidden timestamped directory which
// is symlinked to by the data directory. The timestamped directory and
// data directory symlink are created in the writer's target dir.  This scheme
// allows the files to be atomically updated by changing the target of the
// data directory symlink.
//
// Consumers of the target directory can monitor the ..data symlink using
// inotify or fanotify to receive events when the content in the volume is
// updated.
type AtomicWriter struct {
targetDir string
logContext string
}
type FileProjection struct {
Data []byte
Mode int32
}
// NewAtomicWriter creates a new AtomicWriter configured to write to the given
// target directory, or returns an error if the target directory does not exist.
func NewAtomicWriter(targetDir string, logContext string) (*AtomicWriter, error) {
_, err := os.Stat(targetDir)
if os.IsNotExist(err) {
return nil, err
}
return &AtomicWriter{targetDir: targetDir, logContext: logContext}, nil
}
const (
dataDirName = "..data"
newDataDirName = "..data_tmp"
)
// Write does an atomic projection of the given payload into the writer's target
// directory. Input paths must not begin with '..'.
//
// The Write algorithm is:
//
// 1. The payload is validated; if the payload is invalid, the function returns
// 2.  The current timestamped directory is detected by reading the data directory
// symlink
// 3. The old version of the volume is walked to determine whether any
// portion of the payload was deleted and is still present on disk.
// 4. The data in the current timestamped directory is compared to the projected
// data to determine if an update is required.
// 5.  A new timestamped dir is created
// 6. The payload is written to the new timestamped directory
// 7.  Symlinks and directory for new user-visible files are created (if needed).
//
// For example, consider the files:
// <target-dir>/podName
// <target-dir>/user/labels
// <target-dir>/k8s/annotations
//
// The user visible files are symbolic links into the internal data directory:
// <target-dir>/podName -> ..data/podName
// <target-dir>/usr -> ..data/usr
// <target-dir>/k8s -> ..data/k8s
//
// The data directory itself is a link to a timestamped directory with
// the real data:
// <target-dir>/..data -> ..2016_02_01_15_04_05.12345678/
// 8.  A symlink to the new timestamped directory ..data_tmp is created that will
// become the new data directory
// 9.  The new data directory symlink is renamed to the data directory; rename is atomic
// 10. Old paths are removed from the user-visible portion of the target directory
// 11.  The previous timestamped directory is removed, if it exists
func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
// (1)
cleanPayload, err := validatePayload(payload)
if err != nil {
glog.Errorf("%s: invalid payload: %v", w.logContext, err)
return err
}
// (2)
dataDirPath := path.Join(w.targetDir, dataDirName)
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil {
if !os.IsNotExist(err) {
glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
return err
}
// although Readlink() returns "" on err, don't be fragile by relying on it (since it's not specified in docs)
// empty oldTsDir indicates that it didn't exist
oldTsDir = ""
}
oldTsPath := path.Join(w.targetDir, oldTsDir)
var pathsToRemove sets.String
// if there was no old version, there's nothing to remove
if len(oldTsDir) != 0 {
// (3)
pathsToRemove, err = w.pathsToRemove(cleanPayload, oldTsPath)
if err != nil {
glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
return err
}
// (4)
if should, err := shouldWritePayload(cleanPayload, oldTsPath); err != nil {
glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
return err
} else if !should && len(pathsToRemove) == 0 {
glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
return nil
} else {
glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
}
}
// (5)
tsDir, err := w.newTimestampDir()
if err != nil {
glog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err)
return err
}
tsDirName := filepath.Base(tsDir)
// (6)
if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil {
glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
return err
} else {
glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
}
// (7)
if err = w.createUserVisibleFiles(cleanPayload); err != nil {
glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
return err
}
// (8)
newDataDirPath := path.Join(w.targetDir, newDataDirName)
if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
os.RemoveAll(tsDir)
glog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err)
return err
}
// (9)
if runtime.GOOS == "windows" {
os.Remove(dataDirPath)
err = os.Symlink(tsDirName, dataDirPath)
os.Remove(newDataDirPath)
} else {
err = os.Rename(newDataDirPath, dataDirPath)
}
if err != nil {
os.Remove(newDataDirPath)
os.RemoveAll(tsDir)
glog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err)
return err
}
// (10)
if err = w.removeUserVisiblePaths(pathsToRemove); err != nil {
glog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err)
return err
}
// (11)
if len(oldTsDir) > 0 {
if err = os.RemoveAll(oldTsPath); err != nil {
glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
return err
}
}
return nil
}
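
A minimal usage sketch of the writer (illustrative only: the import path, target directory, and payload are assumptions, and the target directory must already exist):

package main

import (
	"k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	w, err := util.NewAtomicWriter("/var/run/example-volume", "example")
	if err != nil {
		panic(err)
	}
	payload := map[string]util.FileProjection{
		"config/app.yaml": {Data: []byte("key: value\n"), Mode: 0644},
	}
	// all files in the payload become visible atomically via the
	// ..data symlink swap described in the algorithm above
	if err := w.Write(payload); err != nil {
		panic(err)
	}
}
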
// validatePayload returns an error if any path in the payload is invalid;
// otherwise it returns a copy of the payload with the paths cleaned.
func validatePayload(payload map[string]FileProjection) (map[string]FileProjection, error) {
cleanPayload := make(map[string]FileProjection)
for k, content := range payload {
if err := validatePath(k); err != nil {
return nil, err
}
cleanPayload[path.Clean(k)] = content
}
return cleanPayload, nil
}
// validatePath validates a single path, returning an error if the path is
// invalid. paths may not:
//
// 1. be absolute
// 2. contain '..' as an element
// 3. start with '..'
// 4. contain filenames larger than 255 characters
// 5. be longer than 4096 characters
func validatePath(targetPath string) error {
// TODO: somehow unify this with the similar api validation,
// validateVolumeSourcePath; the error semantics are just different enough
// from this that it was time-prohibitive trying to find the right
// refactoring to re-use.
if targetPath == "" {
return fmt.Errorf("invalid path: must not be empty: %q", targetPath)
}
if path.IsAbs(targetPath) {
return fmt.Errorf("invalid path: must be relative path: %s", targetPath)
}
if len(targetPath) > maxPathLength {
return fmt.Errorf("invalid path: must be less than %d characters", maxPathLength)
}
items := strings.Split(targetPath, string(os.PathSeparator))
for _, item := range items {
if item == ".." {
return fmt.Errorf("invalid path: must not contain '..': %s", targetPath)
}
if len(item) > maxFileNameLength {
return fmt.Errorf("invalid path: filenames must be less than %d characters", maxFileNameLength)
}
}
if strings.HasPrefix(items[0], "..") && len(items[0]) > 2 {
return fmt.Errorf("invalid path: must not start with '..': %s", targetPath)
}
return nil
}
// shouldWritePayload returns whether the payload should be written to disk.
func shouldWritePayload(payload map[string]FileProjection, oldTsDir string) (bool, error) {
for userVisiblePath, fileProjection := range payload {
shouldWrite, err := shouldWriteFile(path.Join(oldTsDir, userVisiblePath), fileProjection.Data)
if err != nil {
return false, err
}
if shouldWrite {
return true, nil
}
}
return false, nil
}
// shouldWriteFile returns whether a new version of a file should be written to disk.
func shouldWriteFile(path string, content []byte) (bool, error) {
_, err := os.Lstat(path)
if os.IsNotExist(err) {
return true, nil
}
contentOnFs, err := ioutil.ReadFile(path)
if err != nil {
return false, err
}
return (bytes.Compare(content, contentOnFs) != 0), nil
}
// pathsToRemove walks the current version of the data directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir string) (sets.String, error) {
paths := sets.NewString()
visitor := func(path string, info os.FileInfo, err error) error {
relativePath := strings.TrimPrefix(path, oldTsDir)
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
if relativePath == "" {
return nil
}
paths.Insert(relativePath)
return nil
}
err := filepath.Walk(oldTsDir, visitor)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, err
}
glog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List())
newPaths := sets.NewString()
for file := range payload {
// add all subpaths for the payload to the set of new paths
// to avoid attempting to remove non-empty dirs
for subPath := file; subPath != ""; {
newPaths.Insert(subPath)
subPath, _ = filepath.Split(subPath)
subPath = strings.TrimSuffix(subPath, string(os.PathSeparator))
}
}
glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List())
result := paths.Difference(newPaths)
glog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result)
return result, nil
}
// newTimestampDir creates a new timestamp directory
func (w *AtomicWriter) newTimestampDir() (string, error) {
tsDir, err := ioutil.TempDir(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
if err != nil {
glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err)
return "", err
}
// 0755 permissions are needed to allow 'group' and 'other' to recurse the
// directory tree. do a chmod here to ensure that permissions are set correctly
// regardless of the process' umask.
err = os.Chmod(tsDir, 0755)
if err != nil {
glog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err)
return "", err
}
return tsDir, nil
}
// writePayloadToDir writes the given payload to the given directory. The
// directory must exist.
func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir string) error {
for userVisiblePath, fileProjection := range payload {
content := fileProjection.Data
mode := os.FileMode(fileProjection.Mode)
fullPath := path.Join(dir, userVisiblePath)
baseDir, _ := filepath.Split(fullPath)
err := os.MkdirAll(baseDir, os.ModePerm)
if err != nil {
glog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err)
return err
}
err = ioutil.WriteFile(fullPath, content, mode)
if err != nil {
glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
return err
}
// Chmod is needed because ioutil.WriteFile() ends up calling
// open(2) to create the file, so the final mode used is "mode &
// ~umask". But we want to make sure the specified mode is used
// in the file no matter what the umask is.
err = os.Chmod(fullPath, mode)
if err != nil {
glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
}
}
return nil
}
// createUserVisibleFiles creates the relative symlinks for all the
// files configured in the payload. If the directory in a file path does not
// exist, it is created.
//
// Viz:
// For files: "bar", "foo/bar", "baz/bar", "foo/baz/blah"
// the following symlinks are created:
// bar -> ..data/bar
// foo -> ..data/foo
// baz -> ..data/baz
func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection) error {
for userVisiblePath := range payload {
slashpos := strings.Index(userVisiblePath, string(os.PathSeparator))
if slashpos == -1 {
slashpos = len(userVisiblePath)
}
linkname := userVisiblePath[:slashpos]
_, err := os.Readlink(path.Join(w.targetDir, linkname))
if err != nil && os.IsNotExist(err) {
// The link into the data directory for this path doesn't exist; create it
visibleFile := path.Join(w.targetDir, linkname)
dataDirFile := path.Join(dataDirName, linkname)
err = os.Symlink(dataDirFile, visibleFile)
if err != nil {
return err
}
}
}
return nil
}
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
ps := string(os.PathSeparator)
var lasterr error
for p := range paths {
// only remove symlinks from the volume root directory (i.e. items that don't contain '/')
if strings.Contains(p, ps) {
continue
}
if err := os.Remove(path.Join(w.targetDir, p)); err != nil {
glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err)
lasterr = err
}
}
return lasterr
}

View file

@@ -1,32 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// DeviceUtil is a util for common device methods
type DeviceUtil interface {
FindMultipathDeviceForDevice(disk string) string
FindSlaveDevicesOnMultipath(disk string) []string
}
type deviceHandler struct {
get_io IoUtil
}
// NewDeviceHandler creates a new DeviceUtil backed by the given IoUtil
func NewDeviceHandler(io IoUtil) DeviceUtil {
return &deviceHandler{get_io: io}
}

View file

@@ -1,82 +0,0 @@
// +build linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"path"
"strings"
)
// FindMultipathDeviceForDevice given a device name like /dev/sdx, find the devicemapper parent
func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
io := handler.get_io
disk, err := findDeviceForPath(device, io)
if err != nil {
return ""
}
sysPath := "/sys/block/"
if dirs, err := io.ReadDir(sysPath); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.HasPrefix(name, "dm-") {
if _, err1 := io.Lstat(sysPath + name + "/slaves/" + disk); err1 == nil {
return "/dev/" + name
}
}
}
}
return ""
}
// findDeviceForPath finds the underlying disk for a linked path such as
// /dev/disk/by-path/XXXX or /dev/mapper/XXXX; it returns sdX or hdX etc.
// If /dev/sdX is passed in, sdX is returned.
func findDeviceForPath(path string, io IoUtil) (string, error) {
devicePath, err := io.EvalSymlinks(path)
if err != nil {
return "", err
}
// if path /dev/hdX split into "", "dev", "hdX" then we will
// return just the last part
parts := strings.Split(devicePath, "/")
if len(parts) == 3 && strings.HasPrefix(parts[1], "dev") {
return parts[2], nil
}
return "", errors.New("Illegal path for device " + devicePath)
}
// FindSlaveDevicesOnMultipath, given a dm name like /dev/dm-1, finds all
// devices managed by the device-mapper device dm-1.
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
var devices []string
io := handler.get_io
// Split path /dev/dm-1 into "", "dev", "dm-1"
parts := strings.Split(dm, "/")
if len(parts) != 3 || !strings.HasPrefix(parts[1], "dev") {
return devices
}
disk := parts[2]
slavesPath := path.Join("/sys/block/", disk, "/slaves/")
if files, err := io.ReadDir(slavesPath); err == nil {
for _, f := range files {
devices = append(devices, path.Join("/dev/", f.Name()))
}
}
return devices
}
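
Both helpers rely on the same /dev path convention. A self-contained sketch of that parsing (hypothetical paths):

package main

import (
	"fmt"
	"strings"
)

// deviceName mirrors the split used above: "/dev/dm-1" becomes
// ["", "dev", "dm-1"], and only the final element is kept.
func deviceName(devPath string) (string, bool) {
	parts := strings.Split(devPath, "/")
	if len(parts) == 3 && strings.HasPrefix(parts[1], "dev") {
		return parts[2], true
	}
	return "", false
}

func main() {
	fmt.Println(deviceName("/dev/dm-1"))           // dm-1 true
	fmt.Println(deviceName("/dev/disk/by-path/x")) // "" false: symlinks must be resolved first
}
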

View file

@@ -1,30 +0,0 @@
// +build !linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// FindMultipathDeviceForDevice is unsupported on this platform and returns ""
func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
return ""
}
// FindSlaveDevicesOnMultipath is unsupported on this platform and returns an empty slice
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(disk string) []string {
out := []string{}
return out
}

View file

@@ -1,41 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
k8stypes "k8s.io/apimachinery/pkg/types"
)
// DanglingAttachError, returned on attach, indicates that the volume is
// attached to a node other than the one we expected.
type DanglingAttachError struct {
msg string
CurrentNode k8stypes.NodeName
DevicePath string
}
func (err *DanglingAttachError) Error() string {
return err.msg
}
func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error {
return &DanglingAttachError{
msg: msg,
CurrentNode: node,
DevicePath: devicePath,
}
}
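
Callers detect the condition with a type assertion on the returned error. A self-contained sketch (the struct is copied from above; the node name and device path are hypothetical):

package main

import (
	"fmt"

	k8stypes "k8s.io/apimachinery/pkg/types"
)

// DanglingAttachError is copied from above for illustration.
type DanglingAttachError struct {
	msg         string
	CurrentNode k8stypes.NodeName
	DevicePath  string
}

func (err *DanglingAttachError) Error() string { return err.msg }

func main() {
	var err error = &DanglingAttachError{
		msg:         "volume is attached to another node",
		CurrentNode: "node-2",
		DevicePath:  "/dev/xvdf",
	}
	// the assertion recovers which node actually holds the attachment
	if dangling, ok := err.(*DanglingAttachError); ok {
		fmt.Printf("attached to %s at %s\n", dangling.CurrentNode, dangling.DevicePath)
	}
}
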

View file

@@ -1,25 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
const (
// Name of finalizer on PVCs that have a running pod.
PVCProtectionFinalizer = "kubernetes.io/pvc-protection"
// Name of finalizer on PVs that are bound by PVCs
PVProtectionFinalizer = "kubernetes.io/pv-protection"
)

View file

@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package util
package fs
import (
"bytes"

View file

@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package util
package fs
import (
"fmt"

View file

@@ -1,47 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"io/ioutil"
"os"
"path/filepath"
)
// IoUtil is a mockable util for common IO operations
type IoUtil interface {
ReadDir(dirname string) ([]os.FileInfo, error)
Lstat(name string) (os.FileInfo, error)
EvalSymlinks(path string) (string, error)
}
type osIOHandler struct{}
// NewIOHandler creates a new IoUtil implementation
func NewIOHandler() IoUtil {
return &osIOHandler{}
}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(name)
}
func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
return filepath.EvalSymlinks(path)
}
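
Because IoUtil is an interface, tests can substitute a fake instead of touching /sys and /dev. A minimal sketch of such a fake (hypothetical link table; ReadDir and Lstat are stubbed out):

package main

import (
	"fmt"
	"os"
)

// fakeIOHandler satisfies the same method set as IoUtil above.
type fakeIOHandler struct {
	symlinks map[string]string
}

func (f *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) { return nil, nil }
func (f *fakeIOHandler) Lstat(name string) (os.FileInfo, error)        { return nil, nil }
func (f *fakeIOHandler) EvalSymlinks(path string) (string, error) {
	if target, ok := f.symlinks[path]; ok {
		return target, nil
	}
	return "", fmt.Errorf("no such link: %s", path)
}

func main() {
	io := &fakeIOHandler{symlinks: map[string]string{"/dev/mapper/mpatha": "/dev/dm-0"}}
	target, _ := io.EvalSymlinks("/dev/mapper/mpatha")
	fmt.Println(target) // /dev/dm-0
}
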

View file

@@ -1,64 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"time"
"github.com/prometheus/client_golang/prometheus"
)
var storageOperationMetric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "storage_operation_duration_seconds",
Help: "Storage operation duration",
Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50},
},
[]string{"volume_plugin", "operation_name"},
)
var storageOperationErrorMetric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "storage_operation_errors_total",
Help: "Storage operation errors",
},
[]string{"volume_plugin", "operation_name"},
)
func init() {
registerMetrics()
}
func registerMetrics() {
prometheus.MustRegister(storageOperationMetric)
prometheus.MustRegister(storageOperationErrorMetric)
}
// OperationCompleteHook returns a hook to call when an operation is completed
func OperationCompleteHook(plugin, operationName string) func(*error) {
requestTime := time.Now()
opComplete := func(err *error) {
timeTaken := time.Since(requestTime).Seconds()
// Create metric with operation name and plugin name
if *err != nil {
storageOperationErrorMetric.WithLabelValues(plugin, operationName).Inc()
} else {
storageOperationMetric.WithLabelValues(plugin, operationName).Observe(timeTaken)
}
}
return opComplete
}
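
The hook is designed to be created on entry and deferred with a pointer to a named return error: Go evaluates the deferred function value immediately, so the start time is captured at the top, and the final err selects between the duration histogram and the error counter. A sketch of the call pattern (the caller package, plugin name, operation name, and import path are all assumptions):

package volumeplugin // hypothetical caller

import "k8s.io/kubernetes/pkg/volume/util"

func attachVolume() (err error) {
	// OperationCompleteHook runs now (capturing the start time);
	// the returned closure runs when attachVolume returns
	defer util.OperationCompleteHook("example-plugin", "volume_attach")(&err)
	// do the real work; a non-nil err at return time increments
	// storage_operation_errors_total instead of observing the histogram
	return nil
}
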

View file

@@ -0,0 +1,252 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package recyclerclient
import (
"fmt"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
)
type RecycleEventRecorder func(eventtype, message string)
// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
// Recyclers. This function will save the given Pod to the API and watch it
// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,
// whichever comes first. Deletion of the recycler pod is always
// attempted before returning.
//
// In case there is a pod with the same namespace+name already running, this
// function deletes it as it is not able to judge if it is an old recycler
// or the user has forged a fake recycler to block Kubernetes from recycling.
//
// pod - the pod designed by a volume plugin to recycle the volume. pod.Name
// will be overwritten with unique name based on PV.Name.
// client - kube client for API operations.
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
}
// same as above func comments, except 'recyclerClient' is a narrower pod API
// interface to ease testing
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
// Generate unique name for the recycler pod - we need to get "already
// exists" error when a previous controller has already started recycling
// the volume. Here we assume that pv.Name is already unique.
pod.Name = "recycler-for-" + pvName
pod.GenerateName = ""
stopChannel := make(chan struct{})
defer close(stopChannel)
podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
if err != nil {
glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return err
}
// Start the pod
_, err = recyclerClient.CreatePod(pod)
if err != nil {
if errors.IsAlreadyExists(err) {
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
}
// Recycler will try again and the old pod will be hopefully deleted
// at that time.
return fmt.Errorf("old recycler pod found, will retry later")
}
return fmt.Errorf("unexpected error creating recycler pod: %+v", err)
}
err = waitForPod(pod, recyclerClient, podCh)
// In all cases delete the recycler pod and log its result.
glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Returning recycler error is preferred, the pod will be deleted again on
// the next retry.
if err != nil {
return fmt.Errorf("failed to recycle volume: %s", err)
}
// Recycle succeeded but we failed to delete the recycler pod. Report it,
// the controller will re-try recycling the PV again shortly.
if deleteErr != nil {
return fmt.Errorf("failed to delete recycler pod: %s", deleteErr)
}
return nil
}
// waitForPod watches the pod until it finishes and sends all events on the
// pod to the PV.
func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error {
for {
event, ok := <-podCh
if !ok {
return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name)
}
switch event.Object.(type) {
case *v1.Pod:
// POD changed
pod := event.Object.(*v1.Pod)
glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
switch event.Type {
case watch.Added, watch.Modified:
if pod.Status.Phase == v1.PodSucceeded {
// Recycle succeeded.
return nil
}
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Message != "" {
return fmt.Errorf(pod.Status.Message)
} else {
return fmt.Errorf("pod failed, pod.Status.Message unknown.")
}
}
case watch.Deleted:
return fmt.Errorf("recycler pod was deleted")
case watch.Error:
return fmt.Errorf("recycler pod watcher failed")
}
case *v1.Event:
// Event received
podEvent := event.Object.(*v1.Event)
glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
if event.Type == watch.Added {
recyclerClient.Event(podEvent.Type, podEvent.Message)
}
}
}
}
// recyclerClient abstracts access to a Pod by providing a narrower interface.
// This makes it easier to mock a client for testing.
type recyclerClient interface {
CreatePod(pod *v1.Pod) (*v1.Pod, error)
GetPod(name, namespace string) (*v1.Pod, error)
DeletePod(name, namespace string) error
// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
// to close the reflector backing the watch. The caller is responsible for
// deferring a close on the channel to stop the reflector.
WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
// Event sends an event to the volume that is being recycled.
Event(eventtype, message string)
}
func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
return &realRecyclerClient{
client,
recorder,
}
}
type realRecyclerClient struct {
client clientset.Interface
recorder RecycleEventRecorder
}
func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
return c.client.CoreV1().Pods(pod.Namespace).Create(pod)
}
func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
}
func (c *realRecyclerClient) DeletePod(name, namespace string) error {
return c.client.CoreV1().Pods(namespace).Delete(name, nil)
}
func (c *realRecyclerClient) Event(eventtype, message string) {
c.recorder(eventtype, message)
}
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
podSelector, err := fields.ParseSelector("metadata.name=" + name)
if err != nil {
return nil, err
}
options := metav1.ListOptions{
FieldSelector: podSelector.String(),
Watch: true,
}
podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options)
if err != nil {
return nil, err
}
eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{
FieldSelector: eventSelector.String(),
Watch: true,
})
if err != nil {
podWatch.Stop()
return nil, err
}
eventCh := make(chan watch.Event, 30)
go func() {
defer eventWatch.Stop()
defer podWatch.Stop()
defer close(eventCh)
var podWatchChannelClosed bool
var eventWatchChannelClosed bool
for {
select {
case _ = <-stopChannel:
return
case podEvent, ok := <-podWatch.ResultChan():
if !ok {
podWatchChannelClosed = true
} else {
eventCh <- podEvent
}
case eventEvent, ok := <-eventWatch.ResultChan():
if !ok {
eventWatchChannelClosed = true
} else {
eventCh <- eventEvent
}
}
if podWatchChannelClosed && eventWatchChannelClosed {
break
}
}
}()
return eventCh, nil
}
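
A sketch of how a volume plugin is expected to call into this package; the recorder forwards recycler pod events to the PV being recycled (the wrapper function is an assumption; the import path matches the one added to the plugin imports above):

package volumeplugin // hypothetical caller

import (
	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
)

// recycle blocks until the recycler pod succeeds, fails, or exceeds its
// ActiveDeadlineSeconds; pod.Name is overwritten from pvName internally.
func recycle(pvName string, podTemplate *v1.Pod, client clientset.Interface) error {
	recorder := func(eventtype, message string) {
		// in a real plugin these events are forwarded to the PV
		glog.Infof("recycler event: %s: %s", eventtype, message)
	}
	return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, podTemplate, client, recorder)
}
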

View file

@@ -1,125 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
)
var (
knownResizeConditions map[v1.PersistentVolumeClaimConditionType]bool = map[v1.PersistentVolumeClaimConditionType]bool{
v1.PersistentVolumeClaimFileSystemResizePending: true,
v1.PersistentVolumeClaimResizing: true,
}
)
type resizeProcessStatus struct {
condition v1.PersistentVolumeClaimCondition
processed bool
}
// ClaimToClaimKey returns a namespace/name string for the given pvc
func ClaimToClaimKey(claim *v1.PersistentVolumeClaim) string {
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
}
// MarkFSResizeFinished marks file system resizing as done
func MarkFSResizeFinished(
pvc *v1.PersistentVolumeClaim,
capacity v1.ResourceList,
kubeClient clientset.Interface) error {
newPVC := pvc.DeepCopy()
newPVC.Status.Capacity = capacity
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{})
_, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return err
}
// PatchPVCStatus updates PVC status using PATCH verb
func PatchPVCStatus(
oldPVC *v1.PersistentVolumeClaim,
newPVC *v1.PersistentVolumeClaim,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
pvcName := oldPVC.Name
oldData, err := json.Marshal(oldPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal oldData for pvc %q with %v", pvcName, err)
}
newData, err := json.Marshal(newPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal newData for pvc %q with %v", pvcName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to CreateTwoWayMergePatch for pvc %q with %v ", pvcName, err)
}
updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace).
Patch(pvcName, types.StrategicMergePatchType, patchBytes, "status")
if updateErr != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to patch PVC %q with %v", pvcName, updateErr)
}
return updatedClaim, nil
}
// MergeResizeConditionOnPVC updates pvc with requested resize conditions
// leaving other conditions untouched.
func MergeResizeConditionOnPVC(
pvc *v1.PersistentVolumeClaim,
resizeConditions []v1.PersistentVolumeClaimCondition) *v1.PersistentVolumeClaim {
resizeConditionMap := map[v1.PersistentVolumeClaimConditionType]*resizeProcessStatus{}
for _, condition := range resizeConditions {
resizeConditionMap[condition.Type] = &resizeProcessStatus{condition, false}
}
oldConditions := pvc.Status.Conditions
newConditions := []v1.PersistentVolumeClaimCondition{}
for _, condition := range oldConditions {
// If the condition is not a resize-related type, keep it.
if _, ok := knownResizeConditions[condition.Type]; !ok {
newConditions = append(newConditions, condition)
continue
}
if newCondition, ok := resizeConditionMap[condition.Type]; ok {
if newCondition.condition.Status != condition.Status {
newConditions = append(newConditions, newCondition.condition)
} else {
newConditions = append(newConditions, condition)
}
newCondition.processed = true
}
}
// append all unprocessed conditions
for _, newCondition := range resizeConditionMap {
if !newCondition.processed {
newConditions = append(newConditions, newCondition.condition)
}
}
pvc.Status.Conditions = newConditions
return pvc
}
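
A sketch of the call pattern once a filesystem has actually been grown (the caller package, wrapper function, import path, and 100Gi figure are assumptions):

package volumeplugin // hypothetical caller

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/volume/util"
)

// finishResize records the size the filesystem was grown to; passing an
// empty condition list (inside MarkFSResizeFinished) clears the known
// resize conditions in a single strategic-merge PATCH of the PVC status.
func finishResize(pvc *v1.PersistentVolumeClaim, client clientset.Interface) error {
	capacity := v1.ResourceList{
		v1.ResourceStorage: resource.MustParse("100Gi"),
	}
	return util.MarkFSResizeFinished(pvc, capacity, client)
}
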

View file

@@ -1,512 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"syscall"
"github.com/golang/glog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/legacyscheme"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
)
const (
readyFileName = "ready"
losetupPath = "losetup"
ErrDeviceNotFound = "device not found"
ErrDeviceNotSupported = "device not supported"
)
// IsReady checks for the existence of a regular file
// called 'ready' in the given directory and returns
// true if that file exists.
func IsReady(dir string) bool {
readyFile := path.Join(dir, readyFileName)
s, err := os.Stat(readyFile)
if err != nil {
return false
}
if !s.Mode().IsRegular() {
glog.Errorf("ready-file is not a file: %s", readyFile)
return false
}
return true
}
// SetReady creates a file called 'ready' in the given
// directory. It logs an error if the file cannot be
// created.
func SetReady(dir string) {
if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) {
glog.Errorf("Can't mkdir %s: %v", dir, err)
return
}
readyFile := path.Join(dir, readyFileName)
file, err := os.Create(readyFile)
if err != nil {
glog.Errorf("Can't touch %s: %v", readyFile, err)
return
}
file.Close()
}
// UnmountPath is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
func UnmountPath(mountPath string, mounter mount.Interface) error {
return UnmountMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */)
}
// UnmountMountPoint is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error {
pathExists, pathErr := PathExists(mountPath)
if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
return nil
}
corruptedMnt := isCorruptedMnt(pathErr)
if pathErr != nil && !corruptedMnt {
return fmt.Errorf("Error checking path: %v", pathErr)
}
return doUnmountMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt)
}
// doUnmountMountPoint is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, Take it as an argument for convenience of testing
func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool, corruptedMnt bool) error {
if !corruptedMnt {
var notMnt bool
var err error
if extensiveMountPointCheck {
notMnt, err = mount.IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
}
if err != nil {
return err
}
if notMnt {
glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
}
}
// Unmount the mount path
glog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
if err := mounter.Unmount(mountPath); err != nil {
return err
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
if mntErr != nil {
return mntErr
}
if notMnt {
glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
return os.Remove(mountPath)
}
return fmt.Errorf("Failed to unmount path %v", mountPath)
}
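// Example (an illustrative sketch, not upstream code): tearing down a volume
// directory with the extensive check so bind mounts are handled correctly;
// volPath is a hypothetical mount path and mount.New is assumed to return a
// default mounter:
//
//	mounter := mount.New("" /* default mount path */)
//	if err := UnmountMountPoint(volPath, mounter, true /* extensiveMountPointCheck */); err != nil {
//		glog.Errorf("failed to tear down %s: %v", volPath, err)
//	}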
// PathExists returns true if the specified path exists.
func PathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else if isCorruptedMnt(err) {
return true, err
} else {
return false, err
}
}
// isCorruptedMnt returns true if err indicates a corrupted mount point
func isCorruptedMnt(err error) bool {
if err == nil {
return false
}
var underlyingError error
switch pe := err.(type) {
case nil:
return false
case *os.PathError:
underlyingError = pe.Err
case *os.LinkError:
underlyingError = pe.Err
case *os.SyscallError:
underlyingError = pe.Err
}
return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE
}
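// Example (an illustrative sketch): together, PathExists and isCorruptedMnt
// let callers treat a stale NFS or disconnected FUSE mount (stat returning
// ESTALE or ENOTCONN) as "exists but corrupted", so the unmount still runs:
//
//	exists, err := PathExists(mntPath) // mntPath is hypothetical
//	if exists && err != nil {
//		// corrupted mount point: skip the mount-point check, unmount directly
//	}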
// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
if kubeClient == nil {
return secret, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{})
if err != nil {
return secret, err
}
for name, data := range secrets.Data {
secret[name] = string(data)
}
return secret, nil
}
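// Example (an illustrative sketch): a volume plugin resolving credentials
// referenced by a volume source; "ceph-secret" and kubeClient are assumed
// to be supplied by the caller:
//
//	creds, err := GetSecretForPod(pod, "ceph-secret", kubeClient)
//	if err != nil {
//		return fmt.Errorf("couldn't get secret for pod %s: %v", pod.Name, err)
//	}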
// GetSecretForPV locates secret by name and namespace, verifies the secret type, and returns secret map
func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
if kubeClient == nil {
return secret, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
if err != nil {
return secret, err
}
if secrets.Type != v1.SecretType(volumePluginName) {
return secret, fmt.Errorf("Cannot get secret of type %s", volumePluginName)
}
for name, data := range secrets.Data {
secret[name] = string(data)
}
return secret, nil
}
func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) {
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
className := v1helper.GetPersistentVolumeClass(pv)
if className == "" {
return nil, fmt.Errorf("Volume has no storage class")
}
class, err := kubeClient.StorageV1().StorageClasses().Get(className, metav1.GetOptions{})
if err != nil {
return nil, err
}
return class, nil
}
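// Example (an illustrative sketch): a provisioner reading parameters off the
// PV's storage class; pv and kubeClient are assumed to be in scope:
//
//	class, err := GetClassForVolume(kubeClient, pv)
//	if err != nil {
//		return err
//	}
//	glog.V(4).Infof("class %s parameters: %v", class.Name, class.Parameters)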
// CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels
// This ensures that we don't mount a volume that doesn't belong to this node
func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
affinity, err := v1helper.GetStorageNodeAffinityFromAnnotation(pv.Annotations)
if err != nil {
return fmt.Errorf("Error getting storage node affinity: %v", err)
}
if affinity == nil {
return nil
}
if affinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
terms := affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", terms)
for _, term := range terms {
selector, err := v1helper.NodeSelectorRequirementsAsSelector(term.MatchExpressions)
if err != nil {
return fmt.Errorf("Failed to parse MatchExpressions: %v", err)
}
if !selector.Matches(labels.Set(nodeLabels)) {
return fmt.Errorf("NodeSelectorTerm %+v does not match node labels", term.MatchExpressions)
}
}
}
return nil
}
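// Example (an illustrative sketch): the kubelet side can refuse to mount a
// PV whose affinity does not match the local node; node is an assumed
// *v1.Node:
//
//	if err := CheckNodeAffinity(pv, node.Labels); err != nil {
//		return fmt.Errorf("volume %s cannot be used on this node: %v", pv.Name, err)
//	}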
// LoadPodFromFile will read, decode, and return a Pod from a file.
func LoadPodFromFile(filePath string) (*v1.Pod, error) {
if filePath == "" {
return nil, fmt.Errorf("file path not specified")
}
podDef, err := ioutil.ReadFile(filePath)
if err != nil {
return nil, fmt.Errorf("failed to read file path %s: %+v", filePath, err)
}
if len(podDef) == 0 {
return nil, fmt.Errorf("file was empty: %s", filePath)
}
pod := &v1.Pod{}
codec := legacyscheme.Codecs.UniversalDecoder()
if err := runtime.DecodeInto(codec, podDef, pod); err != nil {
return nil, fmt.Errorf("failed decoding file: %v", err)
}
return pod, nil
}
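// Example (an illustrative sketch): loading a pod manifest written by a
// test or checkpointing helper; the path is hypothetical and the file is
// expected to be JSON or YAML that the legacy scheme can decode:
//
//	pod, err := LoadPodFromFile("/var/lib/kubelet/pod-checkpoint.json")
//	if err != nil {
//		return err
//	}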
// ZonesSetToLabelValue converts a set of zones to a PV label value
// (a delimited string).
func ZonesSetToLabelValue(strSet sets.String) string {
return strings.Join(strSet.UnsortedList(), kubeletapis.LabelMultiZoneDelimiter)
}
// ZonesToSet converts a string containing a comma separated list of zones to a set
func ZonesToSet(zonesString string) (sets.String, error) {
return stringToSet(zonesString, ",")
}
// LabelZonesToSet converts a PV label value from a string containing a delimited list of zones to a set
func LabelZonesToSet(labelZonesValue string) (sets.String, error) {
return stringToSet(labelZonesValue, kubeletapis.LabelMultiZoneDelimiter)
}
// stringToSet converts a string containing a list separated by the specified delimiter to a set
func stringToSet(str, delimiter string) (sets.String, error) {
zonesSlice := strings.Split(str, delimiter)
zonesSet := make(sets.String)
for _, zone := range zonesSlice {
trimmedZone := strings.TrimSpace(zone)
if trimmedZone == "" {
return make(sets.String), fmt.Errorf(
"%q separated list (%q) must not contain an empty string",
delimiter,
str)
}
zonesSet.Insert(trimmedZone)
}
return zonesSet, nil
}
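// Example (an illustrative sketch): round-tripping zones between the
// comma-separated form used in storage class parameters and the delimited
// form stored in PV zone labels:
//
//	zones, err := ZonesToSet("us-east-1a, us-east-1b")
//	if err != nil {
//		return err
//	}
//	labelValue := ZonesSetToLabelValue(zones) // zones joined with kubeletapis.LabelMultiZoneDelimiter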
// BlockVolumePathHandler defines a set of operations for handling block volume-related operations
type BlockVolumePathHandler interface {
// MapDevice creates a symbolic link to block device under specified map path
MapDevice(devicePath string, mapPath string, linkName string) error
// UnmapDevice removes a symbolic link to block device under specified map path
UnmapDevice(mapPath string, linkName string) error
// RemoveMapPath removes a file or directory on specified map path
RemoveMapPath(mapPath string) error
// IsSymlinkExist returns true if specified symbolic link exists
IsSymlinkExist(mapPath string) (bool, error)
// GetDeviceSymlinkRefs searches symbolic links under global map path
GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error)
// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
AttachFileDevice(path string) (string, error)
// GetLoopDevice returns the full path to the loop device associated with the given path.
GetLoopDevice(path string) (string, error)
// RemoveLoopDevice removes specified loopback device
RemoveLoopDevice(device string) error
}
// NewBlockVolumePathHandler returns a new instance of BlockVolumePathHandler.
func NewBlockVolumePathHandler() BlockVolumePathHandler {
var volumePathHandler VolumePathHandler
return volumePathHandler
}
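// Example (an illustrative sketch): mapping an attached block device into
// the plugin's global map path, keyed by pod UID; the paths below are
// hypothetical:
//
//	handler := NewBlockVolumePathHandler()
//	if err := handler.MapDevice("/dev/sdb", globalMapPath, string(pod.UID)); err != nil {
//		return err
//	}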
// VolumePathHandler implements the path-related operations for block volumes
type VolumePathHandler struct {
}
// MapDevice creates a symbolic link to block device under specified map path
func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error {
// Example of global map path:
// globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid}
// linkName: {podUid}
//
// Example of pod device map path:
// podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
// linkName: {volumeName}
if len(devicePath) == 0 {
return fmt.Errorf("Failed to map device to map path. devicePath is empty")
}
if len(mapPath) == 0 {
return fmt.Errorf("Failed to map device to map path. mapPath is empty")
}
if !filepath.IsAbs(mapPath) {
return fmt.Errorf("The map path should be absolute: map path: %s", mapPath)
}
glog.V(5).Infof("MapDevice: devicePath %s", devicePath)
glog.V(5).Infof("MapDevice: mapPath %s", mapPath)
glog.V(5).Infof("MapDevice: linkName %s", linkName)
// Check and create mapPath
_, err := os.Stat(mapPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate map path: %s", mapPath)
return err
}
if err = os.MkdirAll(mapPath, 0750); err != nil {
return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err)
}
// Remove the old symbolic link (or file), then create a new one.
// This is needed because the existing symbolic link becomes stale
// across node reboots.
linkPath := path.Join(mapPath, string(linkName))
if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
return err
}
err = os.Symlink(devicePath, linkPath)
return err
}
// UnmapDevice removes a symbolic link associated to block device under specified map path
func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to unmap device from map path. mapPath is empty")
}
glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath)
glog.V(5).Infof("UnmapDevice: linkName %s", linkName)
// Check symbolic link exists
linkPath := path.Join(mapPath, string(linkName))
if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil {
return checkErr
} else if !islinkExist {
glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath)
return nil
}
err := os.Remove(linkPath)
return err
}
// RemoveMapPath removes a file or directory on specified map path
func (v VolumePathHandler) RemoveMapPath(mapPath string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to remove map path. mapPath is empty")
}
glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath)
err := os.RemoveAll(mapPath)
if err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// IsSymlinkExist returns true if the specified file exists and is a symbolic link.
// If the file doesn't exist, or exists but is not a symbolic link, it returns false with no error.
// In other cases it returns false with the error from Lstat().
func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) {
fi, err := os.Lstat(mapPath)
if err == nil {
// If the file exists and is a symbolic link, return true and no error
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return true, nil
}
// If the file exists but is not a symbolic link, return false and no error
return false, nil
}
// If file doesn't exist, return false and no error
if os.IsNotExist(err) {
return false, nil
}
// Return error from Lstat()
return false, err
}
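// Example (an illustrative sketch): IsSymlinkExist lets callers distinguish
// "the link was never created" from a real failure before removing it;
// linkPath is hypothetical:
//
//	exist, err := handler.IsSymlinkExist(linkPath)
//	if err == nil && exist {
//		return os.Remove(linkPath)
//	}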
// GetDeviceSymlinkRefs searches symbolic links under global map path
func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) {
var refs []string
files, err := ioutil.ReadDir(mapPath)
if err != nil {
return nil, fmt.Errorf("Directory cannot read %v", err)
}
for _, file := range files {
if file.Mode()&os.ModeSymlink != os.ModeSymlink {
continue
}
filename := file.Name()
filepath, err := os.Readlink(path.Join(mapPath, filename))
if err != nil {
return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err)
}
glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath)
if filepath == devPath {
refs = append(refs, path.Join(mapPath, filename))
}
}
glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs)
return refs, nil
}
// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
// ex. mapPath symlink: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX
// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX
func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) {
var globalMapPathUUID string
// Find symbolic link named pod uuid under plugin dir
err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) {
glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath)
if res, err := compareSymlinks(path, mapPath); err == nil && res {
globalMapPathUUID = path
}
}
return nil
})
if err != nil {
return "", err
}
glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID)
// Return path contains global map path + {pod uuid}
return globalMapPathUUID, nil
}
func compareSymlinks(global, pod string) (bool, error) {
devGlobal, err := os.Readlink(global)
if err != nil {
return false, err
}
devPod, err := os.Readlink(pod)
if err != nil {
return false, err
}
glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod)
if devGlobal == devPod {
return true, nil
}
return false, nil
}
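// Example (an illustrative sketch): after a kubelet restart, the global map
// path for a pod's block volume can be reconstructed from the pod device
// map path symlink; pluginDir and podDeviceMapPath are hypothetical:
//
//	globalMapPath, err := handler.FindGlobalMapPathUUIDFromPod(pluginDir, podDeviceMapPath, pod.UID)
//	if err != nil {
//		return err
//	}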

View file

@@ -1,108 +0,0 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"fmt"
"os"
"os/exec"
"strings"
"github.com/golang/glog"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
blockDevicePath, err := v.GetLoopDevice(path)
if err != nil && err.Error() != ErrDeviceNotFound {
return "", err
}
// If no existing loop device for the path, create one
if blockDevicePath == "" {
glog.V(4).Infof("Creating device for path: %s", path)
blockDevicePath, err = makeLoopDevice(path)
if err != nil {
return "", err
}
}
return blockDevicePath, nil
}
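// Example (an illustrative sketch): exposing a regular file as a block
// device for a file-backed volume; the image path is hypothetical:
//
//	loopDev, err := handler.AttachFileDevice("/var/lib/kubelet/volume-images/disk.img")
//	if err != nil {
//		return err
//	}
//	// loopDev is now something like /dev/loop2, backed by disk.img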
// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return "", errors.New(ErrDeviceNotFound)
}
if err != nil {
return "", fmt.Errorf("not attachable: %v", err)
}
args := []string{"-j", path}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device discover command for path %s: %v", path, err)
return "", err
}
return parseLosetupOutputForDevice(out)
}
func makeLoopDevice(path string) (string, error) {
args := []string{"-f", "--show", path}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device create command for path %s: %v", path, err)
return "", err
}
return parseLosetupOutputForDevice(out)
}
// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
args := []string{"-d", device}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
if _, err := os.Stat(device); os.IsNotExist(err) {
return nil
}
glog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out)
return err
}
return nil
}
func parseLosetupOutputForDevice(output []byte) (string, error) {
if len(output) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
// losetup returns device in the format:
// /dev/loop1: [0073]:148662 (/dev/sda)
device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0])
if len(device) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
return device, nil
}
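// Example (an illustrative sketch): given the losetup output format shown
// above, the parser returns the device column:
//
//	dev, err := parseLosetupOutputForDevice([]byte("/dev/loop1: [0073]:148662 (/dev/sda)"))
//	// dev == "/dev/loop1", err == nil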

View file

@@ -1,39 +0,0 @@
// +build !linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
return "", fmt.Errorf("AttachFileDevice not supported for this build.")
}
// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
return "", fmt.Errorf("GetLoopDevice not supported for this build.")
}
// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
return fmt.Errorf("RemoveLoopDevice not supported for this build.")
}