Update code for latest k8s

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2018-02-12 12:13:07 -08:00
parent 8f5e37a83c
commit 5f7ac28059
792 changed files with 25023 additions and 19841 deletions

vendor/k8s.io/kubernetes/README.md generated vendored
View file

@ -7,7 +7,7 @@
----
Kubernetes is an open source system for managing [containerized applications]
across multiple hosts, providing basic mechanisms for deployment, maintenance,
across multiple hosts; providing basic mechanisms for deployment, maintenance,
and scaling of applications.
Kubernetes builds upon a decade and a half of experience at Google running
@ -55,11 +55,11 @@ $ cd kubernetes
$ make quick-release
```
If you are less impatient, head over to the [developer's documentation].
For the full story, head over to the [developer's documentation].
## Support
If you need support, start with the [troubleshooting guide]
If you need support, start with the [troubleshooting guide],
and work your way through the process that we've outlined.
That said, if you have questions, reach out to us
@ -71,7 +71,7 @@ That said, if you have questions, reach out to us
[communication]: https://github.com/kubernetes/community/blob/master/communication.md
[community repository]: https://github.com/kubernetes/community
[containerized applications]: https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/
[developer's documentation]: https://github.com/kubernetes/community/tree/master/contributors/devel
[developer's documentation]: https://github.com/kubernetes/community/tree/master/contributors/devel#readme
[Docker environment]: https://docs.docker.com/engine
[Go environment]: https://golang.org/doc/install
[GoDoc]: https://godoc.org/k8s.io/kubernetes
@ -81,6 +81,6 @@ That said, if you have questions, reach out to us
[Scalable Microservices with Kubernetes]: https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615
[Submit Queue]: http://submit-queue.k8s.io/#/ci
[Submit Queue Widget]: http://submit-queue.k8s.io/health.svg?v=1
[troubleshooting guide]: https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/README.md?pixel)]()

View file

@ -238,7 +238,7 @@ func IsPodReady(pod *v1.Pod) bool {
return IsPodReadyConditionTrue(pod.Status)
}
// IsPodReady retruns true if a pod is ready; false otherwise.
// IsPodReady returns true if a pod is ready; false otherwise.
func IsPodReadyConditionTrue(status v1.PodStatus) bool {
condition := GetPodReadyCondition(status)
return condition != nil && condition.Status == v1.ConditionTrue

View file

@ -116,7 +116,8 @@ var (
// MetricSpec specifies how to scale based on a single metric
// (only `type` and one other matching field should be set at once).
type MetricSpec struct {
// Type is the type of metric source. It should match one of the fields below.
// Type is the type of metric source. It should be one of "Object",
// "Pods" or "Resource", each mapping to a matching field in the object.
Type MetricSourceType
// Object refers to a metric describing a single kubernetes object
@ -261,7 +262,8 @@ type HorizontalPodAutoscalerCondition struct {
// MetricStatus describes the last-read state of a single metric.
type MetricStatus struct {
// Type is the type of metric source. It will match one of the fields below.
// Type is the type of metric source. It will be one of "Object",
// "Pods" or "Resource", each corresponds to a matching field in the object.
Type MetricSourceType
// Object refers to a metric describing a single kubernetes object
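The contract in these updated comments — exactly one source field, matching Type, set at a time — is easy to state in code. A minimal sketch with stand-in types (the real ObjectMetricSource/PodsMetricSource/ResourceMetricSource structs are elided here):
```go
package main

import "fmt"

type MetricSourceType string

const (
	ObjectSourceType   MetricSourceType = "Object"
	PodsSourceType     MetricSourceType = "Pods"
	ResourceSourceType MetricSourceType = "Resource"
)

// metricSpec mirrors the shape of MetricSpec above, with empty structs
// standing in for the real per-source types.
type metricSpec struct {
	Type     MetricSourceType
	Object   *struct{}
	Pods     *struct{}
	Resource *struct{}
}

// wellFormed reports whether the field named by Type is the only source set.
func wellFormed(s metricSpec) bool {
	set := map[MetricSourceType]bool{
		ObjectSourceType:   s.Object != nil,
		PodsSourceType:     s.Pods != nil,
		ResourceSourceType: s.Resource != nil,
	}
	n := 0
	for _, isSet := range set {
		if isSet {
			n++
		}
	}
	return n == 1 && set[s.Type]
}

func main() {
	ok := metricSpec{Type: ResourceSourceType, Resource: &struct{}{}}
	bad := metricSpec{Type: PodsSourceType, Resource: &struct{}{}}
	fmt.Println(wellFormed(ok), wellFormed(bad)) // true false
}
```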

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View file

@ -254,46 +254,18 @@ func IsIntegerResourceName(str string) bool {
return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str))
}
// Extended and HugePages resources
func IsScalarResourceName(name core.ResourceName) bool {
return IsExtendedResourceName(name) || IsHugePageResourceName(name)
}
// this function aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *core.Service) bool {
return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP != ""
}
// this function aims to check if the service's cluster IP is requested or not
func IsServiceIPRequested(service *core.Service) bool {
// ExternalName services are CNAME aliases to external ones. Ignore the IP.
if service.Spec.Type == core.ServiceTypeExternalName {
return false
}
return service.Spec.ClusterIP == ""
}
var standardFinalizers = sets.NewString(
string(core.FinalizerKubernetes),
metav1.FinalizerOrphanDependents,
metav1.FinalizerDeleteDependents,
)
// HasAnnotation returns a bool if passed in annotation exists
func HasAnnotation(obj core.ObjectMeta, ann string) bool {
_, found := obj.Annotations[ann]
return found
}
// SetMetaDataAnnotation sets the annotation and value
func SetMetaDataAnnotation(obj *core.ObjectMeta, ann string, value string) {
if obj.Annotations == nil {
obj.Annotations = make(map[string]string)
}
obj.Annotations[ann] = value
}
func IsStandardFinalizerName(str string) bool {
return standardFinalizers.Has(str)
}
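A quick usage sketch for the two annotation helpers above, with core.ObjectMeta reduced to the one field they touch:
```go
package main

import "fmt"

// objectMeta stands in for core.ObjectMeta; only Annotations matters here.
type objectMeta struct {
	Annotations map[string]string
}

func hasAnnotation(obj objectMeta, ann string) bool {
	_, found := obj.Annotations[ann]
	return found
}

func setMetaDataAnnotation(obj *objectMeta, ann, value string) {
	if obj.Annotations == nil {
		obj.Annotations = make(map[string]string) // lazily allocated, as above
	}
	obj.Annotations[ann] = value
}

func main() {
	var meta objectMeta // Annotations starts nil; the setter must cope
	setMetaDataAnnotation(&meta, "example.com/owner", "team-a")
	fmt.Println(hasAnnotation(meta, "example.com/owner")) // true
}
```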
@ -482,37 +454,6 @@ func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool
return true
}
// TolerationToleratesTaint checks if the toleration tolerates the taint.
func TolerationToleratesTaint(toleration *core.Toleration, taint *core.Taint) bool {
if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
return false
}
if toleration.Key != taint.Key {
return false
}
// TODO: Use proper defaulting when Toleration becomes a field of PodSpec
if (len(toleration.Operator) == 0 || toleration.Operator == core.TolerationOpEqual) && toleration.Value == taint.Value {
return true
}
if toleration.Operator == core.TolerationOpExists {
return true
}
return false
}
// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
func TaintToleratedByTolerations(taint *core.Taint, tolerations []core.Toleration) bool {
tolerated := false
for i := range tolerations {
if TolerationToleratesTaint(&tolerations[i], taint) {
tolerated = true
break
}
}
return tolerated
}
// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations
// and converts it to the []Taint type in core.
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) {

View file

@ -269,7 +269,7 @@ type VolumeSource struct {
Quobyte *QuobyteVolumeSource
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
// provisioned/attached using an exec based plugin.
// +optional
FlexVolume *FlexVolumeSource
@ -352,9 +352,9 @@ type PersistentVolumeSource struct {
// +optional
ISCSI *ISCSIPersistentVolumeSource
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
// provisioned/attached using an exec based plugin.
// +optional
FlexVolume *FlexVolumeSource
FlexVolume *FlexPersistentVolumeSource
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// +optional
Cinder *CinderVolumeSource
@ -475,6 +475,7 @@ type PersistentVolumeReclaimPolicy string
const (
// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
// The volume plugin must support Recycling.
// DEPRECATED: The PersistentVolumeReclaimRecycle called Recycle is being deprecated. See announcement here: https://groups.google.com/forum/#!topic/kubernetes-dev/uexugCza84I
PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
// The volume plugin must support Deletion.
@ -576,6 +577,8 @@ type PersistentVolumeClaimConditionType string
const (
// A user-triggered resize of the pvc has been started
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
)
type PersistentVolumeClaimCondition struct {
@ -867,8 +870,34 @@ type FCVolumeSource struct {
WWIDs []string
}
// FlexPersistentVolumeSource represents a generic persistent volume resource that is
// provisioned/attached using an exec based plugin.
type FlexPersistentVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string
// Optional: SecretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *SecretReference
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool
// Optional: Extra driver options if any.
// +optional
Options map[string]string
}
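For orientation, a hedged sketch of populating the new FlexPersistentVolumeSource; the driver name, secret and options below are invented, and SecretReference is reduced to a name/namespace pair:
```go
package main

import "fmt"

// secretReference stands in for core.SecretReference.
type secretReference struct{ Name, Namespace string }

// flexPersistentVolumeSource mirrors the struct defined above.
type flexPersistentVolumeSource struct {
	Driver    string
	FSType    string
	SecretRef *secretReference
	ReadOnly  bool
	Options   map[string]string
}

func main() {
	src := flexPersistentVolumeSource{
		Driver:    "example.com/lvm", // hypothetical out-of-tree driver
		FSType:    "ext4",            // the default depends on the driver script
		SecretRef: &secretReference{Name: "flex-secret", Namespace: "kube-system"},
		Options:   map[string]string{"volumeID": "vol-123"}, // extra driver options
	}
	fmt.Printf("%+v\n", src)
}
```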
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
// provisioned/attached using an exec based plugin.
type FlexVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string
@ -1589,6 +1618,12 @@ type CSIPersistentVolumeSource struct {
// Defaults to false (read/write).
// +optional
ReadOnly bool
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string
}
// ContainerPort represents a network port in a single container
@ -2338,13 +2373,13 @@ type PodAffinityTerm struct {
LabelSelector *metav1.LabelSelector
// namespaces specifies which namespaces the labelSelector applies to (matches against);
// null or empty list means "this pod's namespace"
// +optional
Namespaces []string
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
// the labelSelector in the specified namespaces, where co-located is defined as running on a node
// whose value of the label with key topologyKey matches that of any node on which any of the
// selected pods is running.
// Empty topologyKey is not allowed.
// +optional
TopologyKey string
}
@ -2542,9 +2577,10 @@ type PodSpec struct {
// file if specified. This is only valid for non-hostNetwork pods.
// +optional
HostAliases []HostAlias
// If specified, indicates the pod's priority. "SYSTEM" is a special keyword
// which indicates the highest priority. Any other name must be defined by
// creating a PriorityClass object with that name.
// If specified, indicates the pod's priority. "system-node-critical" and
// "system-cluster-critical" are two special keywords which indicate the
// highest priorities with the former being the highest priority. Any other
// name must be defined by creating a PriorityClass object with that name.
// If not specified, the pod priority will be default or zero if there is no
// default.
// +optional
@ -2692,6 +2728,13 @@ type PodStatus struct {
// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'
// +optional
Reason string
// nominatedNodeName is set when this pod preempts other pods on the node, but it cannot be
// scheduled right away as preemption victims receive their graceful termination periods.
// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
// give the resources on this node to a higher priority pod that is created after preemption.
// +optional
NominatedNodeName string
// +optional
HostIP string
@ -4022,7 +4065,7 @@ type Event struct {
type EventSeries struct {
// Number of occurrences in this series up to the last heartbeat time
Count int32
// Time of the last occurence observed
// Time of the last occurrence observed
LastObservedTime metav1.MicroTime
// State of this Series: Ongoing or Finished
State EventSeriesState
@ -4357,8 +4400,21 @@ type ConfigMap struct {
// Data contains the configuration data.
// Each key must consist of alphanumeric characters, '-', '_' or '.'.
// Values with non-UTF-8 byte sequences must use the BinaryData field.
// The keys stored in Data must not overlap with the keys in
// the BinaryData field; this is enforced during the validation process.
// +optional
Data map[string]string
// BinaryData contains the binary data.
// Each key must consist of alphanumeric characters, '-', '_' or '.'.
// BinaryData can contain byte sequences that are not in the UTF-8 range.
// The keys stored in BinaryData must not overlap with the ones in
// the Data field; this is enforced during the validation process.
// Using this field will require 1.10+ apiserver and
// kubelet.
// +optional
BinaryData map[string][]byte
}
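The overlap rule between Data and BinaryData reads best with an example. A sketch of the duplicate-key check, mirroring the validation added later in this commit:
```go
package main

import "fmt"

// duplicateKeys returns keys present in both bags; the validation added
// later in this commit rejects any such overlap.
func duplicateKeys(data map[string]string, binaryData map[string][]byte) []string {
	var dups []string
	for k := range data {
		if _, ok := binaryData[k]; ok {
			dups = append(dups, k)
		}
	}
	return dups
}

func main() {
	data := map[string]string{"config.yaml": "a: 1"}
	bin := map[string][]byte{"config.yaml": {0xff, 0xfe}} // non-UTF-8 bytes
	fmt.Println(duplicateKeys(data, bin)) // [config.yaml] -> invalid ConfigMap
}
```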
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

View file

@ -163,7 +163,8 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
"spec.restartPolicy",
"spec.schedulerName",
"status.phase",
"status.podIP":
"status.podIP",
"status.nominatedNodeName":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":

View file

@ -89,15 +89,6 @@ func IsServiceIPSet(service *v1.Service) bool {
return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != ""
}
// this function aims to check if the service's cluster IP is requested or not
func IsServiceIPRequested(service *v1.Service) bool {
// ExternalName services are CNAME aliases to external ones. Ignore the IP.
if service.Spec.Type == v1.ServiceTypeExternalName {
return false
}
return service.Spec.ClusterIP == ""
}
// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice,
// only if they do not already exist
func AddToNodeAddresses(addresses *[]v1.NodeAddress, addAddresses ...v1.NodeAddress) {
@ -416,20 +407,6 @@ func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string {
return ""
}
// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field.
func PersistentVolumeClaimHasClass(claim *v1.PersistentVolumeClaim) bool {
// Use beta annotation first
if _, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found {
return true
}
if claim.Spec.StorageClassName != nil {
return true
}
return false
}
// GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations
// and converts it to the NodeAffinity type in api.
// TODO: update when storage node affinity graduates to beta

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -21,6 +21,8 @@ limitations under the License.
package v1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -28,7 +30,6 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
unsafe "unsafe"
)
func init() {
@ -141,6 +142,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_core_ExecAction_To_v1_ExecAction,
Convert_v1_FCVolumeSource_To_core_FCVolumeSource,
Convert_core_FCVolumeSource_To_v1_FCVolumeSource,
Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource,
Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource,
Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource,
Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource,
Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource,
@ -616,6 +619,7 @@ func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(
out.Driver = in.Driver
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
return nil
}
@ -628,6 +632,7 @@ func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(
out.Driver = in.Driver
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
return nil
}
@ -835,6 +840,7 @@ func Convert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.Compone
func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data))
out.BinaryData = *(*map[string][]byte)(unsafe.Pointer(&in.BinaryData))
return nil
}
@ -846,6 +852,7 @@ func Convert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMa
func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data))
out.BinaryData = *(*map[string][]byte)(unsafe.Pointer(&in.BinaryData))
return nil
}
@ -1758,6 +1765,34 @@ func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, o
return autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in, out, s)
}
func autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in, out, s)
}
func autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in, out, s)
}
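These generated converters rely on a standard trick: when two struct types share an identical memory layout, a pointer to one can be reinterpreted as a pointer to the other via unsafe.Pointer, with no copying. The same pattern on toy types:
```go
package main

import (
	"fmt"
	"unsafe"
)

// v1Secret and coreSecret have identical layouts, like the paired API
// types in this file, so one pointer can be reinterpreted as the other.
type v1Secret struct{ Name, Namespace string }
type coreSecret struct{ Name, Namespace string }

func main() {
	in := &v1Secret{Name: "flex-secret", Namespace: "kube-system"}
	out := (*coreSecret)(unsafe.Pointer(in)) // no allocation, no field copying
	fmt.Println(out.Name, out.Namespace)
}
```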
func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
@ -3250,7 +3285,7 @@ func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1
out.CephFS = (*core.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS))
out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC))
out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker))
out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.FlexVolume = (*core.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.AzureFile = (*core.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile))
out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
@ -3278,7 +3313,7 @@ func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *co
out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD))
out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
out.ISCSI = (*v1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI))
out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.FlexVolume = (*v1.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder))
out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS))
out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC))
@ -3907,6 +3942,7 @@ func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodS
out.Conditions = *(*[]core.PodCondition)(unsafe.Pointer(&in.Conditions))
out.Message = in.Message
out.Reason = in.Reason
out.NominatedNodeName = in.NominatedNodeName
out.HostIP = in.HostIP
out.PodIP = in.PodIP
out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime))
@ -3926,6 +3962,7 @@ func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodS
out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions))
out.Message = in.Message
out.Reason = in.Reason
out.NominatedNodeName = in.NominatedNodeName
out.HostIP = in.HostIP
out.PodIP = in.PodIP
out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime))

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View file

@ -51,6 +51,7 @@ import (
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/security/apparmor"
)
@ -63,6 +64,8 @@ const isInvalidQuotaResource string = `must be a standard resource for quota`
const fieldImmutableErrorMsg string = apimachineryvalidation.FieldImmutableErrorMsg
const isNotIntegerErrorMsg string = `must be an integer`
const isNotPositiveErrorMsg string = `must be greater than zero`
const csiDriverNameRexpErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
const csiDriverNameRexpFmt string = `^[a-zA-Z0-9][-a-zA-Z0-9_.]{0,61}[a-zA-Z-0-9]$`
var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255)
var fileModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive"
@ -74,6 +77,8 @@ var iscsiInitiatorIqnRegex = regexp.MustCompile(`iqn\.\d{4}-\d{2}\.([[:alnum:]-.
var iscsiInitiatorEuiRegex = regexp.MustCompile(`^eui.[[:alnum:]]{16}$`)
var iscsiInitiatorNaaRegex = regexp.MustCompile(`^naa.[[:alnum:]]{32}$`)
var csiDriverNameRexp = regexp.MustCompile(csiDriverNameRexpFmt)
// ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue
func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList {
allErrs := field.ErrorList{}
@ -201,10 +206,6 @@ func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath
return allErrs
}
func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
return apimachineryvalidation.ValidateOwnerReferences(ownerReferences, fldPath)
}
// ValidateNameFunc validates that the provided name is valid for a given resource type.
// Not all resources have the same validation rules for names. Prefix is true
// if the name will have a value appended to it. If the name is not valid,
@ -212,15 +213,6 @@ func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *f
// value that were not valid. Otherwise this returns an empty list or nil.
type ValidateNameFunc apimachineryvalidation.ValidateNameFunc
// maskTrailingDash replaces the final character of a string with a subdomain safe
// value if is a dash.
func maskTrailingDash(name string) string {
if strings.HasSuffix(name, "-") {
return name[:len(name)-2] + "a"
}
return name
}
// ValidatePodName can be used to check whether the given pod name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
@ -291,11 +283,6 @@ func NameIsDNSSubdomain(name string, prefix bool) []string {
return apimachineryvalidation.NameIsDNSSubdomain(name, prefix)
}
// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label.
func NameIsDNSLabel(name string, prefix bool) []string {
return apimachineryvalidation.NameIsDNSLabel(name, prefix)
}
// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label.
func NameIsDNS1035Label(name string, prefix bool) []string {
return apimachineryvalidation.NameIsDNS1035Label(name, prefix)
@ -361,10 +348,6 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *fiel
return allErrs
}
func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList {
return apimachineryvalidation.ValidateNoNewFinalizers(newFinalizers, oldFinalizers, fldPath)
}
func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) {
allErrs := field.ErrorList{}
@ -1257,6 +1240,27 @@ func validateFlexVolumeSource(fv *core.FlexVolumeSource, fldPath *field.Path) fi
return allErrs
}
func validateFlexPersistentVolumeSource(fv *core.FlexPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(fv.Driver) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
}
// Make sure user-specified options don't use kubernetes namespaces
for k := range fv.Options {
namespace := k
if parts := strings.SplitN(k, "/", 2); len(parts) == 2 {
namespace = parts[0]
}
normalized := "." + strings.ToLower(namespace)
if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved"))
}
}
return allErrs
}
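The reserved-namespace check is clearest with concrete inputs. A sketch applying the same suffix test to a few option keys (key names invented):
```go
package main

import (
	"fmt"
	"strings"
)

// reserved reports whether an option key's namespace is claimed by
// kubernetes.io or k8s.io, using the same test as the function above.
func reserved(key string) bool {
	namespace := key
	if parts := strings.SplitN(key, "/", 2); len(parts) == 2 {
		namespace = parts[0]
	}
	normalized := "." + strings.ToLower(namespace)
	return strings.HasSuffix(normalized, ".kubernetes.io") ||
		strings.HasSuffix(normalized, ".k8s.io")
}

func main() {
	for _, k := range []string{
		"volumeID",                  // ok: no namespace
		"example.com/mode",          // ok: third-party namespace
		"storage.kubernetes.io/foo", // rejected
		"Alpha.K8S.IO/bar",          // rejected: the comparison is case-insensitive
	} {
		fmt.Println(k, reserved(k))
	}
}
```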
func validateAzureFile(azure *core.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if azure.SecretName == "" {
@ -1433,6 +1437,14 @@ func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldP
allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
}
if len(csi.Driver) > 63 {
allErrs = append(allErrs, field.TooLong(fldPath.Child("driver"), csi.Driver, 63))
}
if !csiDriverNameRexp.MatchString(csi.Driver) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("driver"), csi.Driver, validation.RegexError(csiDriverNameRexpErrMsg, csiDriverNameRexpFmt, "csi-hostpath")))
}
if len(csi.VolumeHandle) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), ""))
}
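The new driver-name rules — at most 63 characters, plus the pattern declared as csiDriverNameRexpFmt near the top of this file — can be exercised directly. A quick check of a few candidate names (the pattern is copied verbatim from above):
```go
package main

import (
	"fmt"
	"regexp"
)

// The same pattern as csiDriverNameRexpFmt above.
var csiDriverName = regexp.MustCompile(`^[a-zA-Z0-9][-a-zA-Z0-9_.]{0,61}[a-zA-Z-0-9]$`)

func main() {
	for _, name := range []string{
		"csi-hostpath",    // valid: the example cited in the error message
		"com.example.csi", // valid: dots and dashes allowed inside
		"-bad-start",      // invalid: must start with an alphanumeric
		"x",               // invalid here: the pattern needs at least two characters
	} {
		fmt.Println(name, len(name) <= 63 && csiDriverName.MatchString(name))
	}
}
```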
@ -1588,7 +1600,7 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList {
}
if pv.Spec.FlexVolume != nil {
numVolumes++
allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...)
allErrs = append(allErrs, validateFlexPersistentVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...)
}
if pv.Spec.AzureFile != nil {
if numVolumes > 0 {
@ -1773,27 +1785,36 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld
func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...)
newPvcClone := newPvc.DeepCopy()
oldPvcClone := oldPvc.DeepCopy()
// PVController needs to update PVC.Spec w/ VolumeName.
// Claims are immutable in order to enforce quota, range limits, etc. without gaming the system.
if len(oldPvc.Spec.VolumeName) == 0 {
// volumeName changes are allowed once.
// Reset back to empty string after equality check
oldPvc.Spec.VolumeName = newPvc.Spec.VolumeName
defer func() { oldPvc.Spec.VolumeName = "" }()
oldPvcClone.Spec.VolumeName = newPvcClone.Spec.VolumeName
}
if validateStorageClassUpgrade(oldPvcClone.Annotations, newPvcClone.Annotations,
oldPvcClone.Spec.StorageClassName, newPvcClone.Spec.StorageClassName) {
newPvcClone.Spec.StorageClassName = nil
metav1.SetMetaDataAnnotation(&newPvcClone.ObjectMeta, core.BetaStorageClassAnnotation, oldPvcClone.Annotations[core.BetaStorageClassAnnotation])
} else {
// storageclass annotation should be immutable after creation
// TODO: remove Beta when no longer needed
allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...)
}
if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
newPVCSpecCopy := newPvc.Spec.DeepCopy()
// lets make sure storage values are same.
if newPvc.Status.Phase == core.ClaimBound && newPVCSpecCopy.Resources.Requests != nil {
newPVCSpecCopy.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"]
if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.Resources.Requests != nil {
newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"]
}
oldSize := oldPvc.Spec.Resources.Requests["storage"]
newSize := newPvc.Spec.Resources.Requests["storage"]
if !apiequality.Semantic.DeepEqual(*newPVCSpecCopy, oldPvc.Spec) {
if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "is immutable after creation except resources.requests for bound claims"))
}
if newSize.Cmp(oldSize) < 0 {
@ -1803,23 +1824,33 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl
} else {
// changes to Spec are not allowed, but updates to label/and some annotations are OK.
// no-op updates pass validation.
if !apiequality.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) {
if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation"))
}
}
// storageclass annotation should be immutable after creation
// TODO: remove Beta when no longer needed
allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...)
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...)
}
newPvc.Status = oldPvc.Status
return allErrs
}
// Provide an upgrade path from PVC with storage class specified in beta
// annotation to storage class specified in attribute. We allow update of
// StorageClassName only if following four conditions are met at the same time:
// 1. The old pvc's StorageClassAnnotation is set
// 2. The old pvc's StorageClassName is not set
// 3. The new pvc's StorageClassName is set and equal to the old value in annotation
// 4. If the new pvc's StorageClassAnnotation is set, it must be equal to the old pv/pvc's StorageClassAnnotation
func validateStorageClassUpgrade(oldAnnotations, newAnnotations map[string]string, oldScName, newScName *string) bool {
oldSc, oldAnnotationExist := oldAnnotations[core.BetaStorageClassAnnotation]
newScInAnnotation, newAnnotationExist := newAnnotations[core.BetaStorageClassAnnotation]
return oldAnnotationExist /* condition 1 */ &&
oldScName == nil /* condition 2*/ &&
(newScName != nil && *newScName == oldSc) /* condition 3 */ &&
(!newAnnotationExist || newScInAnnotation == oldSc) /* condition 4 */
}
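The four conditions are easiest to follow with inputs. A sketch that copies validateStorageClassUpgrade verbatim and feeds it an upgrading PVC pair; the annotation key is the value of core.BetaStorageClassAnnotation:
```go
package main

import "fmt"

// Value of core.BetaStorageClassAnnotation.
const betaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"

// Copied from validateStorageClassUpgrade above.
func validateStorageClassUpgrade(oldAnnotations, newAnnotations map[string]string, oldScName, newScName *string) bool {
	oldSc, oldAnnotationExist := oldAnnotations[betaStorageClassAnnotation]
	newScInAnnotation, newAnnotationExist := newAnnotations[betaStorageClassAnnotation]
	return oldAnnotationExist &&
		oldScName == nil &&
		(newScName != nil && *newScName == oldSc) &&
		(!newAnnotationExist || newScInAnnotation == oldSc)
}

func main() {
	oldAnn := map[string]string{betaStorageClassAnnotation: "fast"}
	fast, slow := "fast", "slow"
	// Old PVC: beta annotation set, StorageClassName unset (conditions 1 and 2).
	// New PVC: StorageClassName promoted to the annotation's value (condition 3).
	fmt.Println(validateStorageClassUpgrade(oldAnn, nil, nil, &fast)) // true
	fmt.Println(validateStorageClassUpgrade(oldAnn, nil, nil, &slow)) // false: condition 3 fails
}
```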
// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
@ -2647,19 +2678,8 @@ func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorL
allErrs := field.ErrorList{}
if affinity != nil {
if na := affinity.NodeAffinity; na != nil {
// TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
// }
if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
if affinity.NodeAffinity != nil {
allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, fldPath.Child("nodeAffinity"))...)
}
if affinity.PodAffinity != nil {
allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...)
@ -3076,6 +3096,22 @@ func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *fie
return allErrs
}
// validateNodeAffinity tests that the specified nodeAffinity fields have valid data
func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
// }
if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
}
// validatePodAffinity tests that the specified podAffinity fields have valid data
func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
@ -3322,6 +3358,31 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod) field.ErrorList {
return allErrs
}
// ValidateContainerStateTransition test to if any illegal container state transitions are being attempted
func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, restartPolicy core.RestartPolicy) field.ErrorList {
allErrs := field.ErrorList{}
// If we should always restart, containers are allowed to leave the terminated state
if restartPolicy == core.RestartPolicyAlways {
return allErrs
}
for i, oldStatus := range oldStatuses {
// Skip any container that is not terminated
if oldStatus.State.Terminated == nil {
continue
}
// Skip any container that failed but is allowed to restart
if oldStatus.State.Terminated.ExitCode != 0 && restartPolicy == core.RestartPolicyOnFailure {
continue
}
for _, newStatus := range newStatuses {
if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state"))
}
}
}
return allErrs
}
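The rule in miniature: once a container has terminated, a status update may not move it back to a non-terminated state unless the restart policy allows it. A distilled sketch for restartPolicy Never, with the core types reduced to the fields the check reads:
```go
package main

import "fmt"

type terminated struct{ ExitCode int32 }

type containerStatus struct {
	Name       string
	Terminated *terminated // nil means waiting or running
}

// illegalTransitions mirrors the loop above for restart policy "Never".
func illegalTransitions(newStatuses, oldStatuses []containerStatus) []string {
	var bad []string
	for _, old := range oldStatuses {
		if old.Terminated == nil {
			continue // only terminated containers are pinned
		}
		for _, cur := range newStatuses {
			if cur.Name == old.Name && cur.Terminated == nil {
				bad = append(bad, cur.Name)
			}
		}
	}
	return bad
}

func main() {
	oldS := []containerStatus{{Name: "app", Terminated: &terminated{ExitCode: 0}}}
	newS := []containerStatus{{Name: "app"}} // back to running: forbidden
	fmt.Println(illegalTransitions(newS, oldS)) // [app]
}
```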
// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
// that cannot be changed.
func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList {
@ -3329,10 +3390,22 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...)
fldPath = field.NewPath("status")
if newPod.Spec.NodeName != oldPod.Spec.NodeName {
allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly"))
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeName"), "may not be changed directly"))
}
if newPod.Status.NominatedNodeName != oldPod.Status.NominatedNodeName && len(newPod.Status.NominatedNodeName) > 0 {
for _, msg := range ValidateNodeName(newPod.Status.NominatedNodeName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nominatedNodeName"), newPod.Status.NominatedNodeName, msg))
}
}
// If pod should not restart, make sure the status update does not transition
// any terminated containers to a non-terminated state.
allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...)
allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...)
// For status update we ignore changes to pod spec.
newPod.Spec = oldPod.Spec
@ -3390,9 +3463,9 @@ func ValidateService(service *core.Service) field.ErrorList {
// This is a workaround for broken cloud environments that
// over-open firewalls. Hopefully it can go away when more clouds
// understand containers better.
if port.Port == 10250 {
if port.Port == ports.KubeletPort {
portPath := specPath.Child("ports").Index(ix)
allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet"))
allErrs = append(allErrs, field.Invalid(portPath, port.Port, fmt.Sprintf("may not expose port %v externally since it is used by kubelet", ports.KubeletPort)))
}
}
if service.Spec.ClusterIP == "None" {
@ -3404,7 +3477,7 @@ func ValidateService(service *core.Service) field.ErrorList {
}
case core.ServiceTypeExternalName:
if service.Spec.ClusterIP != "" {
allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services"))
allErrs = append(allErrs, field.Forbidden(specPath.Child("clusterIP"), "must be empty for ExternalName services"))
}
if len(service.Spec.ExternalName) > 0 {
allErrs = append(allErrs, ValidateDNS1123Subdomain(service.Spec.ExternalName, specPath.Child("externalName"))...)
@ -3483,7 +3556,7 @@ func ValidateService(service *core.Service) field.ErrorList {
for i := range service.Spec.Ports {
portPath := portsPath.Index(i)
if service.Spec.Ports[i].NodePort != 0 {
allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'"))
allErrs = append(allErrs, field.Forbidden(portPath.Child("nodePort"), "may not be used when `type` is 'ClusterIP'"))
}
}
}
@ -3533,7 +3606,7 @@ func ValidateService(service *core.Service) field.ErrorList {
val = service.Annotations[core.AnnotationLoadBalancerSourceRangesKey]
}
if service.Spec.Type != core.ServiceTypeLoadBalancer {
allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'"))
allErrs = append(allErrs, field.Forbidden(fieldPath, "may only be used when `type` is 'LoadBalancer'"))
}
_, err := apiservice.GetLoadBalancerSourceRanges(service)
if err != nil {
@ -4310,10 +4383,22 @@ func ValidateConfigMap(cfg *core.ConfigMap) field.ErrorList {
for _, msg := range validation.IsConfigMapKey(key) {
allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
}
// check if we have a duplicate key in the other bag
if _, isValue := cfg.BinaryData[key]; isValue {
msg := "duplicate of key present in binaryData"
allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
}
totalSize += len(value)
}
for key, value := range cfg.BinaryData {
for _, msg := range validation.IsConfigMapKey(key) {
allErrs = append(allErrs, field.Invalid(field.NewPath("binaryData").Key(key), key, msg))
}
totalSize += len(value)
}
if totalSize > core.MaxSecretSize {
allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", core.MaxSecretSize))
// pass back "" to indicate that the error refers to the whole object.
allErrs = append(allErrs, field.TooLong(field.NewPath(""), cfg, core.MaxSecretSize))
}
return allErrs
@ -4340,7 +4425,13 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
allErrs := field.ErrorList{}
limPath := fldPath.Child("limits")
reqPath := fldPath.Child("requests")
limContainsCpuOrMemory := false
reqContainsCpuOrMemory := false
limContainsHugePages := false
reqContainsHugePages := false
supportedQoSComputeResources := sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
for resourceName, quantity := range requirements.Limits {
fldPath := limPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...)
@ -4351,10 +4442,17 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
if resourceName == core.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
allErrs = append(allErrs, field.Forbidden(limPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceRequirements"))
}
if helper.IsHugePageResourceName(resourceName) && !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) {
allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName)))
if helper.IsHugePageResourceName(resourceName) {
if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) {
allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName)))
} else {
limContainsHugePages = true
}
}
if supportedQoSComputeResources.Has(string(resourceName)) {
limContainsCpuOrMemory = true
}
}
for resourceName, quantity := range requirements.Requests {
fldPath := reqPath.Key(string(resourceName))
@ -4366,15 +4464,25 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
// Check that request <= limit.
limitQuantity, exists := requirements.Limits[resourceName]
if exists {
// For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal.
// For non overcommitable resources, not only requests can't exceed limits, they also can't be lower, i.e. must be equal.
if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName)))
} else if quantity.Cmp(limitQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName)))
}
} else if resourceName == core.ResourceNvidiaGPU {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", core.ResourceNvidiaGPU)))
} else if !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Required(limPath, "Limit must be set for non overcommitable resources"))
}
if helper.IsHugePageResourceName(resourceName) {
reqContainsHugePages = true
}
if supportedQoSComputeResources.Has(string(resourceName)) {
reqContainsCpuOrMemory = true
}
}
if !limContainsCpuOrMemory && !reqContainsCpuOrMemory && (reqContainsHugePages || limContainsHugePages) {
allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("HugePages require cpu or memory")))
}
return allErrs
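The net effect of the new bookkeeping is that hugepages may not appear in limits or requests without cpu or memory alongside them. A distilled predicate over resource-name sets (quantities and feature gates omitted; the hugepages- prefix matches what helper.IsHugePageResourceName tests):
```go
package main

import (
	"fmt"
	"strings"
)

// hugePagesWithoutCompute reports whether hugepages appear in limits or
// requests with no cpu or memory alongside them, which the code above forbids.
func hugePagesWithoutCompute(limits, requests []string) bool {
	hasCompute, hasHuge := false, false
	for _, list := range [][]string{limits, requests} {
		for _, name := range list {
			if name == "cpu" || name == "memory" {
				hasCompute = true
			}
			if strings.HasPrefix(name, "hugepages-") {
				hasHuge = true
			}
		}
	}
	return hasHuge && !hasCompute
}

func main() {
	fmt.Println(hugePagesWithoutCompute([]string{"hugepages-2Mi"}, nil))           // true: rejected
	fmt.Println(hugePagesWithoutCompute([]string{"hugepages-2Mi", "memory"}, nil)) // false: allowed
}
```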
@ -4744,8 +4852,8 @@ func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) fiel
}
if sc.RunAsUser != nil {
if *sc.RunAsUser < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, isNegativeErrorMsg))
for _, msg := range validation.IsValidUserID(*sc.RunAsUser) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, msg))
}
}
@ -4858,3 +4966,13 @@ func ValidateCIDR(cidr string) (*net.IPNet, error) {
}
return net, nil
}
func IsDecremented(update, old *int32) bool {
if update == nil && old != nil {
return true
}
if update == nil || old == nil {
return false
}
return *update < *old
}

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -483,6 +483,18 @@ func (in *ConfigMap) DeepCopyInto(out *ConfigMap) {
(*out)[key] = val
}
}
if in.BinaryData != nil {
in, out := &in.BinaryData, &out.BinaryData
*out = make(map[string][]byte, len(*in))
for key, val := range *in {
if val == nil {
(*out)[key] = nil
} else {
(*out)[key] = make([]byte, len(val))
copy((*out)[key], val)
}
}
}
return
}
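The per-value make/copy is what makes this a deep copy: assigning the []byte directly would alias the source's backing array. A tiny demonstration of the difference:
```go
package main

import "fmt"

func main() {
	src := map[string][]byte{"key": []byte("abc")}

	shallow := map[string][]byte{}
	for k, v := range src {
		shallow[k] = v // aliases src's backing array
	}

	deep := map[string][]byte{}
	for k, v := range src {
		deep[k] = make([]byte, len(v))
		copy(deep[k], v) // independent storage, as in DeepCopyInto above
	}

	src["key"][0] = 'X'
	fmt.Println(string(shallow["key"]), string(deep["key"])) // Xbc abc
}
```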
@ -1547,6 +1559,38 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
if in.Options != nil {
in, out := &in.Options, &out.Options
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource.
func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource {
if in == nil {
return nil
}
out := new(FlexPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) {
*out = *in
@ -3152,7 +3196,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
if *in == nil {
*out = nil
} else {
*out = new(FlexVolumeSource)
*out = new(FlexPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
}

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View file

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View file

@ -17,6 +17,7 @@ limitations under the License.
package cloudprovider
import (
"context"
"errors"
"fmt"
"strings"
@ -44,8 +45,6 @@ type Interface interface {
Routes() (Routes, bool)
// ProviderName returns the cloud provider ID.
ProviderName() string
// ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.
ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)
// HasClusterID returns true if a ClusterID is required and set
HasClusterID() bool
}
@ -58,9 +57,9 @@ type InformerUser interface {
// Clusters is an abstract, pluggable interface for clusters of containers.
type Clusters interface {
// ListClusters lists the names of the available clusters.
ListClusters() ([]string, error)
ListClusters(ctx context.Context) ([]string, error)
// Master gets back the address (either DNS name or IP address) of the master node for the cluster.
Master(clusterName string) (string, error)
Master(ctx context.Context, clusterName string) (string, error)
}
// TODO(#6812): Use a shorter name that's less likely to be longer than cloud
@ -77,12 +76,12 @@ func GetLoadBalancerName(service *v1.Service) string {
}
// GetInstanceProviderID builds a ProviderID for a node in a cloud.
func GetInstanceProviderID(cloud Interface, nodeName types.NodeName) (string, error) {
func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) {
instances, ok := cloud.Instances()
if !ok {
return "", fmt.Errorf("failed to get instances from cloud provider")
}
instanceID, err := instances.InstanceID(nodeName)
instanceID, err := instances.InstanceID(ctx, nodeName)
if err != nil {
return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err)
}
@ -96,17 +95,17 @@ type LoadBalancer interface {
// if so, what its status is.
// Implementations must treat the *v1.Service parameter as read-only and not modify it.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error)
GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error)
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
// Implementations must treat the *v1.Service and *v1.Node
// parameters as read-only and not modify them.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
// UpdateLoadBalancer updates hosts under the specified load balancer.
// Implementations must treat the *v1.Service and *v1.Node
// parameters as read-only and not modify them.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error
UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error
// EnsureLoadBalancerDeleted deletes the specified load balancer if it
// exists, returning nil if the load balancer specified either didn't exist or
// was successfully deleted.
@ -115,7 +114,7 @@ type LoadBalancer interface {
// doesn't exist even if some part of it is still laying around.
// Implementations must treat the *v1.Service parameter as read-only and not modify it.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error
EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error
}
// Instances is an abstract, pluggable interface for sets of instances.
@ -124,31 +123,31 @@ type Instances interface {
// TODO(roberthbailey): This currently is only used in such a way that it
// returns the address of the calling instance. We should do a rename to
// make this clearer.
NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error)
// NodeAddressesByProviderID returns the addresses of the specified instance.
// The instance is specified using the providerID of the node. The
// ProviderID is a unique identifier of the node. This will not be called
// from the node whose nodeaddresses are being queried. i.e. local metadata
// services cannot be used in this method to obtain nodeaddresses
NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error)
NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error)
// ExternalID returns the cloud provider ID of the node with the specified NodeName.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
ExternalID(nodeName types.NodeName) (string, error)
ExternalID(ctx context.Context, nodeName types.NodeName) (string, error)
// InstanceID returns the cloud provider ID of the node with the specified NodeName.
InstanceID(nodeName types.NodeName) (string, error)
InstanceID(ctx context.Context, nodeName types.NodeName) (string, error)
// InstanceType returns the type of the specified instance.
InstanceType(name types.NodeName) (string, error)
InstanceType(ctx context.Context, name types.NodeName) (string, error)
// InstanceTypeByProviderID returns the type of the specified instance.
InstanceTypeByProviderID(providerID string) (string, error)
InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error)
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
AddSSHKeyToAllInstances(user string, keyData []byte) error
AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error
// CurrentNodeName returns the name of the node we are currently running on
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
CurrentNodeName(hostname string) (types.NodeName, error)
CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error)
// InstanceExistsByProviderID returns true if the instance for the given provider id still is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
InstanceExistsByProviderID(providerID string) (bool, error)
InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error)
}
// Route is a representation of an advanced routing rule.
@ -169,14 +168,14 @@ type Route struct {
// Routes is an abstract, pluggable interface for advanced routing rules.
type Routes interface {
// ListRoutes lists all managed routes that belong to the specified clusterName
ListRoutes(clusterName string) ([]*Route, error)
ListRoutes(ctx context.Context, clusterName string) ([]*Route, error)
// CreateRoute creates the described managed route
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
CreateRoute(clusterName string, nameHint string, route *Route) error
CreateRoute(ctx context.Context, clusterName string, nameHint string, route *Route) error
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
DeleteRoute(clusterName string, route *Route) error
DeleteRoute(ctx context.Context, clusterName string, route *Route) error
}
var (
@ -194,23 +193,23 @@ type Zone struct {
// Zones is an abstract, pluggable interface for zone enumeration.
type Zones interface {
// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
// In most cases, this method is called from the kubelet querying a local metadata service to aquire its zone.
// In most cases, this method is called from the kubelet querying a local metadata service to acquire its zone.
// For the case of external cloud providers, use GetZoneByProviderID or GetZoneByNodeName since GetZone
// can no longer be called from the kubelets.
GetZone() (Zone, error)
GetZone(ctx context.Context) (Zone, error)
// GetZoneByProviderID returns the Zone containing the current zone and locality region of the node specified by providerId
// This method is particularly used in the context of external cloud providers where node initialization must be done
// outside the kubelets.
GetZoneByProviderID(providerID string) (Zone, error)
GetZoneByProviderID(ctx context.Context, providerID string) (Zone, error)
// GetZoneByNodeName returns the Zone containing the current zone and locality region of the node specified by node name
// This method is particularly used in the context of external cloud providers where node initialization must be done
// outside the kubelets.
GetZoneByNodeName(nodeName types.NodeName) (Zone, error)
GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (Zone, error)
}
// PVLabeler is an abstract, pluggable interface for fetching labels for volumes
type PVLabeler interface {
GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]string, error)
GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error)
}
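
The context-aware signatures above thread the caller's context.Context through every cloud call. A minimal sketch of what an implementation now looks like, using a hypothetical fakeInstances type rather than any real provider:

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
)

type fakeInstances struct{}

// InstanceID now receives the caller's context, so a lookup against the
// cloud API can honor cancellation and deadlines instead of blocking.
func (f *fakeInstances) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) {
	select {
	case <-ctx.Done():
		return "", ctx.Err() // the caller gave up; abort early
	default:
	}
	return "fake://" + string(nodeName), nil
}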

View file

@ -60,23 +60,11 @@ func IsCloudProvider(name string) bool {
return found
}
// CloudProviders returns the name of all registered cloud providers in a
// string slice
func CloudProviders() []string {
names := []string{}
providersMutex.Lock()
defer providersMutex.Unlock()
for name := range providers {
names = append(names, name)
}
return names
}
// GetCloudProvider creates an instance of the named cloud provider, or nil if
// the name is unknown. The error return is only used if the named provider
// was known but failed to initialize. The config parameter specifies the
// io.Reader handler of the configuration file for the cloud provider, or nil
// for no configuation.
// for no configuration.
func GetCloudProvider(name string, config io.Reader) (Interface, error) {
providersMutex.Lock()
defer providersMutex.Unlock()

View file

@ -429,7 +429,7 @@ func NewControllerRevisionControllerRefManager(
// * Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attemped and
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//

View file

@ -64,7 +64,7 @@ const (
// latency/pod at the scale of 3000 pods over 100 nodes.
ExpectationsTimeout = 5 * time.Minute
// When batching pod creates, SlowStartInitialBatchSize is the size of the
// inital batch. The size of each successive batch is twice the size of
// initial batch. The size of each successive batch is twice the size of
// the previous batch. For example, for a value of 1, batch sizes would be
// 1, 2, 4, 8, ... and for a value of 10, batch sizes would be
// 10, 20, 40, 80, ... Setting the value higher means that quota denials
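
A tiny illustration of the doubling schedule described above (the helper name is ours, not the controller's):

// batchSizes is illustrative only: the batch sizes used to create `total`
// pods starting from `initial`, doubling each round.
func batchSizes(initial, total int) []int {
	var sizes []int
	for created := 0; created < total; {
		size := initial
		if remaining := total - created; size > remaining {
			size = remaining
		}
		sizes = append(sizes, size)
		created += size
		initial *= 2 // each successive batch is twice the previous one
	}
	return sizes
}

// batchSizes(1, 15) yields [1 2 4 8]; batchSizes(10, 100) yields [10 20 40 30].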

View file

@ -33,18 +33,10 @@ const (
// beta: v1.4
AppArmor utilfeature.Feature = "AppArmor"
// owner: @girishkalele
// alpha: v1.4
ExternalTrafficLocalOnly utilfeature.Feature = "AllowExtTrafficLocalEndpoints"
// owner: @mtaufen
// alpha: v1.4
DynamicKubeletConfig utilfeature.Feature = "DynamicKubeletConfig"
// owner: @mtaufen
// alpha: v1.8
KubeletConfigFile utilfeature.Feature = "KubeletConfigFile"
// owner: @pweil-
// alpha: v1.5
//
@ -64,6 +56,8 @@ const (
// owner: @vishh
// alpha: v1.6
//
// This is deprecated and will be removed in v1.11. Use DevicePlugins instead.
//
// Enables support for GPUs as a schedulable resource.
// Only Nvidia GPUs are supported as of v1.6.
// Works only with Docker Container Runtime.
@ -116,11 +110,17 @@ const (
ExpandPersistentVolumes utilfeature.Feature = "ExpandPersistentVolumes"
// owner: @verb
// alpha: v1.8
// alpha: v1.10
//
// Allows running a "debug container" in a pod namespaces to troubleshoot a running pod.
DebugContainers utilfeature.Feature = "DebugContainers"
// owner: @verb
// alpha: v1.10
//
// Allows all containers in a pod to share a process namespace.
PodShareProcessNamespace utilfeature.Feature = "PodShareProcessNamespace"
// owner: @bsalamat
// alpha: v1.8
//
@ -141,7 +141,7 @@ const (
TaintNodesByCondition utilfeature.Feature = "TaintNodesByCondition"
// owner: @jsafrane
// alpha: v1.8
// beta: v1.10
//
// Enable mount propagation of volumes.
MountPropagation utilfeature.Feature = "MountPropagation"
@ -153,17 +153,24 @@ const (
CPUManager utilfeature.Feature = "CPUManager"
// owner: @derekwaynecarr
// alpha: v1.8
// beta: v1.10
//
// Enable pods to consume pre-allocated huge pages of varying page sizes
HugePages utilfeature.Feature = "HugePages"
// owner @brendandburns
// alpha: v1.8
// alpha: v1.9
//
// Enable nodes to exclude themselves from service load balancers
ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion"
// owner @brendandburns
// deprecated: v1.10
//
// Enable the service proxy to contact external IP addresses. Note: this feature is present
// only for backward compatibility; it will be removed in the 1.11 release.
ServiceProxyAllowExternalIPs utilfeature.Feature = "ServiceProxyAllowExternalIPs"
// owner: @jsafrane
// alpha: v1.9
//
@ -196,10 +203,10 @@ const (
BlockVolume utilfeature.Feature = "BlockVolume"
// owner: @pospispa
//
// alpha: v1.9
// Postpone deletion of a persistent volume claim in case it is used by a pod
PVCProtection utilfeature.Feature = "PVCProtection"
//
// Postpone deletion of a PV or a PVC when they are being used
StorageProtection utilfeature.Feature = "StorageProtection"
// owner: @aveshagarwal
// alpha: v1.9
@ -212,6 +219,25 @@ const (
//
// Implement IPVS-based in-cluster service load balancing
SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode"
// owner: @dims
// alpha: v1.10
//
// Implement support for limiting pids in pods
SupportPodPidsLimit utilfeature.Feature = "SupportPodPidsLimit"
// owner: @feiskyer
// alpha: v1.10
//
// Enable Hyper-V containers on Windows
HyperVContainer utilfeature.Feature = "HyperVContainer"
// owner: @joelsmith
// deprecated: v1.10
//
// Mount secret, configMap, downwardAPI and projected volumes ReadOnly. Note: this feature
// gate is present only for backward compatibility; it will be removed in the 1.11 release.
ReadOnlyAPIDataVolumes utilfeature.Feature = "ReadOnlyAPIDataVolumes"
)
func init() {
@ -222,10 +248,8 @@ func init() {
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout Kubernetes binaries.
var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{
ExternalTrafficLocalOnly: {Default: true, PreRelease: utilfeature.GA},
AppArmor: {Default: true, PreRelease: utilfeature.Beta},
DynamicKubeletConfig: {Default: false, PreRelease: utilfeature.Alpha},
KubeletConfigFile: {Default: false, PreRelease: utilfeature.Alpha},
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta},
ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha},
Accelerators: {Default: false, PreRelease: utilfeature.Alpha},
@ -235,23 +259,26 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta},
PersistentLocalVolumes: {Default: false, PreRelease: utilfeature.Alpha},
LocalStorageCapacityIsolation: {Default: false, PreRelease: utilfeature.Alpha},
HugePages: {Default: false, PreRelease: utilfeature.Alpha},
HugePages: {Default: true, PreRelease: utilfeature.Beta},
DebugContainers: {Default: false, PreRelease: utilfeature.Alpha},
PodShareProcessNamespace: {Default: false, PreRelease: utilfeature.Alpha},
PodPriority: {Default: false, PreRelease: utilfeature.Alpha},
EnableEquivalenceClassCache: {Default: false, PreRelease: utilfeature.Alpha},
TaintNodesByCondition: {Default: false, PreRelease: utilfeature.Alpha},
MountPropagation: {Default: false, PreRelease: utilfeature.Alpha},
MountPropagation: {Default: true, PreRelease: utilfeature.Beta},
ExpandPersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha},
CPUManager: {Default: false, PreRelease: utilfeature.Alpha},
CPUManager: {Default: true, PreRelease: utilfeature.Beta},
ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha},
MountContainers: {Default: false, PreRelease: utilfeature.Alpha},
VolumeScheduling: {Default: false, PreRelease: utilfeature.Alpha},
CSIPersistentVolume: {Default: false, PreRelease: utilfeature.Alpha},
CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta},
CustomPodDNS: {Default: false, PreRelease: utilfeature.Alpha},
BlockVolume: {Default: false, PreRelease: utilfeature.Alpha},
PVCProtection: {Default: false, PreRelease: utilfeature.Alpha},
StorageProtection: {Default: false, PreRelease: utilfeature.Alpha},
ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha},
SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Beta},
SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha},
HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
@ -264,4 +291,8 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
// features that enable backwards compatibility but are scheduled to be removed
ServiceProxyAllowExternalIPs: {Default: false, PreRelease: utilfeature.Deprecated},
ReadOnlyAPIDataVolumes: {Default: true, PreRelease: utilfeature.Deprecated},
}
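
For reference, components consult these defaults at runtime through the feature-gate helper; a minimal sketch (the wrapper function is hypothetical):

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// hugePagesEnabled is a hypothetical helper. HugePages defaults to on (Beta)
// per the table above but can be overridden with --feature-gates=HugePages=false.
func hugePagesEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.HugePages)
}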

View file

@ -1,7 +1,8 @@
// To regenerate api.pb.go run hack/update-generated-runtime.sh
syntax = 'proto3';
package runtime;
package runtime.v1alpha2;
option go_package = "v1alpha2";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
@ -63,6 +64,10 @@ service RuntimeService {
rpc ContainerStatus(ContainerStatusRequest) returns (ContainerStatusResponse) {}
// UpdateContainerResources updates ContainerConfig of the container.
rpc UpdateContainerResources(UpdateContainerResourcesRequest) returns (UpdateContainerResourcesResponse) {}
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. This is often called after the log file has been
// rotated.
rpc ReopenContainerLog(ReopenContainerLogRequest) returns (ReopenContainerLogResponse) {}
// ExecSync runs a command in a container synchronously.
rpc ExecSync(ExecSyncRequest) returns (ExecSyncResponse) {}
@ -174,14 +179,39 @@ message Mount {
MountPropagation propagation = 5;
}
// A NamespaceMode describes the intended namespace configuration for each
// of the namespaces (Network, PID, IPC) in NamespaceOption. Runtimes should
// map these modes as appropriate for the technology underlying the runtime.
enum NamespaceMode {
// A POD namespace is common to all containers in a pod.
// For example, a container with a PID namespace of POD expects to view
// all of the processes in all of the containers in the pod.
POD = 0;
// A CONTAINER namespace is restricted to a single container.
// For example, a container with a PID namespace of CONTAINER expects to
// view only the processes in that container.
CONTAINER = 1;
// A NODE namespace is the namespace of the Kubernetes node.
// For example, a container with a PID namespace of NODE expects to view
// all of the processes on the host running the kubelet.
NODE = 2;
}
// NamespaceOption provides options for Linux namespaces.
message NamespaceOption {
// If set, use the host's network namespace.
bool host_network = 1;
// If set, use the host's PID namespace.
bool host_pid = 2;
// If set, use the host's IPC namespace.
bool host_ipc = 3;
// Network namespace for this container/sandbox.
// Note: There is currently no way to set CONTAINER scoped network in the Kubernetes API.
// Namespaces currently set by the kubelet: POD, NODE
NamespaceMode network = 1;
// PID namespace for this container/sandbox.
// Note: The CRI default is POD, but the v1.PodSpec default is CONTAINER.
// The kubelet's runtime manager will set this to CONTAINER explicitly for v1 pods.
// Namespaces currently set by the kubelet: POD, CONTAINER, NODE
NamespaceMode pid = 2;
// IPC namespace for this container/sandbox.
// Note: There is currently no way to set CONTAINER scoped IPC in the Kubernetes API.
// Namespaces currently set by the kubelet: POD, NODE
NamespaceMode ipc = 3;
}
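
In the generated Go API these become a NamespaceMode-typed struct; a sketch of how a kubelet-side caller might fill it in for an ordinary v1 pod (the field and constant names assume the usual gogo-protobuf generated form):

import runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"

// podNamespaceOption is illustrative; per the comments above, the kubelet
// sets PID to CONTAINER explicitly for v1 pods and uses POD for network and IPC.
func podNamespaceOption() *runtimeapi.NamespaceOption {
	return &runtimeapi.NamespaceOption{
		Network: runtimeapi.NamespaceMode_POD,       // shared by all containers in the pod
		Pid:     runtimeapi.NamespaceMode_CONTAINER, // restricted to the single container
		Ipc:     runtimeapi.NamespaceMode_POD,       // shared by all containers in the pod
	}
}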
// Int64Value is the wrapper of int64.
@ -556,6 +586,26 @@ message LinuxContainerConfig {
LinuxContainerSecurityContext security_context = 2;
}
// WindowsContainerConfig contains platform-specific configuration for
// Windows-based containers.
message WindowsContainerConfig {
// Resources specification for the container.
WindowsContainerResources resources = 1;
}
// WindowsContainerResources specifies Windows-specific configuration for
// resources.
message WindowsContainerResources {
// CPU shares (relative weight vs. other containers). Default: 0 (not specified).
int64 cpu_shares = 1;
// Number of CPUs available to the container. Default: 0 (not specified).
int64 cpu_count = 2;
// Specifies the portion of processor cycles that this container can use as a percentage times 100.
int64 cpu_maximum = 3;
// Memory limit in bytes. Default: 0 (not specified).
int64 memory_limit_in_bytes = 4;
}
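
An illustrative struct literal for these resources (generated Go field names assumed; the values are arbitrary):

// windowsResources is illustrative only.
func windowsResources() *runtimeapi.WindowsContainerResources {
	return &runtimeapi.WindowsContainerResources{
		CpuShares:          1024,              // relative weight vs. other containers
		CpuCount:           2,                 // CPUs available to the container
		CpuMaximum:         5000,              // 50% of cycles, as a percentage times 100
		MemoryLimitInBytes: 512 * 1024 * 1024, // 512 MiB
	}
}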
// ContainerMetadata holds all necessary information for building the container
// name. The container runtime is encouraged to expose the metadata in its user
// interface for better user experience. E.g., runtime can construct a unique
@ -643,6 +693,8 @@ message ContainerConfig {
// Configuration specific to Linux containers.
LinuxContainerConfig linux = 15;
// Configuration specific to Windows containers.
WindowsContainerConfig windows = 16;
}
message CreateContainerRequest {
@ -1051,18 +1103,18 @@ message UInt64Value {
uint64 value = 1;
}
// StorageIdentifier uniquely identify the storage..
message StorageIdentifier{
// UUID of the device.
string uuid = 1;
// FilesystemIdentifier uniquely identifies the filesystem.
message FilesystemIdentifier{
// Mountpoint of a filesystem.
string mountpoint = 1;
}
// FilesystemUsage provides the filesystem usage information.
message FilesystemUsage {
// Timestamp in nanoseconds at which the information was collected. Must be > 0.
int64 timestamp = 1;
// The underlying storage of the filesystem.
StorageIdentifier storage_id = 2;
// The unique identifier of the filesystem.
FilesystemIdentifier fs_id = 2;
// UsedBytes represents the bytes used for images on the filesystem.
// This may differ from the total bytes used on the filesystem and may not
// equal CapacityBytes - AvailableBytes.
@ -1153,3 +1205,11 @@ message MemoryUsage {
// The amount of working set memory in bytes.
UInt64Value working_set_bytes = 2;
}
message ReopenContainerLogRequest {
// ID of the container for which to reopen the log.
string container_id = 1;
}
message ReopenContainerLogResponse{
}
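
A sketch of the kubelet-side call after a container's log file has been rotated (client construction elided; the generated gRPC client and field names are assumptions):

import (
	"context"

	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)

// reopenLog is illustrative: ask the runtime to reopen the container's
// stdout/stderr log file after the old one has been rotated away.
func reopenLog(ctx context.Context, client runtimeapi.RuntimeServiceClient, containerID string) error {
	_, err := client.ReopenContainerLog(ctx, &runtimeapi.ReopenContainerLogRequest{
		ContainerId: containerID,
	})
	return err
}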

View file

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
package v1alpha2
// This file contains all constants defined in CRI.
@ -38,7 +38,7 @@ const (
// LogTag is the tag of a log line in CRI container log.
// Currently defined log tags:
// * First tag: Partial/End - P/E.
// * First tag: Partial/Full - P/F.
// The field in the container log format can be extended to include multiple
// tags by using a delimiter, but changes should be rare. If it becomes clear
// that better extensibility is desired, a more extensible format (e.g., json)

View file

@ -58,7 +58,7 @@ type realContainerGC struct {
// Policy for garbage collection.
policy ContainerGCPolicy
// sourcesReadyProvider provides the readyness of kubelet configuration sources.
// sourcesReadyProvider provides the readiness of kubelet configuration sources.
sourcesReadyProvider SourcesReadyProvider
}

View file

@ -31,7 +31,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
hashutil "k8s.io/kubernetes/pkg/util/hash"
@ -48,7 +48,7 @@ type HandlerRunner interface {
type RuntimeHelper interface {
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, err error)
GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error)
// GetPodCgroupParent returns the CgroupName identifer, and its literal cgroupfs form on the host
// GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host
// of a pod.
GetPodCgroupParent(pod *v1.Pod) string
GetPodDir(podUID types.UID) string
@ -302,7 +302,7 @@ func GetContainerSpec(pod *v1.Pod, containerName string) *v1.Container {
// HasPrivilegedContainer returns true if any of the containers in the pod are privileged.
func HasPrivilegedContainer(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
for _, c := range append(pod.Spec.Containers, pod.Spec.InitContainers...) {
if c.SecurityContext != nil &&
c.SecurityContext.Privileged != nil &&
*c.SecurityContext.Privileged {

View file

@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/util/flowcontrol"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/volume"
)
@ -265,6 +265,13 @@ const (
ContainerStateUnknown ContainerState = "unknown"
)
type ContainerType string
const (
ContainerTypeInit ContainerType = "INIT"
ContainerTypeRegular ContainerType = "REGULAR"
)
// Container provides the runtime information for a container, such as ID, hash,
// state of the container.
type Container struct {
@ -375,6 +382,11 @@ type EnvVar struct {
Value string
}
type Annotation struct {
Name string
Value string
}
type Mount struct {
// Name of the volume mount.
// TODO(yifan): Remove this field, as this is not representing the unique name of the mount,
@ -424,6 +436,10 @@ type RunContainerOptions struct {
Devices []DeviceInfo
// The port mappings for the containers.
PortMappings []PortMapping
// The annotations for the container
// These annotations are generated by other components (i.e.,
// not users). Currently, only device plugins populate the annotations.
Annotations []Annotation
// If the container has specified the TerminationMessagePath, then
// this directory will be used to create and mount the log file to
// container.TerminationMessagePath

View file

@ -178,8 +178,6 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er
chainsToRemove := []utiliptables.Chain{}
for _, pm := range hostportMappings {
chainsToRemove = append(chainsToRemove, getHostportChain(id, pm))
// TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153
chainsToRemove = append(chainsToRemove, getBuggyHostportChain(id, pm))
}
// remove rules that consist of target chains
@ -255,16 +253,6 @@ func getHostportChain(id string, pm *PortMapping) utiliptables.Chain {
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}
// This bugy func does bad conversion on HostPort from int32 to string.
// It may generates same chain names for different ports of the same pod, e.g. port 57119/55429/56833.
// `getHostportChain` fixed this bug. In order to cleanup the legacy chains/rules, it is temporarily left.
// TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153
func getBuggyHostportChain(id string, pm *PortMapping) utiliptables.Chain {
hash := sha256.Sum256([]byte(id + string(pm.HostPort) + string(pm.Protocol)))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}
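
The bug in the removed helper is a Go conversion pitfall: string(pm.HostPort) turns the int32 into a single Unicode code point rather than its decimal digits, and every port in the UTF-16 surrogate range 55296-57343 (which includes 55429, 56833 and 57119) encodes as the replacement character U+FFFD, so those ports all hashed to the same chain name. A runnable demonstration:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, port := range []int32{55429, 56833, 57119} {
		// string(port) yields "\ufffd" for all three surrogate-range values;
		// strconv.Itoa produces the distinct decimal form that the fixed
		// getHostportChain hashes instead.
		fmt.Printf("string(%d) = %q  strconv.Itoa = %q\n",
			port, string(port), strconv.Itoa(int(port)))
	}
}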
// gatherHostportMappings returns all the PortMappings which have a hostport for a pod
func gatherHostportMappings(podPortMapping *PodPortMapping) []*PortMapping {
mappings := []*PortMapping{}

View file

@ -45,7 +45,7 @@ const (
// options contains details about which streams are required for
// port forwarding.
// All fields incldued in V4Options need to be expressed explicilty in the
// All fields included in V4Options need to be expressed explicitly in the
// CRI (pkg/kubelet/apis/cri/{version}/api.proto) PortForwardRequest.
type V4Options struct {
Ports []int32

View file

@ -156,7 +156,7 @@ func createHttpStreamStreams(req *http.Request, w http.ResponseWriter, opts *Opt
case remotecommandconsts.StreamProtocolV2Name:
handler = &v2ProtocolHandler{}
case "":
glog.V(4).Infof("Client did not request protocol negotiaion. Falling back to %q", remotecommandconsts.StreamProtocolV1Name)
glog.V(4).Infof("Client did not request protocol negotiation. Falling back to %q", remotecommandconsts.StreamProtocolV1Name)
fallthrough
case remotecommandconsts.StreamProtocolV1Name:
handler = &v1ProtocolHandler{}

View file

@ -33,7 +33,7 @@ import (
"k8s.io/apimachinery/pkg/types"
remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
"k8s.io/client-go/tools/remotecommand"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/server/portforward"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
)

View file

@ -24,4 +24,10 @@ const (
DockerContainerRuntime = "docker"
RktContainerRuntime = "rkt"
RemoteContainerRuntime = "remote"
// User visible keys for managing node allocatable enforcement on the node.
NodeAllocatableEnforcementKey = "pods"
SystemReservedEnforcementKey = "system-reserved"
KubeReservedEnforcementKey = "kube-reserved"
NodeAllocatableNoneKey = "none"
)

View file

@ -21,6 +21,7 @@ const (
KubernetesPodNamespaceLabel = "io.kubernetes.pod.namespace"
KubernetesPodUIDLabel = "io.kubernetes.pod.uid"
KubernetesContainerNameLabel = "io.kubernetes.container.name"
KubernetesContainerTypeLabel = "io.kubernetes.container.type"
)
func GetContainerName(labels map[string]string) string {

View file

@ -51,7 +51,7 @@ func PodWithDeletionTimestamp(pod *v1.Pod) string {
return Pod(pod) + deletionTimestamp
}
// Pods returns a string representating a list of pods in a human
// Pods returns a string representation of a list of pods in a human
// readable format.
func Pods(pods []*v1.Pod) string {
return aggregatePods(pods, Pod)

19
vendor/k8s.io/kubernetes/pkg/master/ports/doc.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ports defines ports used by various pieces of the kubernetes
// infrastructure.
package ports // import "k8s.io/kubernetes/pkg/master/ports"

44
vendor/k8s.io/kubernetes/pkg/master/ports/ports.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ports
const (
// ProxyStatusPort is the default port for the proxy metrics server.
// May be overridden by a flag at startup.
ProxyStatusPort = 10249
// KubeletPort is the default port for the kubelet server on each host machine.
// May be overridden by a flag at startup.
KubeletPort = 10250
// SchedulerPort is the default port for the scheduler status server.
// May be overridden by a flag at startup.
SchedulerPort = 10251
// ControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
ControllerManagerPort = 10252
// CloudControllerManagerPort is the default port for the cloud controller manager server.
// This value may be overridden by a flag at startup.
CloudControllerManagerPort = 10253
// KubeletReadOnlyPort exposes basic read-only services from the kubelet.
// May be overridden by a flag at startup.
// This is necessary for heapster to collect monitoring stats from the kubelet
// until heapster can transition to using the SSL endpoint.
// TODO(roberthbailey): Remove this once we have a better solution for heapster.
KubeletReadOnlyPort = 10255
// ProxyHealthzPort is the default port for the proxy healthz server.
// May be overridden by a flag at startup.
ProxyHealthzPort = 10256
)
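
A small usage sketch (the helper and the /stats/summary path are illustrative assumptions, not defined in this package):

import (
	"fmt"

	"k8s.io/kubernetes/pkg/master/ports"
)

// kubeletReadOnlyURL builds the read-only endpoint that heapster scrapes;
// the /stats/summary path is an assumption for illustration.
func kubeletReadOnlyURL(host string) string {
	return fmt.Sprintf("http://%s:%d/stats/summary", host, ports.KubeletReadOnlyPort)
}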

View file

@ -73,7 +73,7 @@ type HTTPServerFactory interface {
// HTTPServer allows for testing of Server.
type HTTPServer interface {
// Server is designed so that http.Server satifies this interface,
// Server is designed so that http.Server satisfies this interface,
Serve(listener net.Listener) error
}

View file

@ -39,12 +39,10 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
apiservice "k8s.io/kubernetes/pkg/api/service"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
"k8s.io/kubernetes/pkg/proxy/metrics"
@ -191,8 +189,7 @@ func (e *endpointsInfo) String() string {
// returns a new serviceInfo struct
func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo {
onlyNodeLocalEndpoints := false
if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) &&
apiservice.RequestsOnlyLocalTraffic(service) {
if apiservice.RequestsOnlyLocalTraffic(service) {
onlyNodeLocalEndpoints = true
}
var stickyMaxAgeSeconds int
@ -500,25 +497,32 @@ func NewProxier(ipt utiliptables.Interface,
return proxier, nil
}
type iptablesJumpChain struct {
table utiliptables.Table
chain utiliptables.Chain
sourceChain utiliptables.Chain
comment string
}
var iptablesJumpChains = []iptablesJumpChain{
{utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainInput, "kubernetes service portals"},
{utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals"},
{utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals"},
{utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainPrerouting, "kubernetes service portals"},
{utiliptables.TableNAT, kubePostroutingChain, utiliptables.ChainPostrouting, "kubernetes postrouting rules"},
{utiliptables.TableFilter, kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules"},
}
// CleanupLeftovers removes all iptables rules and chains created by the Proxier
// It returns true if an error was encountered. Errors are logged.
func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
// Unlink the services chain.
args := []string{
"-m", "comment", "--comment", "kubernetes service portals",
"-j", string(kubeServicesChain),
}
tableChainsWithJumpServices := []struct {
table utiliptables.Table
chain utiliptables.Chain
}{
{utiliptables.TableFilter, utiliptables.ChainInput},
{utiliptables.TableFilter, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainPrerouting},
}
for _, tc := range tableChainsWithJumpServices {
if err := ipt.DeleteRule(tc.table, tc.chain, args...); err != nil {
// Unlink our chains
for _, chain := range iptablesJumpChains {
args := []string{
"-m", "comment", "--comment", chain.comment,
"-j", string(chain.chain),
}
if err := ipt.DeleteRule(chain.table, chain.sourceChain, args...); err != nil {
if !utiliptables.IsNotFoundError(err) {
glog.Errorf("Error removing pure-iptables proxy rule: %v", err)
encounteredError = true
@ -526,31 +530,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
}
}
// Unlink the postrouting chain.
args = []string{
"-m", "comment", "--comment", "kubernetes postrouting rules",
"-j", string(kubePostroutingChain),
}
if err := ipt.DeleteRule(utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
if !utiliptables.IsNotFoundError(err) {
glog.Errorf("Error removing pure-iptables proxy rule: %v", err)
encounteredError = true
}
}
// Unlink the forwarding chain.
args = []string{
"-m", "comment", "--comment", "kubernetes forwarding rules",
"-j", string(kubeForwardChain),
}
if err := ipt.DeleteRule(utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil {
if !utiliptables.IsNotFoundError(err) {
glog.Errorf("Error removing pure-iptables proxy rule: %v", err)
encounteredError = true
}
}
// Flush and remove all of our chains.
// Flush and remove all of our "-t nat" chains.
iptablesData := bytes.NewBuffer(nil)
if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil {
glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err)
@ -586,7 +566,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
}
}
// Flush and remove all of our chains.
// Flush and remove all of our "-t filter" chains.
iptablesData = bytes.NewBuffer(nil)
if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil {
glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err)
@ -773,10 +753,6 @@ func updateEndpointsMap(
changes.items = make(map[types.NamespacedName]*endpointsChange)
}()
if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) {
return
}
// TODO: If this will appear to be computationally expensive, consider
// computing this incrementally similarly to endpointsMap.
result.hcEndpoints = make(map[types.NamespacedName]int)
@ -1004,61 +980,18 @@ func (proxier *Proxier) syncProxyRules() {
glog.V(3).Infof("Syncing iptables rules")
// Create and link the kube services chain.
{
tablesNeedServicesChain := []utiliptables.Table{utiliptables.TableFilter, utiliptables.TableNAT}
for _, table := range tablesNeedServicesChain {
if _, err := proxier.iptables.EnsureChain(table, kubeServicesChain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", table, kubeServicesChain, err)
return
}
}
tableChainsNeedJumpServices := []struct {
table utiliptables.Table
chain utiliptables.Chain
}{
{utiliptables.TableFilter, utiliptables.ChainInput},
{utiliptables.TableFilter, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainPrerouting},
}
comment := "kubernetes service portals"
args := []string{"-m", "comment", "--comment", comment, "-j", string(kubeServicesChain)}
for _, tc := range tableChainsNeedJumpServices {
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, tc.table, tc.chain, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeServicesChain, err)
return
}
}
}
// Create and link the kube postrouting chain.
{
if _, err := proxier.iptables.EnsureChain(utiliptables.TableNAT, kubePostroutingChain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubePostroutingChain, err)
// Create and link the kube chains.
for _, chain := range iptablesJumpChains {
if _, err := proxier.iptables.EnsureChain(chain.table, chain.chain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, kubeServicesChain, err)
return
}
comment := "kubernetes postrouting rules"
args := []string{"-m", "comment", "--comment", comment, "-j", string(kubePostroutingChain)}
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, kubePostroutingChain, err)
return
args := []string{
"-m", "comment", "--comment", chain.comment,
"-j", string(chain.chain),
}
}
// Create and link the kube forward chain.
{
if _, err := proxier.iptables.EnsureChain(utiliptables.TableFilter, kubeForwardChain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, kubeForwardChain, err)
return
}
comment := "kubernetes forward rules"
args := []string{"-m", "comment", "--comment", comment, "-j", string(kubeForwardChain)}
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainForward, kubeForwardChain, err)
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, chain.table, chain.sourceChain, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.chain, err)
return
}
}
@ -1100,35 +1033,19 @@ func (proxier *Proxier) syncProxyRules() {
// Make sure we keep stats for the top-level chains, if they existed
// (which most should have because we created them above).
if chain, ok := existingFilterChains[kubeServicesChain]; ok {
writeLine(proxier.filterChains, chain)
} else {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeServicesChain))
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeForwardChain} {
if chain, ok := existingFilterChains[chainName]; ok {
writeLine(proxier.filterChains, chain)
} else {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(chainName))
}
}
if chain, ok := existingFilterChains[kubeForwardChain]; ok {
writeLine(proxier.filterChains, chain)
} else {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeForwardChain))
}
if chain, ok := existingNATChains[kubeServicesChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubeServicesChain))
}
if chain, ok := existingNATChains[kubeNodePortsChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubeNodePortsChain))
}
if chain, ok := existingNATChains[kubePostroutingChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubePostroutingChain))
}
if chain, ok := existingNATChains[KubeMarkMasqChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(KubeMarkMasqChain))
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} {
if chain, ok := existingNATChains[chainName]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(chainName))
}
}
// Install the kubernetes-specific postrouting rules. We use a whole chain for
@ -1172,6 +1089,7 @@ func (proxier *Proxier) syncProxyRules() {
// Build rules for each service.
var svcNameString string
for svcName, svcInfo := range proxier.serviceMap {
isIPv6 := utilproxy.IsIPv6(svcInfo.clusterIP)
protocol := strings.ToLower(string(svcInfo.protocol))
svcNameString = svcInfo.serviceNameString
@ -1384,7 +1302,6 @@ func (proxier *Proxier) syncProxyRules() {
// This is very low impact. The NodePort range is intentionally obscure, and unlikely to actually collide with real Services.
// This only affects UDP connections, which are not common.
// See issue: https://github.com/kubernetes/kubernetes/issues/49881
isIPv6 := utilproxy.IsIPv6(svcInfo.clusterIP)
err := utilproxy.ClearUDPConntrackForPort(proxier.exec, lp.Port, isIPv6)
if err != nil {
glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err)
@ -1407,6 +1324,13 @@ func (proxier *Proxier) syncProxyRules() {
} else {
// TODO: Make all nodePorts jump to the firewall chain.
// Currently we only create it for loadbalancers (#33586).
// Fix localhost martian source error
loopback := "127.0.0.0/8"
if isIPv6 {
loopback = "::1/128"
}
writeLine(proxier.natRules, append(args, "-s", loopback, "-j", string(KubeMarkMasqChain))...)
writeLine(proxier.natRules, append(args, "-j", string(svcXlbChain))...)
}

View file

@ -21,27 +21,21 @@ import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/authentication/authenticator"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
jwt "github.com/dgrijalva/jwt-go"
"github.com/golang/glog"
)
const (
Issuer = "kubernetes/serviceaccount"
SubjectClaim = "sub"
IssuerClaim = "iss"
ServiceAccountNameClaim = "kubernetes.io/serviceaccount/service-account.name"
ServiceAccountUIDClaim = "kubernetes.io/serviceaccount/service-account.uid"
SecretNameClaim = "kubernetes.io/serviceaccount/secret.name"
NamespaceClaim = "kubernetes.io/serviceaccount/namespace"
jose "gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
)
// ServiceAccountTokenGetter defines functions to retrieve a named service account and secret
@ -51,35 +45,43 @@ type ServiceAccountTokenGetter interface {
}
type TokenGenerator interface {
// GenerateToken generates a token which will identify the given ServiceAccount.
// The returned token will be stored in the given (and yet-unpersisted) Secret.
GenerateToken(serviceAccount v1.ServiceAccount, secret v1.Secret) (string, error)
// GenerateToken generates a token which will identify the given
// ServiceAccount. privateClaims is an interface that will be
// serialized into the JWT payload JSON encoding at the root level of
// the payload object. Public claims take precedent over private
// claims i.e. if both claims and privateClaims have an "exp" field,
// the value in claims will be used.
GenerateToken(claims *jwt.Claims, privateClaims interface{}) (string, error)
}
// JWTTokenGenerator returns a TokenGenerator that generates signed JWT tokens, using the given privateKey.
// privateKey is a PEM-encoded byte array of a private RSA key.
// JWTTokenAuthenticator()
func JWTTokenGenerator(privateKey interface{}) TokenGenerator {
return &jwtTokenGenerator{privateKey}
func JWTTokenGenerator(iss string, privateKey interface{}) TokenGenerator {
return &jwtTokenGenerator{
iss: iss,
privateKey: privateKey,
}
}
type jwtTokenGenerator struct {
iss string
privateKey interface{}
}
func (j *jwtTokenGenerator) GenerateToken(serviceAccount v1.ServiceAccount, secret v1.Secret) (string, error) {
var method jwt.SigningMethod
func (j *jwtTokenGenerator) GenerateToken(claims *jwt.Claims, privateClaims interface{}) (string, error) {
var alg jose.SignatureAlgorithm
switch privateKey := j.privateKey.(type) {
case *rsa.PrivateKey:
method = jwt.SigningMethodRS256
alg = jose.RS256
case *ecdsa.PrivateKey:
switch privateKey.Curve {
case elliptic.P256():
method = jwt.SigningMethodES256
alg = jose.ES256
case elliptic.P384():
method = jwt.SigningMethodES384
alg = jose.ES384
case elliptic.P521():
method = jwt.SigningMethodES512
alg = jose.ES512
default:
return "", fmt.Errorf("unknown private key curve, must be 256, 384, or 521")
}
@ -87,164 +89,186 @@ func (j *jwtTokenGenerator) GenerateToken(serviceAccount v1.ServiceAccount, secr
return "", fmt.Errorf("unknown private key type %T, must be *rsa.PrivateKey or *ecdsa.PrivateKey", j.privateKey)
}
token := jwt.New(method)
signer, err := jose.NewSigner(
jose.SigningKey{
Algorithm: alg,
Key: j.privateKey,
},
nil,
)
if err != nil {
return "", err
}
claims, _ := token.Claims.(jwt.MapClaims)
// Identify the issuer
claims[IssuerClaim] = Issuer
// Username
claims[SubjectClaim] = apiserverserviceaccount.MakeUsername(serviceAccount.Namespace, serviceAccount.Name)
// Persist enough structured info for the authenticator to be able to look up the service account and secret
claims[NamespaceClaim] = serviceAccount.Namespace
claims[ServiceAccountNameClaim] = serviceAccount.Name
claims[ServiceAccountUIDClaim] = serviceAccount.UID
claims[SecretNameClaim] = secret.Name
// Sign and get the complete encoded token as a string
return token.SignedString(j.privateKey)
// claims are applied in reverse precedence
return jwt.Signed(signer).
Claims(privateClaims).
Claims(claims).
Claims(&jwt.Claims{
Issuer: j.iss,
}).
CompactSerialize()
}
// JWTTokenAuthenticator authenticates tokens as JWT tokens produced by JWTTokenGenerator
// Token signatures are verified using each of the given public keys until one works (allowing key rotation)
// If lookup is true, the service account and secret referenced as claims inside the token are retrieved and verified with the provided ServiceAccountTokenGetter
func JWTTokenAuthenticator(keys []interface{}, lookup bool, getter ServiceAccountTokenGetter) authenticator.Token {
return &jwtTokenAuthenticator{keys, lookup, getter}
func JWTTokenAuthenticator(iss string, keys []interface{}, lookup bool, getter ServiceAccountTokenGetter) authenticator.Token {
return &jwtTokenAuthenticator{
iss: iss,
keys: keys,
validator: &legacyValidator{
lookup: lookup,
getter: getter,
},
}
}
type jwtTokenAuthenticator struct {
keys []interface{}
lookup bool
getter ServiceAccountTokenGetter
iss string
keys []interface{}
validator Validator
}
type Validator interface {
Validate(tokenData string, public *jwt.Claims, private *legacyPrivateClaims) error
}
var errMismatchedSigningMethod = errors.New("invalid signing method")
func (j *jwtTokenAuthenticator) AuthenticateToken(token string) (user.Info, bool, error) {
var validationError error
for i, key := range j.keys {
// Attempt to verify with each key until we find one that works
parsedToken, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
switch token.Method.(type) {
case *jwt.SigningMethodRSA:
if _, ok := key.(*rsa.PublicKey); ok {
return key, nil
}
return nil, errMismatchedSigningMethod
case *jwt.SigningMethodECDSA:
if _, ok := key.(*ecdsa.PublicKey); ok {
return key, nil
}
return nil, errMismatchedSigningMethod
default:
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
})
if err != nil {
switch err := err.(type) {
case *jwt.ValidationError:
if (err.Errors & jwt.ValidationErrorMalformed) != 0 {
// Not a JWT, no point in continuing
return nil, false, nil
}
if (err.Errors & jwt.ValidationErrorSignatureInvalid) != 0 {
// Signature error, perhaps one of the other keys will verify the signature
// If not, we want to return this error
glog.V(4).Infof("Signature error (key %d): %v", i, err)
validationError = err
continue
}
// This key doesn't apply to the given signature type
// Perhaps one of the other keys will verify the signature
// If not, we want to return this error
if err.Inner == errMismatchedSigningMethod {
glog.V(4).Infof("Mismatched key type (key %d): %v", i, err)
validationError = err
continue
}
}
// Other errors should just return as errors
return nil, false, err
}
// If we get here, we have a token with a recognized signature
claims, _ := parsedToken.Claims.(jwt.MapClaims)
// Make sure we issued the token
iss, _ := claims[IssuerClaim].(string)
if iss != Issuer {
return nil, false, nil
}
// Make sure the claims we need exist
sub, _ := claims[SubjectClaim].(string)
if len(sub) == 0 {
return nil, false, errors.New("sub claim is missing")
}
namespace, _ := claims[NamespaceClaim].(string)
if len(namespace) == 0 {
return nil, false, errors.New("namespace claim is missing")
}
secretName, _ := claims[SecretNameClaim].(string)
if len(namespace) == 0 {
return nil, false, errors.New("secretName claim is missing")
}
serviceAccountName, _ := claims[ServiceAccountNameClaim].(string)
if len(serviceAccountName) == 0 {
return nil, false, errors.New("serviceAccountName claim is missing")
}
serviceAccountUID, _ := claims[ServiceAccountUIDClaim].(string)
if len(serviceAccountUID) == 0 {
return nil, false, errors.New("serviceAccountUID claim is missing")
}
subjectNamespace, subjectName, err := apiserverserviceaccount.SplitUsername(sub)
if err != nil || subjectNamespace != namespace || subjectName != serviceAccountName {
return nil, false, errors.New("sub claim is invalid")
}
if j.lookup {
// Make sure token hasn't been invalidated by deletion of the secret
secret, err := j.getter.GetSecret(namespace, secretName)
if err != nil {
glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err)
return nil, false, errors.New("Token has been invalidated")
}
if secret.DeletionTimestamp != nil {
glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return nil, false, errors.New("Token has been invalidated")
}
if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(token)) != 0 {
glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return nil, false, errors.New("Token does not match server's copy")
}
// Make sure service account still exists (name and UID)
serviceAccount, err := j.getter.GetServiceAccount(namespace, serviceAccountName)
if err != nil {
glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err)
return nil, false, err
}
if serviceAccount.DeletionTimestamp != nil {
glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName)
return nil, false, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName)
}
if string(serviceAccount.UID) != serviceAccountUID {
glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID)
return nil, false, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID)
}
}
return UserInfo(namespace, serviceAccountName, serviceAccountUID), true, nil
func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info, bool, error) {
if !j.hasCorrectIssuer(tokenData) {
return nil, false, nil
}
return nil, false, validationError
tok, err := jwt.ParseSigned(tokenData)
if err != nil {
return nil, false, nil
}
public := &jwt.Claims{}
private := &legacyPrivateClaims{}
var (
found bool
errlist []error
)
for _, key := range j.keys {
if err := tok.Claims(key, public, private); err != nil {
errlist = append(errlist, err)
continue
}
found = true
break
}
if !found {
return nil, false, utilerrors.NewAggregate(errlist)
}
// If we get here, we have a token with a recognized signature and
// issuer string.
if err := j.validator.Validate(tokenData, public, private); err != nil {
return nil, false, err
}
return UserInfo(private.Namespace, private.ServiceAccountName, private.ServiceAccountUID), true, nil
}
// hasCorrectIssuer returns true if tokenData is a valid JWT in compact
// serialization format and the "iss" claim matches the iss field of this token
// authenticator; otherwise it returns false.
//
// Note: go-jose currently does not allow access to unverified JWS payloads.
// See https://github.com/square/go-jose/issues/169
func (j *jwtTokenAuthenticator) hasCorrectIssuer(tokenData string) bool {
parts := strings.Split(tokenData, ".")
if len(parts) != 3 {
return false
}
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return false
}
claims := struct {
// WARNING: this JWT is not verified. Do not trust these claims.
Issuer string `json:"iss"`
}{}
if err := json.Unmarshal(payload, &claims); err != nil {
return false
}
if claims.Issuer != j.iss {
return false
}
return true
}
type legacyValidator struct {
lookup bool
getter ServiceAccountTokenGetter
}
func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, private *legacyPrivateClaims) error {
// Make sure the claims we need exist
if len(public.Subject) == 0 {
return errors.New("sub claim is missing")
}
namespace := private.Namespace
if len(namespace) == 0 {
return errors.New("namespace claim is missing")
}
secretName := private.SecretName
if len(secretName) == 0 {
return errors.New("secretName claim is missing")
}
serviceAccountName := private.ServiceAccountName
if len(serviceAccountName) == 0 {
return errors.New("serviceAccountName claim is missing")
}
serviceAccountUID := private.ServiceAccountUID
if len(serviceAccountUID) == 0 {
return errors.New("serviceAccountUID claim is missing")
}
subjectNamespace, subjectName, err := apiserverserviceaccount.SplitUsername(public.Subject)
if err != nil || subjectNamespace != namespace || subjectName != serviceAccountName {
return errors.New("sub claim is invalid")
}
if v.lookup {
// Make sure token hasn't been invalidated by deletion of the secret
secret, err := v.getter.GetSecret(namespace, secretName)
if err != nil {
glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err)
return errors.New("Token has been invalidated")
}
if secret.DeletionTimestamp != nil {
glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return errors.New("Token has been invalidated")
}
if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 {
glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName)
return errors.New("Token does not match server's copy")
}
// Make sure service account still exists (name and UID)
serviceAccount, err := v.getter.GetServiceAccount(namespace, serviceAccountName)
if err != nil {
glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err)
return err
}
if serviceAccount.DeletionTimestamp != nil {
glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName)
return fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName)
}
if string(serviceAccount.UID) != serviceAccountUID {
glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID)
return fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID)
}
}
return nil
}

44
vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package serviceaccount
import (
"k8s.io/api/core/v1"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"gopkg.in/square/go-jose.v2/jwt"
)
func LegacyClaims(serviceAccount v1.ServiceAccount, secret v1.Secret) (*jwt.Claims, interface{}) {
return &jwt.Claims{
Subject: apiserverserviceaccount.MakeUsername(serviceAccount.Namespace, serviceAccount.Name),
}, &legacyPrivateClaims{
Namespace: serviceAccount.Namespace,
ServiceAccountName: serviceAccount.Name,
ServiceAccountUID: string(serviceAccount.UID),
SecretName: secret.Name,
}
}
const LegacyIssuer = "kubernetes/serviceaccount"
type legacyPrivateClaims struct {
ServiceAccountName string `json:"kubernetes.io/serviceaccount/service-account.name"`
ServiceAccountUID string `json:"kubernetes.io/serviceaccount/service-account.uid"`
SecretName string `json:"kubernetes.io/serviceaccount/secret.name"`
Namespace string `json:"kubernetes.io/serviceaccount/namespace"`
}
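
Wiring the two pieces together, a hedged sketch that mints a legacy token for a service account/secret pair (the key and objects are placeholders supplied by the caller):

import (
	"k8s.io/api/core/v1"

	"k8s.io/kubernetes/pkg/serviceaccount"
)

// mintLegacyToken is illustrative; key must be an *rsa.PrivateKey or
// *ecdsa.PrivateKey, as JWTTokenGenerator requires.
func mintLegacyToken(sa v1.ServiceAccount, secret v1.Secret, key interface{}) (string, error) {
	gen := serviceaccount.JWTTokenGenerator(serviceaccount.LegacyIssuer, key)
	public, private := serviceaccount.LegacyClaims(sa, secret)
	// Private claims are applied first; overlapping public claims and the
	// issuer take precedence.
	return gen.GenerateToken(public, private)
}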

View file

@ -109,7 +109,7 @@ var _ timer = realTimer{}
// multiple runs are serialized. If the function needs to hold locks, it must
// take them internally.
//
// Runs of the funtion will have at least minInterval between them (from
// Runs of the function will have at least minInterval between them (from
// completion to next start), except that up to bursts may be allowed. Burst
// runs are "accumulated" over time, one per minInterval up to burstRuns total.
// This can be used, for example, to mitigate the impact of expensive operations

View file

@ -56,7 +56,7 @@ func (m *execMounter) Mount(source string, target string, fstype string, options
return m.doExecMount(source, target, fstype, options)
}
// doExecMount calls exec(mount <waht> <where>) using given exec interface.
// doExecMount calls exec(mount <what> <where>) using given exec interface.
func (m *execMounter) doExecMount(source, target, fstype string, options []string) error {
glog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options)
mountArgs := makeMountArgs(source, target, fstype, options)

View file

@ -19,7 +19,9 @@ limitations under the License.
package mount
import (
"os"
"path/filepath"
"strings"
)
type FileType string
@ -124,12 +126,6 @@ type SafeFormatAndMount struct {
// disk is already formatted or it is being mounted as read-only, it
// will be mounted without formatting.
func (mounter *SafeFormatAndMount) FormatAndMount(source string, target string, fstype string, options []string) error {
// Don't attempt to format if mounting as readonly. Go straight to mounting.
for _, option := range options {
if option == "ro" {
return mounter.Interface.Mount(source, target, fstype, options)
}
}
return mounter.formatAndMount(source, target, fstype, options)
}
@ -208,6 +204,12 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) {
// IsLikelyNotMountPoint provides a quick check
// to determine whether file IS A mountpoint
notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file)
if notMntErr != nil && os.IsPermission(notMntErr) {
// We were not allowed to do the simple stat() check, e.g. on NFS with
// root_squash. Fall back to /proc/mounts check below.
notMnt = true
notMntErr = nil
}
if notMntErr != nil {
return notMnt, notMntErr
}
@ -254,3 +256,21 @@ func isBind(options []string) (bool, []string) {
return bind, bindRemountOpts
}
// TODO: this is a workaround for the unmount device issue caused by gci mounter.
// In a GCI cluster, if the gci mounter is used for mounting, the container started by the
// mounter script will cause additional mounts to be created in the container. Since these
// mounts are irrelevant to the original mounts, they should not be considered when checking
// the mount references. The current solution is to filter out those mount paths that contain
// the string of the original mount path.
// We plan to work on a better approach to solve this issue.
func HasMountRefs(mountPath string, mountRefs []string) bool {
count := 0
for _, ref := range mountRefs {
if !strings.Contains(ref, mountPath) {
count++
}
}
return count > 0
}
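
Note the subtlety in the helper above: it returns true only when at least one reference does not contain the original mount path, so references created under the mount path itself (the gci-mounter artifacts) are ignored. A small illustration (the paths are hypothetical):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/mount"
)

func main() {
	mountPath := "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/disk1"
	refs := []string{
		mountPath + "/container", // contains mountPath: treated as a mounter artifact, ignored
		"/var/lib/kubelet/pods/uid/volumes/kubernetes.io~gce-pd/disk1", // unrelated reference
	}
	fmt.Println(mount.HasMountRefs(mountPath, refs)) // true: one real reference remains
}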

View file

@ -19,6 +19,7 @@ limitations under the License.
package mount
import (
"errors"
"fmt"
"os"
"os/exec"
@ -69,7 +70,7 @@ func New(mounterPath string) Interface {
}
// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
// be an emtpy string in case it's not required, e.g. for remount, or for auto filesystem
// be an empty string in case it's not required, e.g. for remount, or for auto filesystem
// type, where kernel handles fstype for you. The mount 'options' is a list of options,
// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is
// required, call Mount with an empty string list or nil.
@ -313,7 +314,7 @@ func exclusiveOpenFailsOnDevice(pathname string) (bool, error) {
}
if !isDevice {
glog.Errorf("Path %q is not refering to a device.", pathname)
glog.Errorf("Path %q is not referring to a device.", pathname)
return false, nil
}
fd, errno := unix.Open(pathname, unix.O_RDONLY|unix.O_EXCL, 0)
@ -472,23 +473,33 @@ func (mounter *Mounter) ExistsPath(pathname string) bool {
// formatAndMount uses unix utils to format and mount the given disk
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
readOnly := false
for _, option := range options {
if option == "ro" {
readOnly = true
break
}
}
options = append(options, "defaults")
// Run fsck on the disk to fix repairable issues
glog.V(4).Infof("Checking for issues with fsck on disk: %s", source)
args := []string{"-a", source}
out, err := mounter.Exec.Run("fsck", args...)
if err != nil {
ee, isExitError := err.(utilexec.ExitError)
switch {
case err == utilexec.ErrExecutableNotFound:
glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
glog.Infof("Device %s has errors which were corrected by fsck.", source)
case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out))
case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
glog.Infof("`fsck` error %s", string(out))
if !readOnly {
// Run fsck on the disk to fix repairable issues, only do this for volumes requested as rw.
glog.V(4).Infof("Checking for issues with fsck on disk: %s", source)
args := []string{"-a", source}
out, err := mounter.Exec.Run("fsck", args...)
if err != nil {
ee, isExitError := err.(utilexec.ExitError)
switch {
case err == utilexec.ErrExecutableNotFound:
glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
glog.Infof("Device %s has errors which were corrected by fsck.", source)
case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out))
case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
glog.Infof("`fsck` error %s", string(out))
}
}
}
@ -503,8 +514,13 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
return err
}
if existingFormat == "" {
if readOnly {
// Don't attempt to format if mounting as readonly, return an error to reflect this.
return errors.New("failed to mount unformatted volume as read only")
}
// Disk is unformatted so format it.
args = []string{source}
args := []string{source}
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
@ -536,36 +552,57 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
return mountErr
}
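For reference, the fsck exit statuses matched in the branch above follow fsck(8); a sketch of the constants defined elsewhere in this file (values per the man page):
const (
	// 'fsck' found errors and corrected them
	fsckErrorsCorrected = 1
	// 'fsck' found errors but exited without correcting them
	fsckErrorsUncorrected = 4
)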
// GetDiskFormat uses 'lsblk' to see if the given disk is unformatted
// GetDiskFormat uses 'blkid' to see if the given disk is unformatted
func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
args := []string{"-n", "-o", "FSTYPE", disk}
glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args)
dataOut, err := mounter.Exec.Run("lsblk", args...)
args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk}
glog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args)
dataOut, err := mounter.Exec.Run("blkid", args...)
output := string(dataOut)
glog.V(4).Infof("Output: %q", output)
glog.V(4).Infof("Output: %q, err: %v", output, err)
if err != nil {
if exit, ok := err.(utilexec.ExitError); ok {
if exit.ExitStatus() == 2 {
// Disk device is unformatted.
// For `blkid`, if the specified token (TYPE/PTTYPE, etc) was
// not found, or no (specified) devices could be identified, an
// exit code of 2 is returned.
return "", nil
}
}
glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return "", err
}
// Split lsblk output into lines. Unformatted devices should contain only
// "\n". Beware of "\n\n", that's a device with one empty partition.
output = strings.TrimSuffix(output, "\n") // Avoid last empty line
var fstype, pttype string
lines := strings.Split(output, "\n")
if lines[0] != "" {
// The device is formatted
return lines[0], nil
for _, l := range lines {
if len(l) <= 0 {
// Ignore empty line.
continue
}
cs := strings.Split(l, "=")
if len(cs) != 2 {
return "", fmt.Errorf("blkid returns invalid output: %s", output)
}
// TYPE is filesystem type, and PTTYPE is partition table type, according
// to https://www.kernel.org/pub/linux/utils/util-linux/v2.21/libblkid-docs/.
if cs[0] == "TYPE" {
fstype = cs[1]
} else if cs[0] == "PTTYPE" {
pttype = cs[1]
}
}
if len(lines) == 1 {
// The device is unformatted and has no dependent devices
return "", nil
if len(pttype) > 0 {
glog.V(4).Infof("Disk %s detected partition table type: %s", pttype)
// Returns a special non-empty string as filesystem type, then kubelet
// will not format it.
return "unknown data, probably partitions", nil
}
// The device has dependent devices, most probably partitions (LVM, LUKS
// and MD RAID are reported as FSTYPE and caught above).
return "unknown data, probably partitions", nil
return fstype, nil
}
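To make the parsing concrete, blkid's -o export mode prints one KEY=VALUE pair per line; hypothetical outputs and the resulting return values:
DEVNAME=/dev/sdb
TYPE=ext4
-> GetDiskFormat returns "ext4"
DEVNAME=/dev/sdc
PTTYPE=gpt
-> GetDiskFormat returns "unknown data, probably partitions"
A completely blank disk makes blkid exit with status 2, which the error branch above maps to "".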
// isShared returns true, if given path is on a mount point that has shared

View file

@ -89,10 +89,7 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
cmdLine += fmt.Sprintf(";New-SmbGlobalMapping -RemotePath %s -Credential $Credential", source)
if output, err := exec.Command("powershell", "/c", cmdLine).CombinedOutput(); err != nil {
// we don't return error here, even though New-SmbGlobalMapping failed, we still make it successful,
// will return error when Windows 2016 RS3 is ready on azure
glog.Errorf("azureMount: SmbGlobalMapping failed: %v, only SMB mount is supported now, output: %q", err, string(output))
return os.MkdirAll(target, 0755)
return fmt.Errorf("azureMount: SmbGlobalMapping failed: %v, only SMB mount is supported now, output: %q", err, string(output))
}
}

View file

@ -165,7 +165,7 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) {
glog.V(5).Infof("nsenter findmnt args: %v", args)
out, err := n.ne.Exec("findmnt", args).CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed findmnt command for path %s: %v", file, err)
glog.V(2).Infof("Failed findmnt command for path %s: %s %v", file, out, err)
// Different operating systems behave differently for paths which are not mount points.
// On older versions (e.g. 2.20.1) we'd get error, on newer ones (e.g. 2.26.2) we'd get "/".
// It's safer to assume that it's not a mount point.

View file

@ -654,7 +654,7 @@ func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableV
return nil, fmt.Errorf("no creatable volume plugin matched")
}
// FindAttachablePluginBySpec fetches a persistent volume plugin by name.
// FindAttachablePluginBySpec fetches a persistent volume plugin by spec.
// Unlike the other "FindPlugin" methods, this does not return error if no
// plugin is found. All volumes require a mounter and unmounter, but not
// every volume will have an attacher/detacher.

View file

@ -38,6 +38,13 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
)
const (
// GB - GigaByte size
GB = 1000 * 1000 * 1000
// GIB - GibiByte size
GIB = 1024 * 1024 * 1024
)
type RecycleEventRecorder func(eventtype, message string)
// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
@ -84,12 +91,11 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
if deleteErr != nil {
return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
}
// Recycler will try again and the old pod will be hopefuly deleted
// Recycler will try again and the old pod will be hopefully deleted
// at that time.
return fmt.Errorf("old recycler pod found, will retry later")
} else {
return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err)
}
return fmt.Errorf("unexpected error creating recycler pod: %+v", err)
}
err = waitForPod(pod, recyclerClient, podCh)
@ -274,9 +280,8 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers
timeout := (pvSize / giSize) * int64(timeoutIncrement)
if timeout < int64(minimumTimeout) {
return int64(minimumTimeout)
} else {
return timeout
}
return timeout
}
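Worked example, assuming hypothetical settings of minimumTimeout=300 and timeoutIncrement=30: a 100Gi PV yields (100 * 30) = 3000 seconds for the recycler pod, while a 5Gi PV yields 150, which falls below the floor and is bumped up to 300.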
// RoundUpSize calculates how many allocation units are needed to accommodate
@ -288,6 +293,18 @@ func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}
// RoundUpToGB rounds up the given quantity to chunks of GB
func RoundUpToGB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GB)
}
// RoundUpToGiB rounds up the given quantity to chunks of GiB
func RoundUpToGiB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GIB)
}
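A quick worked example of the rounding helpers (request size assumed):
size := resource.MustParse("1500M") // 1,500,000,000 bytes
RoundUpToGB(size)  // 2 -> (1.5e9 + 1e9 - 1) / 1e9
RoundUpToGiB(size) // 2 -> ~1.397 GiB, rounded up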
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
@ -304,7 +321,7 @@ func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
return prefix + "-" + pvName
}
// Check if the path from the mounter is empty.
// GetPath checks if the path from the mounter is empty.
func GetPath(mounter Mounter) (string, error) {
path := mounter.GetPath()
if path == "" {
@ -313,7 +330,7 @@ func GetPath(mounter Mounter) (string, error) {
return path, nil
}
// ChooseZone implements our heuristics for choosing a zone for volume creation based on the volume name
// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name
// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name.
// However, if the PVCName ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
@ -495,3 +512,12 @@ func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, req
}
return true
}
// GetWindowsPath converts the given path to a Windows path
func GetWindowsPath(path string) string {
windowsPath := strings.Replace(path, "/", "\\", -1)
if strings.HasPrefix(windowsPath, "\\") {
windowsPath = "c:" + windowsPath
}
return windowsPath
}
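For instance (path hypothetical), an absolute unix path picks up the assumed c: drive prefix:
GetWindowsPath("/var/lib/kubelet/pods/uid/volumes") // returns c:\var\lib\kubelet\pods\uid\volumes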

View file

@ -88,14 +88,15 @@ const (
// The Write algorithm is:
//
// 1. The payload is validated; if the payload is invalid, the function returns
// 2. The user-visible portion of the volume is walked to determine whether any
// 2.  The current timestamped directory is detected by reading the data directory
// symlink
// 3. The old version of the volume is walked to determine whether any
// portion of the payload was deleted and is still present on disk.
// If the payload is already present on disk and there are no deleted files,
// the function returns
// 3. A check is made to determine whether data present in the payload has changed
// 4.  A new timestamped dir is created
// 5. The payload is written to the new timestamped directory
// 6.  Symlinks and directory for new user-visible files are created (if needed).
// 4. The data in the current timestamped directory is compared to the projected
// data to determine if an update is required.
// 5.  A new timestamped dir is created
// 6. The payload is written to the new timestamped directory
// 7.  Symlinks and directory for new user-visible files are created (if needed).
//
// For example, consider the files:
// <target-dir>/podName
@ -104,16 +105,12 @@ const (
//
// The user visible files are symbolic links into the internal data directory:
// <target-dir>/podName -> ..data/podName
// <target-dir>/usr/labels -> ../..data/usr/labels
// <target-dir>/k8s/annotations -> ../..data/k8s/annotations
//
// Relative links are created into the data directory for files in subdirectories.
// <target-dir>/usr -> ..data/usr
// <target-dir>/k8s -> ..data/k8s
//
// The data directory itself is a link to a timestamped directory with
// the real data:
// <target-dir>/..data -> ..2016_02_01_15_04_05.12345678/
// 7.  The current timestamped directory is detected by reading the data directory
// symlink
// 8.  A symlink to the new timestamped directory ..data_tmp is created that will
// become the new data directory
// 9.  The new data directory symlink is renamed to the data directory; rename is atomic
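A minimal sketch of the swap in steps 8-9, using the package's dataDirName ("..data") and newDataDirName ("..data_tmp") constants, error handling elided:
tmpLink := path.Join(w.targetDir, newDataDirName)
_ = os.Symlink(tsDirName, tmpLink)
// rename(2) replaces the old "..data" symlink atomically, so readers resolve
// either the old or the new timestamped tree, never a partially written mix.
_ = os.Rename(tmpLink, path.Join(w.targetDir, dataDirName))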
@ -128,31 +125,50 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
}
// (2)
pathsToRemove, err := w.pathsToRemove(cleanPayload)
dataDirPath := path.Join(w.targetDir, dataDirName)
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil {
glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
return err
if !os.IsNotExist(err) {
glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
return err
}
// although Readlink() returns "" on err, don't be fragile by relying on it (since it's not specified in docs)
// empty oldTsDir indicates that it didn't exist
oldTsDir = ""
}
oldTsPath := path.Join(w.targetDir, oldTsDir)
var pathsToRemove sets.String
// if there was no old version, there's nothing to remove
if len(oldTsDir) != 0 {
// (3)
pathsToRemove, err = w.pathsToRemove(cleanPayload, oldTsPath)
if err != nil {
glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
return err
}
// (4)
if should, err := shouldWritePayload(cleanPayload, oldTsPath); err != nil {
glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
return err
} else if !should && len(pathsToRemove) == 0 {
glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
return nil
} else {
glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
}
}
// (3)
if should, err := w.shouldWritePayload(cleanPayload); err != nil {
glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
return err
} else if !should && len(pathsToRemove) == 0 {
glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
return nil
} else {
glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
}
// (4)
// (5)
tsDir, err := w.newTimestampDir()
if err != nil {
glog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err)
return err
}
tsDirName := filepath.Base(tsDir)
// (5)
// (6)
if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil {
glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
return err
@ -160,21 +176,12 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
}
// (6)
// (7)
if err = w.createUserVisibleFiles(cleanPayload); err != nil {
glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
return err
}
// (7)
_, tsDirName := filepath.Split(tsDir)
dataDirPath := path.Join(w.targetDir, dataDirName)
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
return err
}
// (8)
newDataDirPath := path.Join(w.targetDir, newDataDirName)
if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
@ -206,7 +213,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
// (11)
if len(oldTsDir) > 0 {
if err = os.RemoveAll(path.Join(w.targetDir, oldTsDir)); err != nil {
if err = os.RemoveAll(oldTsPath); err != nil {
glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
return err
}
@ -270,9 +277,9 @@ func validatePath(targetPath string) error {
}
// shouldWritePayload returns whether the payload should be written to disk.
func (w *AtomicWriter) shouldWritePayload(payload map[string]FileProjection) (bool, error) {
func shouldWritePayload(payload map[string]FileProjection, oldTsDir string) (bool, error) {
for userVisiblePath, fileProjection := range payload {
shouldWrite, err := w.shouldWriteFile(path.Join(w.targetDir, userVisiblePath), fileProjection.Data)
shouldWrite, err := shouldWriteFile(path.Join(oldTsDir, userVisiblePath), fileProjection.Data)
if err != nil {
return false, err
}
@ -286,7 +293,7 @@ func (w *AtomicWriter) shouldWritePayload(payload map[string]FileProjection) (bo
}
// shouldWriteFile returns whether a new version of a file should be written to disk.
func (w *AtomicWriter) shouldWriteFile(path string, content []byte) (bool, error) {
func shouldWriteFile(path string, content []byte) (bool, error) {
_, err := os.Lstat(path)
if os.IsNotExist(err) {
return true, nil
@ -300,19 +307,15 @@ func (w *AtomicWriter) shouldWriteFile(path string, content []byte) (bool, error
return (bytes.Compare(content, contentOnFs) != 0), nil
}
// pathsToRemove walks the user-visible portion of the target directory and
// pathsToRemove walks the current version of the data directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.String, error) {
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir string) (sets.String, error) {
paths := sets.NewString()
visitor := func(path string, info os.FileInfo, err error) error {
if path == w.targetDir {
return nil
}
relativePath := strings.TrimPrefix(path, w.targetDir)
relativePath := strings.TrimPrefix(path, oldTsDir)
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
if strings.HasPrefix(relativePath, "..") {
if relativePath == "" {
return nil
}
@ -320,7 +323,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.St
return nil
}
err := filepath.Walk(w.targetDir, visitor)
err := filepath.Walk(oldTsDir, visitor)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
@ -348,7 +351,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.St
// newTimestampDir creates a new timestamp directory
func (w *AtomicWriter) newTimestampDir() (string, error) {
tsDir, err := ioutil.TempDir(w.targetDir, fmt.Sprintf("..%s.", time.Now().Format("1981_02_01_15_04_05")))
tsDir, err := ioutil.TempDir(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
if err != nil {
glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err)
return "", err
@ -405,34 +408,22 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
//
// Viz:
// For files: "bar", "foo/bar", "baz/bar", "foo/baz/blah"
// the following symlinks and subdirectories are created:
// bar -> ..data/bar
// foo/bar -> ../..data/foo/bar
// baz/bar -> ../..data/baz/bar
// foo/baz/blah -> ../../..data/foo/baz/blah
// the following symlinks are created:
// bar -> ..data/bar
// foo -> ..data/foo
// baz -> ..data/baz
func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection) error {
for userVisiblePath := range payload {
dir, _ := filepath.Split(userVisiblePath)
subDirs := 0
if len(dir) > 0 {
// If dir is not empty, the projection path contains at least one
// subdirectory (example: userVisiblePath := "foo/bar").
// Since filepath.Split leaves a trailing path separator, in this
// example, dir = "foo/". In order to calculate the number of
// subdirectories, we must subtract 1 from the number returned by split.
subDirs = len(strings.Split(dir, string(os.PathSeparator))) - 1
err := os.MkdirAll(path.Join(w.targetDir, dir), os.ModePerm)
if err != nil {
return err
}
slashpos := strings.Index(userVisiblePath, string(os.PathSeparator))
if slashpos == -1 {
slashpos = len(userVisiblePath)
}
_, err := os.Readlink(path.Join(w.targetDir, userVisiblePath))
linkname := userVisiblePath[:slashpos]
_, err := os.Readlink(path.Join(w.targetDir, linkname))
if err != nil && os.IsNotExist(err) {
// The link into the data directory for this path doesn't exist; create it,
// respecting the number of subdirectories necessary to link
// correctly back into the data directory.
visibleFile := path.Join(w.targetDir, userVisiblePath)
dataDirFile := path.Join(strings.Repeat("../", subDirs), dataDirName, userVisiblePath)
// The link into the data directory for this path doesn't exist; create it
visibleFile := path.Join(w.targetDir, linkname)
dataDirFile := path.Join(dataDirName, linkname)
err = os.Symlink(dataDirFile, visibleFile)
if err != nil {
@ -446,13 +437,18 @@ func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection)
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
orderedPaths := paths.List()
for ii := len(orderedPaths) - 1; ii >= 0; ii-- {
if err := os.Remove(path.Join(w.targetDir, orderedPaths[ii])); err != nil {
glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, orderedPaths[ii], err)
return err
ps := string(os.PathSeparator)
var lasterr error
for p := range paths {
// only remove symlinks from the volume root directory (i.e. items that don't contain '/')
if strings.Contains(p, ps) {
continue
}
if err := os.Remove(path.Join(w.targetDir, p)); err != nil {
glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err)
lasterr = err
}
}
return nil
return lasterr
}
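For example, given stale paths {"bar", "foo/bar"}, only the root-level "bar" symlink is removed here; "foo/bar" contains a separator, so it is skipped and vanishes with the old timestamped directory instead:
paths := sets.NewString("bar", "foo/bar")
_ = w.removeUserVisiblePaths(paths) // removes <targetDir>/bar only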

View file

@ -19,6 +19,7 @@ package util
// DeviceUtil is a util for common device methods
type DeviceUtil interface {
FindMultipathDeviceForDevice(disk string) string
FindSlaveDevicesOnMultipath(disk string) []string
}
type deviceHandler struct {

View file

@ -20,6 +20,7 @@ package util
import (
"errors"
"path"
"strings"
)
@ -59,3 +60,23 @@ func findDeviceForPath(path string, io IoUtil) (string, error) {
}
return "", errors.New("Illegal path for device " + devicePath)
}
// FindSlaveDevicesOnMultipath, given a dm name like /dev/dm-1, finds all devices
// which are managed by the devicemapper device dm-1.
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
var devices []string
io := handler.get_io
// Split path /dev/dm-1 into "", "dev", "dm-1"
parts := strings.Split(dm, "/")
if len(parts) != 3 || !strings.HasPrefix(parts[1], "dev") {
return devices
}
disk := parts[2]
slavesPath := path.Join("/sys/block/", disk, "/slaves/")
if files, err := io.ReadDir(slavesPath); err == nil {
for _, f := range files {
devices = append(devices, path.Join("/dev/", f.Name()))
}
}
return devices
}
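Hypothetical usage, assuming the package's NewDeviceHandler/NewIOHandler constructors and a /sys/block/dm-1/slaves directory listing sdb and sdc:
handler := NewDeviceHandler(NewIOHandler())
devs := handler.FindSlaveDevicesOnMultipath("/dev/dm-1")
// devs == []string{"/dev/sdb", "/dev/sdc"}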

View file

@ -22,3 +22,9 @@ package util
func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
return ""
}
// FindSlaveDevicesOnMultipath unsupported returns an empty slice
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(disk string) []string {
out := []string{}
return out
}

View file

@ -16,53 +16,10 @@ limitations under the License.
package util
import (
"k8s.io/api/core/v1"
)
const (
// Name of finalizer on PVCs that have a running pod.
PVCProtectionFinalizer = "kubernetes.io/pvc-protection"
// Name of finalizer on PVs that are bound by PVCs
PVProtectionFinalizer = "kubernetes.io/pv-protection"
)
// IsPVCBeingDeleted returns:
// true: in case PVC is being deleted, i.e. ObjectMeta.DeletionTimestamp is set
// false: in case PVC is not being deleted, i.e. ObjectMeta.DeletionTimestamp is nil
func IsPVCBeingDeleted(pvc *v1.PersistentVolumeClaim) bool {
return pvc.ObjectMeta.DeletionTimestamp != nil
}
// IsProtectionFinalizerPresent returns true in case PVCProtectionFinalizer is
// present among the pvc.Finalizers
func IsProtectionFinalizerPresent(pvc *v1.PersistentVolumeClaim) bool {
for _, finalizer := range pvc.Finalizers {
if finalizer == PVCProtectionFinalizer {
return true
}
}
return false
}
// RemoveProtectionFinalizer returns pvc without PVCProtectionFinalizer in case
// it's present in pvc.Finalizers. It expects that pvc is writable (i.e. is not
// informer's cached copy.)
func RemoveProtectionFinalizer(pvc *v1.PersistentVolumeClaim) {
newFinalizers := make([]string, 0)
for _, finalizer := range pvc.Finalizers {
if finalizer != PVCProtectionFinalizer {
newFinalizers = append(newFinalizers, finalizer)
}
}
if len(newFinalizers) == 0 {
// Sanitize for unit tests so we don't need to distinguish empty array
// and nil.
newFinalizers = nil
}
pvc.Finalizers = newFinalizers
}
// AddProtectionFinalizer adds PVCProtectionFinalizer to pvc. It expects that
// pvc is writable (i.e. is not informer's cached copy.)
func AddProtectionFinalizer(pvc *v1.PersistentVolumeClaim) {
pvc.Finalizers = append(pvc.Finalizers, PVCProtectionFinalizer)
}

View file

@ -49,12 +49,12 @@ func registerMetrics() {
}
// OperationCompleteHook returns a hook to call when an operation is completed
func OperationCompleteHook(plugin, operationName string) func(error) {
func OperationCompleteHook(plugin, operationName string) func(*error) {
requestTime := time.Now()
opComplete := func(err error) {
opComplete := func(err *error) {
timeTaken := time.Since(requestTime).Seconds()
// Create metric with operation name and plugin name
if err != nil {
if *err != nil {
storageOperationErrorMetric.WithLabelValues(plugin, operationName).Inc()
} else {
storageOperationMetric.WithLabelValues(plugin, operationName).Observe(timeTaken)
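The switch from func(error) to func(*error) lets a caller install the hook with defer and a named return, so the metric observes the error as it finally stands; a hedged sketch of the intended pattern (plugin and helper names hypothetical):
func mountVolume() (err error) {
	opComplete := OperationCompleteHook("example-plugin", "volume_mount")
	defer opComplete(&err) // *err is read only when the deferred call runs
	err = doMount()
	return err
}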

125
vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go generated vendored Normal file
View file

@ -0,0 +1,125 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
)
var (
knownResizeConditions map[v1.PersistentVolumeClaimConditionType]bool = map[v1.PersistentVolumeClaimConditionType]bool{
v1.PersistentVolumeClaimFileSystemResizePending: true,
v1.PersistentVolumeClaimResizing: true,
}
)
type resizeProcessStatus struct {
condition v1.PersistentVolumeClaimCondition
processed bool
}
// ClaimToClaimKey returns the namespace/name string for a pvc
func ClaimToClaimKey(claim *v1.PersistentVolumeClaim) string {
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
}
// MarkFSResizeFinished marks file system resizing as done
func MarkFSResizeFinished(
pvc *v1.PersistentVolumeClaim,
capacity v1.ResourceList,
kubeClient clientset.Interface) error {
newPVC := pvc.DeepCopy()
newPVC.Status.Capacity = capacity
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{})
_, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return err
}
// PatchPVCStatus updates PVC status using PATCH verb
func PatchPVCStatus(
oldPVC *v1.PersistentVolumeClaim,
newPVC *v1.PersistentVolumeClaim,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
pvcName := oldPVC.Name
oldData, err := json.Marshal(oldPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal oldData for pvc %q with %v", pvcName, err)
}
newData, err := json.Marshal(newPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal newData for pvc %q with %v", pvcName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to CreateTwoWayMergePatch for pvc %q with %v ", pvcName, err)
}
updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace).
Patch(pvcName, types.StrategicMergePatchType, patchBytes, "status")
if updateErr != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to patch PVC %q with %v", pvcName, updateErr)
}
return updatedClaim, nil
}
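Because the body is a strategic two-way merge of the old and new objects, the PATCH carries only the fields that actually changed; a hypothetical payload for a capacity update might be:
{"status":{"capacity":{"storage":"2Gi"}}}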
// MergeResizeConditionOnPVC updates pvc with requested resize conditions
// leaving other conditions untouched.
func MergeResizeConditionOnPVC(
pvc *v1.PersistentVolumeClaim,
resizeConditions []v1.PersistentVolumeClaimCondition) *v1.PersistentVolumeClaim {
resizeConditionMap := map[v1.PersistentVolumeClaimConditionType]*resizeProcessStatus{}
for _, condition := range resizeConditions {
resizeConditionMap[condition.Type] = &resizeProcessStatus{condition, false}
}
oldConditions := pvc.Status.Conditions
newConditions := []v1.PersistentVolumeClaimCondition{}
for _, condition := range oldConditions {
// If the condition is not of a resize type, we keep it.
if _, ok := knownResizeConditions[condition.Type]; !ok {
newConditions = append(newConditions, condition)
continue
}
if newCondition, ok := resizeConditionMap[condition.Type]; ok {
if newCondition.condition.Status != condition.Status {
newConditions = append(newConditions, newCondition.condition)
} else {
newConditions = append(newConditions, condition)
}
newCondition.processed = true
}
}
// append all unprocessed conditions
for _, newCondition := range resizeConditionMap {
if !newCondition.processed {
newConditions = append(newConditions, newCondition.condition)
}
}
pvc.Status.Conditions = newConditions
return pvc
}
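Hypothetical usage while a resize is in progress; unrelated conditions survive, and an existing Resizing condition is replaced only if its status changed:
pvc = MergeResizeConditionOnPVC(pvc, []v1.PersistentVolumeClaimCondition{{
	Type:   v1.PersistentVolumeClaimResizing,
	Status: v1.ConditionTrue,
}})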

View file

@ -23,6 +23,7 @@ import (
"path"
"path/filepath"
"strings"
"syscall"
"github.com/golang/glog"
"k8s.io/api/core/v1"
@ -45,7 +46,6 @@ const (
ErrDeviceNotFound = "device not found"
ErrDeviceNotSupported = "device not supported"
ErrNotAvailable = "not available"
)
// IsReady checks for the existence of a regular file
@ -96,29 +96,42 @@ func UnmountPath(mountPath string, mounter mount.Interface) error {
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error {
if pathExists, pathErr := PathExists(mountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
pathExists, pathErr := PathExists(mountPath)
if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
return nil
}
var notMnt bool
var err error
if extensiveMountPointCheck {
notMnt, err = mount.IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
corruptedMnt := isCorruptedMnt(pathErr)
if pathErr != nil && !corruptedMnt {
return fmt.Errorf("Error checking path: %v", pathErr)
}
return doUnmountMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt)
}
if err != nil {
return err
}
// doUnmountMountPoint is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
// if corruptedMnt is true, it means that the mountPath is a corrupted mount point; it is taken as an argument for convenience of testing
func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool, corruptedMnt bool) error {
if !corruptedMnt {
var notMnt bool
var err error
if extensiveMountPointCheck {
notMnt, err = mount.IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
}
if notMnt {
glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
if err != nil {
return err
}
if notMnt {
glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
}
}
// Unmount the mount path
@ -128,7 +141,7 @@ func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMount
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
if mntErr != nil {
return err
return mntErr
}
if notMnt {
glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
@ -144,11 +157,32 @@ func PathExists(path string) (bool, error) {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else if isCorruptedMnt(err) {
return true, err
} else {
return false, err
}
}
// isCorruptedMnt returns true if err is about a corrupted mount point
func isCorruptedMnt(err error) bool {
if err == nil {
return false
}
var underlyingError error
switch pe := err.(type) {
case nil:
return false
case *os.PathError:
underlyingError = pe.Err
case *os.LinkError:
underlyingError = pe.Err
case *os.SyscallError:
underlyingError = pe.Err
}
return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE
}
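A minimal sketch of what this catches, assuming a stale NFS mount: stat() typically fails with ESTALE (or ENOTCONN for a dead FUSE daemon) wrapped in an *os.PathError, which isCorruptedMnt unwraps.
if _, err := os.Stat("/mnt/stale-nfs"); err != nil && isCorruptedMnt(err) {
	// treat the path as a corrupted mount point and unmount it anyway
}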
// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
@ -287,7 +321,7 @@ type BlockVolumePathHandler interface {
UnmapDevice(mapPath string, linkName string) error
// RemovePath removes a file or directory on specified map path
RemoveMapPath(mapPath string) error
// IsSymlinkExist retruns true if specified symbolic link exists
// IsSymlinkExist returns true if specified symbolic link exists
IsSymlinkExist(mapPath string) (bool, error)
// GetDeviceSymlinkRefs searches symbolic links under global map path
GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error)
@ -346,7 +380,7 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName
}
// Remove old symbolic link(or file) then create new one.
// This should be done because current symbolic link is
// stale accross node reboot.
// stale across node reboot.
linkPath := path.Join(mapPath, string(linkName))
if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
return err

View file

@ -51,7 +51,7 @@ func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return "", errors.New(ErrNotAvailable)
return "", errors.New(ErrDeviceNotFound)
}
if err != nil {
return "", fmt.Errorf("not attachable: %v", err)
@ -84,9 +84,11 @@ func (v VolumePathHandler) RemoveLoopDevice(device string) error {
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
if !strings.Contains(string(out), "No such device or address") {
return err
if _, err := os.Stat(device); os.IsNotExist(err) {
return nil
}
glog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out)
return err
}
return nil
}