vendor: bump to kube 1.10/master

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-12-11 16:45:48 +01:00
parent a85ea609db
commit f317ffce5b
No known key found for this signature in database
GPG key ID: B2BEAD150DE936B9
535 changed files with 52955 additions and 17528 deletions


@@ -18,10 +18,9 @@ package service
import (
"fmt"
"strings"
"k8s.io/kubernetes/pkg/api"
api "k8s.io/kubernetes/pkg/apis/core"
netsets "k8s.io/kubernetes/pkg/util/net/sets"
"strings"
)
const (
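
For orientation, a hedged sketch (not part of the diff) of what this import change means downstream: the api alias on the new k8s.io/kubernetes/pkg/apis/core path lets existing api.* references keep compiling after the package move. The helper and values below are hypothetical.

package main

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/apis/core"
)

// clusterIPOf is an illustrative caller; api.Service now resolves to the relocated core package.
func clusterIPOf(svc *api.Service) string {
	return svc.Spec.ClusterIP
}

func main() {
	fmt.Println(clusterIPOf(&api.Service{Spec: api.ServiceSpec{ClusterIP: "10.0.0.1"}}))
}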

File diff suppressed because it is too large


@@ -1,84 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// ValidateEvent makes sure that the event makes sense.
func ValidateEvent(event *api.Event) field.ErrorList {
allErrs := field.ErrorList{}
// Make sure event.Namespace and the involvedObject.Namespace agree
if len(event.InvolvedObject.Namespace) == 0 {
// event.Namespace must also be empty (or "default", for compatibility with old clients)
if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
} else {
// event namespace must match
if event.Namespace != event.InvolvedObject.Namespace {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
}
// For kinds we recognize, make sure involvedObject.Namespace is set for namespaced kinds
if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil {
if namespaced && len(event.InvolvedObject.Namespace) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind)))
}
if !namespaced && len(event.InvolvedObject.Namespace) > 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind)))
}
}
for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) {
allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg))
}
return allErrs
}
// Check whether the kind in groupVersion is scoped at the root of the api hierarchy
func isNamespacedKind(kind, groupVersion string) (bool, error) {
gv, err := schema.ParseGroupVersion(groupVersion)
if err != nil {
return false, err
}
g, err := legacyscheme.Registry.Group(gv.Group)
if err != nil {
return false, err
}
restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: kind}, gv.Version)
if err != nil {
return false, err
}
scopeName := restMapping.Scope.Name()
if scopeName == meta.RESTScopeNameNamespace {
return true, nil
}
return false, nil
}
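
For context, a minimal usage sketch of the removed helper, written as if it sat in the same validation package and reused this file's imports; the event literal and names below are illustrative only.

// Hypothetical caller of the (removed) ValidateEvent helper.
func exampleValidateEvent() {
	evt := &api.Event{
		ObjectMeta:     metav1.ObjectMeta{Name: "web-0.started", Namespace: "default"},
		InvolvedObject: api.ObjectReference{Kind: "Pod", Name: "web-0", Namespace: "default", APIVersion: "v1"},
	}
	if errs := ValidateEvent(evt); len(errs) > 0 {
		fmt.Println("event rejected:", errs.ToAggregate())
	}
}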


@@ -0,0 +1,34 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric
// specs when converting the `Metrics` field from autoscaling/v2beta1
const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics"
// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric
// statuses when converting the `CurrentMetrics` field from autoscaling/v2beta1
const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics"
// HorizontalPodAutoscalerConditionsAnnotation is the annotation which holds the conditions
// of an HPA when converting the `Conditions` field from autoscaling/v2beta1
const HorizontalPodAutoscalerConditionsAnnotation = "autoscaling.alpha.kubernetes.io/conditions"
// DefaultCPUUtilization is the default value for CPU utilization, provided no other
// metrics are present. This is here because it's used by both the v2beta1 defaulting
// logic, and the pseudo-defaulting done in v1 conversion.
const DefaultCPUUtilization = 80
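
A hedged sketch of how these keys are used: metric specs are serialized to JSON and stashed in the annotation during conversion. The helper name is hypothetical, the exact payload type used by the real versioned conversion code may differ, and the snippet needs encoding/json.

// Hypothetical helper in this package; assumes the payload is JSON-encoded []MetricSpec.
func stashMetricsInAnnotation(hpa *HorizontalPodAutoscaler) error {
	raw, err := json.Marshal(hpa.Spec.Metrics)
	if err != nil {
		return err
	}
	if hpa.Annotations == nil {
		hpa.Annotations = map[string]string{}
	}
	hpa.Annotations[MetricSpecsAnnotation] = string(raw)
	return nil
}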

vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go (generated, vendored; new file, 19 lines)

@@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package autoscaling // import "k8s.io/kubernetes/pkg/apis/autoscaling"


@@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "autoscaling"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Scale{},
&HorizontalPodAutoscaler{},
&HorizontalPodAutoscalerList{},
)
return nil
}
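
A minimal consumer sketch (not part of the diff) showing the registration plumbing above in use; the function name is illustrative.

func exampleRegister() {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme now knows the internal HPA kind under the "autoscaling" group.
	_ = scheme.Recognizes(SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"))
}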

vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go (generated, vendored; new file, 363 lines)

@@ -0,0 +1,363 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/kubernetes/pkg/apis/core"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Scale represents a scaling request for a resource.
type Scale struct {
metav1.TypeMeta
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
// +optional
Spec ScaleSpec
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
// +optional
Status ScaleStatus
}
// ScaleSpec describes the attributes of a scale subresource.
type ScaleSpec struct {
// desired number of instances for the scaled object.
// +optional
Replicas int32
}
// ScaleStatus represents the current status of a scale subresource.
type ScaleStatus struct {
// actual number of observed instances of the scaled object.
Replicas int32
// label query over pods that should match the replicas count. This is same
// as the label selector but in the string format to avoid introspection
// by clients. The string will be in the same format as the query-param syntax.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector string
}
// CrossVersionObjectReference contains enough information to let you identify the referred resource.
type CrossVersionObjectReference struct {
// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
Kind string
// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
Name string
// API version of the referent
// +optional
APIVersion string
}
// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
type HorizontalPodAutoscalerSpec struct {
// ScaleTargetRef points to the target resource to scale, and is used to the pods for which metrics
// should be collected, as well as to actually change the replica count.
ScaleTargetRef CrossVersionObjectReference
// MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
// It defaults to 1 pod.
// +optional
MinReplicas *int32
// MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
// It cannot be less that minReplicas.
MaxReplicas int32
// Metrics contains the specifications for which to use to calculate the
// desired replica count (the maximum replica count across all metrics will
// be used). The desired replica count is calculated multiplying the
// ratio between the target value and the current value by the current
// number of pods. Ergo, metrics used must decrease as the pod count is
// increased, and vice-versa. See the individual metric source types for
// more information about how each type of metric must respond.
// +optional
Metrics []MetricSpec
}
// MetricSourceType indicates the type of metric.
type MetricSourceType string
var (
// ObjectMetricSourceType is a metric describing a kubernetes object
// (for example, hits-per-second on an Ingress object).
ObjectMetricSourceType MetricSourceType = "Object"
// PodsMetricSourceType is a metric describing each pod in the current scale
// target (for example, transactions-processed-per-second). The values
// will be averaged together before being compared to the target value.
PodsMetricSourceType MetricSourceType = "Pods"
// ResourceMetricSourceType is a resource metric known to Kubernetes, as
// specified in requests and limits, describing each pod in the current
// scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics (the "pods" source).
ResourceMetricSourceType MetricSourceType = "Resource"
)
// MetricSpec specifies how to scale based on a single metric
// (only `type` and one other matching field should be set at once).
type MetricSpec struct {
// Type is the type of metric source. It should match one of the fields below.
Type MetricSourceType
// Object refers to a metric describing a single kubernetes object
// (for example, hits-per-second on an Ingress object).
// +optional
Object *ObjectMetricSource
// Pods refers to a metric describing each pod in the current scale target
// (for example, transactions-processed-per-second). The values will be
// averaged together before being compared to the target value.
// +optional
Pods *PodsMetricSource
// Resource refers to a resource metric (such as those specified in
// requests and limits) known to Kubernetes describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricSource
}
// ObjectMetricSource indicates how to scale on a metric describing a
// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricSource struct {
// Target is the described Kubernetes object.
Target CrossVersionObjectReference
// MetricName is the name of the metric in question.
MetricName string
// TargetValue is the target value of the metric (as a quantity).
TargetValue resource.Quantity
}
// PodsMetricSource indicates how to scale on a metric describing each pod in
// the current scale target (for example, transactions-processed-per-second).
// The values will be averaged together before being compared to the target
// value.
type PodsMetricSource struct {
// MetricName is the name of the metric in question
MetricName string
// TargetAverageValue is the target value of the average of the
// metric across all relevant pods (as a quantity)
TargetAverageValue resource.Quantity
}
// ResourceMetricSource indicates how to scale on a resource metric known to
// Kubernetes, as specified in requests and limits, describing each pod in the
// current scale target (e.g. CPU or memory). The values will be averaged
// together before being compared to the target. Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source. Only one "target" type
// should be set.
type ResourceMetricSource struct {
// Name is the name of the resource in question.
Name api.ResourceName
// TargetAverageUtilization is the target value of the average of the
// resource metric across all relevant pods, represented as a percentage of
// the requested value of the resource for the pods.
// +optional
TargetAverageUtilization *int32
// TargetAverageValue is the target value of the average of the
// resource metric across all relevant pods, as a raw value (instead of as
// a percentage of the request), similar to the "pods" metric source type.
// +optional
TargetAverageValue *resource.Quantity
}
// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
type HorizontalPodAutoscalerStatus struct {
// ObservedGeneration is the most recent generation observed by this autoscaler.
// +optional
ObservedGeneration *int64
// LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
// used by the autoscaler to control how often the number of pods is changed.
// +optional
LastScaleTime *metav1.Time
// CurrentReplicas is current number of replicas of pods managed by this autoscaler,
// as last seen by the autoscaler.
CurrentReplicas int32
// DesiredReplicas is the desired number of replicas of pods managed by this autoscaler,
// as last calculated by the autoscaler.
DesiredReplicas int32
// CurrentMetrics is the last read state of the metrics used by this autoscaler.
CurrentMetrics []MetricStatus
// Conditions is the set of conditions required for this autoscaler to scale its target,
// and indicates whether or not those conditions are met.
Conditions []HorizontalPodAutoscalerCondition
}
// ConditionStatus indicates the status of a condition (true, false, or unknown).
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
// HorizontalPodAutoscalerConditionType are the valid conditions of
// a HorizontalPodAutoscaler.
type HorizontalPodAutoscalerConditionType string
var (
// ScalingActive indicates that the HPA controller is able to scale if necessary:
// it's correctly configured, can fetch the desired metrics, and isn't disabled.
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
// AbleToScale indicates a lack of transient issues which prevent scaling from occurring,
// such as being in a backoff window, or being unable to access/update the target scale.
AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale"
// ScalingLimited indicates that the calculated scale based on metrics would be above or
// below the range for the HPA, and has thus been capped.
ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited"
)
// HorizontalPodAutoscalerCondition describes the state of
// a HorizontalPodAutoscaler at a certain point.
type HorizontalPodAutoscalerCondition struct {
// Type describes the current condition
Type HorizontalPodAutoscalerConditionType
// Status is the status of the condition (True, False, Unknown)
Status ConditionStatus
// LastTransitionTime is the last time the condition transitioned from
// one status to another
// +optional
LastTransitionTime metav1.Time
// Reason is the reason for the condition's last transition.
// +optional
Reason string
// Message is a human-readable explanation containing details about
// the transition
// +optional
Message string
}
// MetricStatus describes the last-read state of a single metric.
type MetricStatus struct {
// Type is the type of metric source. It will match one of the fields below.
Type MetricSourceType
// Object refers to a metric describing a single kubernetes object
// (for example, hits-per-second on an Ingress object).
// +optional
Object *ObjectMetricStatus
// Pods refers to a metric describing each pod in the current scale target
// (for example, transactions-processed-per-second). The values will be
// averaged together before being compared to the target value.
// +optional
Pods *PodsMetricStatus
// Resource refers to a resource metric (such as those specified in
// requests and limits) known to Kubernetes describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
// +optional
Resource *ResourceMetricStatus
}
// ObjectMetricStatus indicates the current value of a metric describing a
// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricStatus struct {
// Target is the described Kubernetes object.
Target CrossVersionObjectReference
// MetricName is the name of the metric in question.
MetricName string
// CurrentValue is the current value of the metric (as a quantity).
CurrentValue resource.Quantity
}
// PodsMetricStatus indicates the current value of a metric describing each pod in
// the current scale target (for example, transactions-processed-per-second).
type PodsMetricStatus struct {
// MetricName is the name of the metric in question
MetricName string
// CurrentAverageValue is the current value of the average of the
// metric across all relevant pods (as a quantity)
CurrentAverageValue resource.Quantity
}
// ResourceMetricStatus indicates the current value of a resource metric known to
// Kubernetes, as specified in requests and limits, describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source.
type ResourceMetricStatus struct {
// Name is the name of the resource in question.
Name api.ResourceName
// CurrentAverageUtilization is the current value of the average of the
// resource metric across all relevant pods, represented as a percentage of
// the requested value of the resource for the pods. It will only be
// present if `targetAverageValue` was set in the corresponding metric
// specification.
// +optional
CurrentAverageUtilization *int32
// CurrentAverageValue is the current value of the average of the
// resource metric across all relevant pods, as a raw value (instead of as
// a percentage of the request), similar to the "pods" metric source type.
// It will always be set, regardless of the corresponding metric specification.
CurrentAverageValue resource.Quantity
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HorizontalPodAutoscaler is the configuration for a horizontal pod
// autoscaler, which automatically manages the replica count of any resource
// implementing the scale subresource based on the metrics specified.
type HorizontalPodAutoscaler struct {
metav1.TypeMeta
// Metadata is the standard object metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Spec is the specification for the behaviour of the autoscaler.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
// +optional
Spec HorizontalPodAutoscalerSpec
// Status is the current information about the autoscaler.
// +optional
Status HorizontalPodAutoscalerStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
type HorizontalPodAutoscalerList struct {
metav1.TypeMeta
// Metadata is the standard list metadata.
// +optional
metav1.ListMeta
// Items is the list of horizontal pod autoscaler objects.
Items []HorizontalPodAutoscaler
}
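
For readers new to these internal types, a hedged construction sketch following the field comments above (one metric source per MetricSpec, pointers for optional scalars); all names and values are illustrative.

func exampleHPA() HorizontalPodAutoscaler {
	minReplicas := int32(2)
	targetCPU := int32(80) // mirrors DefaultCPUUtilization
	return HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: HorizontalPodAutoscalerSpec{
			ScaleTargetRef: CrossVersionObjectReference{Kind: "Deployment", Name: "web", APIVersion: "apps/v1"},
			MinReplicas:    &minReplicas,
			MaxReplicas:    10,
			Metrics: []MetricSpec{{
				Type:     ResourceMetricSourceType,
				Resource: &ResourceMetricSource{Name: api.ResourceCPU, TargetAverageUtilization: &targetCPU},
			}},
		},
	}
}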


@@ -0,0 +1,481 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package autoscaling
import (
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference.
func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
if in == nil {
return nil
}
out := new(CrossVersionObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler.
func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscaler)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition.
func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HorizontalPodAutoscaler, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList.
func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) {
*out = *in
out.ScaleTargetRef = in.ScaleTargetRef
if in.MinReplicas != nil {
in, out := &in.MinReplicas, &out.MinReplicas
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]MetricSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec.
func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) {
*out = *in
if in.ObservedGeneration != nil {
in, out := &in.ObservedGeneration, &out.ObservedGeneration
if *in == nil {
*out = nil
} else {
*out = new(int64)
**out = **in
}
}
if in.LastScaleTime != nil {
in, out := &in.LastScaleTime, &out.LastScaleTime
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
}
}
if in.CurrentMetrics != nil {
in, out := &in.CurrentMetrics, &out.CurrentMetrics
*out = make([]MetricStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]HorizontalPodAutoscalerCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus.
func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
*out = *in
if in.Object != nil {
in, out := &in.Object, &out.Object
if *in == nil {
*out = nil
} else {
*out = new(ObjectMetricSource)
(*in).DeepCopyInto(*out)
}
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
if *in == nil {
*out = nil
} else {
*out = new(PodsMetricSource)
(*in).DeepCopyInto(*out)
}
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
if *in == nil {
*out = nil
} else {
*out = new(ResourceMetricSource)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec.
func (in *MetricSpec) DeepCopy() *MetricSpec {
if in == nil {
return nil
}
out := new(MetricSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
*out = *in
if in.Object != nil {
in, out := &in.Object, &out.Object
if *in == nil {
*out = nil
} else {
*out = new(ObjectMetricStatus)
(*in).DeepCopyInto(*out)
}
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
if *in == nil {
*out = nil
} else {
*out = new(PodsMetricStatus)
(*in).DeepCopyInto(*out)
}
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
if *in == nil {
*out = nil
} else {
*out = new(ResourceMetricStatus)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus.
func (in *MetricStatus) DeepCopy() *MetricStatus {
if in == nil {
return nil
}
out := new(MetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) {
*out = *in
out.Target = in.Target
out.TargetValue = in.TargetValue.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource.
func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource {
if in == nil {
return nil
}
out := new(ObjectMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) {
*out = *in
out.Target = in.Target
out.CurrentValue = in.CurrentValue.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus.
func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
if in == nil {
return nil
}
out := new(ObjectMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
*out = *in
out.TargetAverageValue = in.TargetAverageValue.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource.
func (in *PodsMetricSource) DeepCopy() *PodsMetricSource {
if in == nil {
return nil
}
out := new(PodsMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) {
*out = *in
out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus.
func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
if in == nil {
return nil
}
out := new(PodsMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
*out = *in
if in.TargetAverageUtilization != nil {
in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.TargetAverageValue != nil {
in, out := &in.TargetAverageValue, &out.TargetAverageValue
if *in == nil {
*out = nil
} else {
*out = new(resource.Quantity)
**out = (*in).DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource.
func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource {
if in == nil {
return nil
}
out := new(ResourceMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) {
*out = *in
if in.CurrentAverageUtilization != nil {
in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus.
func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus {
if in == nil {
return nil
}
out := new(ResourceMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scale) DeepCopyInto(out *Scale) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
func (in *Scale) DeepCopy() *Scale {
if in == nil {
return nil
}
out := new(Scale)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
func (in *ScaleSpec) DeepCopy() *ScaleSpec {
if in == nil {
return nil
}
out := new(ScaleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
func (in *ScaleStatus) DeepCopy() *ScaleStatus {
if in == nil {
return nil
}
out := new(ScaleStatus)
in.DeepCopyInto(out)
return out
}
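
A short hedged sketch of what the generated functions buy callers: DeepCopy returns an independent object, so pointer and slice fields of the clone can be mutated without touching the original. The function below is illustrative only.

func exampleDeepCopy(in *HorizontalPodAutoscaler) {
	clone := in.DeepCopy()
	clone.Spec.MaxReplicas = 20 // the original is unchanged
	if clone.Spec.MinReplicas != nil {
		*clone.Spec.MinReplicas = 3 // pointer fields were duplicated, not shared
	}
}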


@@ -16,7 +16,7 @@ limitations under the License.
// This file should be consistent with pkg/api/v1/annotation_key_constants.go.
package api
package core
const (
// ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy
@@ -68,6 +68,10 @@ const (
// This annotation can be attached to node.
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
// BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by
// the kubelet prior to running
BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint"
// annotation key prefix used to identify non-convertible json paths.
NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"


@@ -14,11 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:deepcopy-gen=package
// Package api contains the latest (or "internal") version of the
// Kubernetes API objects. This is the API objects as represented in memory.
// The contract presented to clients is located in the versioned packages,
// which are sub-directories. The first one is "v1". Those packages
// describe how a particular version is serialized to storage/network.
package api // import "k8s.io/kubernetes/pkg/api"
package core // import "k8s.io/kubernetes/pkg/apis/core"


@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package api
package core
// Field path constants that are specific to the internal API
// representation.
@@ -25,8 +25,8 @@ const (
PodStatusField = "status.phase"
SecretTypeField = "type"
EventReasonField = "reason"
EventSourceField = "source"
EventReasonField = "action"
EventSourceField = "reportingComponent"
EventTypeField = "type"
EventInvolvedKindField = "involvedObject.kind"
EventInvolvedNamespaceField = "involvedObject.namespace"


@@ -28,30 +28,36 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/core"
)
// IsHugePageResourceName returns true if the resource name has the huge page
// resource prefix.
func IsHugePageResourceName(name api.ResourceName) bool {
return strings.HasPrefix(string(name), api.ResourceHugePagesPrefix)
func IsHugePageResourceName(name core.ResourceName) bool {
return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix)
}
// IsQuotaHugePageResourceName returns true if the resource name has the quota
// related huge page resource prefix.
func IsQuotaHugePageResourceName(name core.ResourceName) bool {
return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix)
}
// HugePageResourceName returns a ResourceName with the canonical hugepage
// prefix prepended for the specified page size. The page size is converted
// to its canonical representation.
func HugePageResourceName(pageSize resource.Quantity) api.ResourceName {
return api.ResourceName(fmt.Sprintf("%s%s", api.ResourceHugePagesPrefix, pageSize.String()))
func HugePageResourceName(pageSize resource.Quantity) core.ResourceName {
return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String()))
}
// HugePageSizeFromResourceName returns the page size for the specified huge page
// resource name. If the specified input is not a valid huge page resource name
// an error is returned.
func HugePageSizeFromResourceName(name api.ResourceName) (resource.Quantity, error) {
func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) {
if !IsHugePageResourceName(name) {
return resource.Quantity{}, fmt.Errorf("resource name: %s is not valid hugepage name", name)
}
pageSize := strings.TrimPrefix(string(name), api.ResourceHugePagesPrefix)
pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix)
return resource.ParseQuantity(pageSize)
}
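
A round-trip sketch for the two hugepage helpers above, written as if it lived in the same helper package; the 2Mi page size is just an example value.

func exampleHugePages() {
	name := HugePageResourceName(resource.MustParse("2Mi")) // e.g. "hugepages-2Mi" under the canonical prefix
	if size, err := HugePageSizeFromResourceName(name); err == nil {
		fmt.Println(size.String()) // "2Mi"
	}
}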
@@ -60,14 +66,14 @@ func HugePageSizeFromResourceName(name api.ResourceName) (resource.Quantity, err
func NonConvertibleFields(annotations map[string]string) map[string]string {
nonConvertibleKeys := map[string]string{}
for key, value := range annotations {
if strings.HasPrefix(key, api.NonConvertibleAnnotationPrefix) {
if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) {
nonConvertibleKeys[key] = value
}
}
return nonConvertibleKeys
}
// Semantic can do semantic deep equality checks for api objects.
// Semantic can do semantic deep equality checks for core objects.
// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true
var Semantic = conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
@@ -92,10 +98,10 @@ var Semantic = conversion.EqualitiesOrDie(
)
var standardResourceQuotaScopes = sets.NewString(
string(api.ResourceQuotaScopeTerminating),
string(api.ResourceQuotaScopeNotTerminating),
string(api.ResourceQuotaScopeBestEffort),
string(api.ResourceQuotaScopeNotBestEffort),
string(core.ResourceQuotaScopeTerminating),
string(core.ResourceQuotaScopeNotTerminating),
string(core.ResourceQuotaScopeBestEffort),
string(core.ResourceQuotaScopeNotBestEffort),
)
// IsStandardResourceQuotaScope returns true if the scope is a standard value
@@ -104,24 +110,24 @@ func IsStandardResourceQuotaScope(str string) bool {
}
var podObjectCountQuotaResources = sets.NewString(
string(api.ResourcePods),
string(core.ResourcePods),
)
var podComputeQuotaResources = sets.NewString(
string(api.ResourceCPU),
string(api.ResourceMemory),
string(api.ResourceLimitsCPU),
string(api.ResourceLimitsMemory),
string(api.ResourceRequestsCPU),
string(api.ResourceRequestsMemory),
string(core.ResourceCPU),
string(core.ResourceMemory),
string(core.ResourceLimitsCPU),
string(core.ResourceLimitsMemory),
string(core.ResourceRequestsCPU),
string(core.ResourceRequestsMemory),
)
// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope
func IsResourceQuotaScopeValidForResource(scope api.ResourceQuotaScope, resource string) bool {
func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool {
switch scope {
case api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating, api.ResourceQuotaScopeNotBestEffort:
case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort:
return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource)
case api.ResourceQuotaScopeBestEffort:
case core.ResourceQuotaScopeBestEffort:
return podObjectCountQuotaResources.Has(resource)
default:
return true
@@ -129,62 +135,45 @@ func IsResourceQuotaScopeValidForResource(scope api.ResourceQuotaScope, resource
}
var standardContainerResources = sets.NewString(
string(api.ResourceCPU),
string(api.ResourceMemory),
string(api.ResourceEphemeralStorage),
string(core.ResourceCPU),
string(core.ResourceMemory),
string(core.ResourceEphemeralStorage),
)
// IsStandardContainerResourceName returns true if the container can make a resource request
// for the specified resource
func IsStandardContainerResourceName(str string) bool {
return standardContainerResources.Has(str) || IsHugePageResourceName(api.ResourceName(str))
return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str))
}
// IsExtendedResourceName returns true if the resource name is not in the
// default namespace, or it has the opaque integer resource prefix.
func IsExtendedResourceName(name api.ResourceName) bool {
// TODO: Remove OIR part following deprecation.
return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name)
// default namespace.
func IsExtendedResourceName(name core.ResourceName) bool {
return !IsDefaultNamespaceResource(name)
}
// IsDefaultNamespaceResource returns true if the resource name is in the
// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are
// implicitly in the kubernetes.io/ namespace.
func IsDefaultNamespaceResource(name api.ResourceName) bool {
func IsDefaultNamespaceResource(name core.ResourceName) bool {
return !strings.Contains(string(name), "/") ||
strings.Contains(string(name), api.ResourceDefaultNamespacePrefix)
strings.Contains(string(name), core.ResourceDefaultNamespacePrefix)
}
// IsOpaqueIntResourceName returns true if the resource name has the opaque
// integer resource prefix.
func IsOpaqueIntResourceName(name api.ResourceName) bool {
return strings.HasPrefix(string(name), api.ResourceOpaqueIntPrefix)
}
// OpaqueIntResourceName returns a ResourceName with the canonical opaque
// integer prefix prepended. If the argument already has the prefix, it is
// returned unmodified.
func OpaqueIntResourceName(name string) api.ResourceName {
if IsOpaqueIntResourceName(api.ResourceName(name)) {
return api.ResourceName(name)
}
return api.ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name))
}
var overcommitBlacklist = sets.NewString(string(api.ResourceNvidiaGPU))
var overcommitBlacklist = sets.NewString(string(core.ResourceNvidiaGPU))
// IsOvercommitAllowed returns true if the resource is in the default
// namespace and not blacklisted.
func IsOvercommitAllowed(name api.ResourceName) bool {
func IsOvercommitAllowed(name core.ResourceName) bool {
return IsDefaultNamespaceResource(name) &&
!IsHugePageResourceName(name) &&
!overcommitBlacklist.Has(string(name))
}
var standardLimitRangeTypes = sets.NewString(
string(api.LimitTypePod),
string(api.LimitTypeContainer),
string(api.LimitTypePersistentVolumeClaim),
string(core.LimitTypePod),
string(core.LimitTypeContainer),
string(core.LimitTypePersistentVolumeClaim),
)
// IsStandardLimitRangeType returns true if the type is Pod or Container
@@ -193,112 +182,112 @@ func IsStandardLimitRangeType(str string) bool {
}
var standardQuotaResources = sets.NewString(
string(api.ResourceCPU),
string(api.ResourceMemory),
string(api.ResourceEphemeralStorage),
string(api.ResourceRequestsCPU),
string(api.ResourceRequestsMemory),
string(api.ResourceRequestsStorage),
string(api.ResourceRequestsEphemeralStorage),
string(api.ResourceLimitsCPU),
string(api.ResourceLimitsMemory),
string(api.ResourceLimitsEphemeralStorage),
string(api.ResourcePods),
string(api.ResourceQuotas),
string(api.ResourceServices),
string(api.ResourceReplicationControllers),
string(api.ResourceSecrets),
string(api.ResourcePersistentVolumeClaims),
string(api.ResourceConfigMaps),
string(api.ResourceServicesNodePorts),
string(api.ResourceServicesLoadBalancers),
string(core.ResourceCPU),
string(core.ResourceMemory),
string(core.ResourceEphemeralStorage),
string(core.ResourceRequestsCPU),
string(core.ResourceRequestsMemory),
string(core.ResourceRequestsStorage),
string(core.ResourceRequestsEphemeralStorage),
string(core.ResourceLimitsCPU),
string(core.ResourceLimitsMemory),
string(core.ResourceLimitsEphemeralStorage),
string(core.ResourcePods),
string(core.ResourceQuotas),
string(core.ResourceServices),
string(core.ResourceReplicationControllers),
string(core.ResourceSecrets),
string(core.ResourcePersistentVolumeClaims),
string(core.ResourceConfigMaps),
string(core.ResourceServicesNodePorts),
string(core.ResourceServicesLoadBalancers),
)
// IsStandardQuotaResourceName returns true if the resource is known to
// the quota tracking system
func IsStandardQuotaResourceName(str string) bool {
return standardQuotaResources.Has(str)
return standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str))
}
var standardResources = sets.NewString(
string(api.ResourceCPU),
string(api.ResourceMemory),
string(api.ResourceEphemeralStorage),
string(api.ResourceRequestsCPU),
string(api.ResourceRequestsMemory),
string(api.ResourceRequestsEphemeralStorage),
string(api.ResourceLimitsCPU),
string(api.ResourceLimitsMemory),
string(api.ResourceLimitsEphemeralStorage),
string(api.ResourcePods),
string(api.ResourceQuotas),
string(api.ResourceServices),
string(api.ResourceReplicationControllers),
string(api.ResourceSecrets),
string(api.ResourceConfigMaps),
string(api.ResourcePersistentVolumeClaims),
string(api.ResourceStorage),
string(api.ResourceRequestsStorage),
string(api.ResourceServicesNodePorts),
string(api.ResourceServicesLoadBalancers),
string(core.ResourceCPU),
string(core.ResourceMemory),
string(core.ResourceEphemeralStorage),
string(core.ResourceRequestsCPU),
string(core.ResourceRequestsMemory),
string(core.ResourceRequestsEphemeralStorage),
string(core.ResourceLimitsCPU),
string(core.ResourceLimitsMemory),
string(core.ResourceLimitsEphemeralStorage),
string(core.ResourcePods),
string(core.ResourceQuotas),
string(core.ResourceServices),
string(core.ResourceReplicationControllers),
string(core.ResourceSecrets),
string(core.ResourceConfigMaps),
string(core.ResourcePersistentVolumeClaims),
string(core.ResourceStorage),
string(core.ResourceRequestsStorage),
string(core.ResourceServicesNodePorts),
string(core.ResourceServicesLoadBalancers),
)
// IsStandardResourceName returns true if the resource is known to the system
func IsStandardResourceName(str string) bool {
return standardResources.Has(str) || IsHugePageResourceName(api.ResourceName(str))
return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str))
}
var integerResources = sets.NewString(
string(api.ResourcePods),
string(api.ResourceQuotas),
string(api.ResourceServices),
string(api.ResourceReplicationControllers),
string(api.ResourceSecrets),
string(api.ResourceConfigMaps),
string(api.ResourcePersistentVolumeClaims),
string(api.ResourceServicesNodePorts),
string(api.ResourceServicesLoadBalancers),
string(core.ResourcePods),
string(core.ResourceQuotas),
string(core.ResourceServices),
string(core.ResourceReplicationControllers),
string(core.ResourceSecrets),
string(core.ResourceConfigMaps),
string(core.ResourcePersistentVolumeClaims),
string(core.ResourceServicesNodePorts),
string(core.ResourceServicesLoadBalancers),
)
// IsIntegerResourceName returns true if the resource is measured in integer values
func IsIntegerResourceName(str string) bool {
return integerResources.Has(str) || IsExtendedResourceName(api.ResourceName(str))
return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str))
}
// Extended and HugePages resources
func IsScalarResourceName(name api.ResourceName) bool {
func IsScalarResourceName(name core.ResourceName) bool {
return IsExtendedResourceName(name) || IsHugePageResourceName(name)
}
// this function aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *api.Service) bool {
return service.Spec.ClusterIP != api.ClusterIPNone && service.Spec.ClusterIP != ""
func IsServiceIPSet(service *core.Service) bool {
return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP != ""
}
// this function aims to check if the service's cluster IP is requested or not
func IsServiceIPRequested(service *api.Service) bool {
func IsServiceIPRequested(service *core.Service) bool {
// ExternalName services are CNAME aliases to external ones. Ignore the IP.
if service.Spec.Type == api.ServiceTypeExternalName {
if service.Spec.Type == core.ServiceTypeExternalName {
return false
}
return service.Spec.ClusterIP == ""
}
var standardFinalizers = sets.NewString(
string(api.FinalizerKubernetes),
string(core.FinalizerKubernetes),
metav1.FinalizerOrphanDependents,
metav1.FinalizerDeleteDependents,
)
// HasAnnotation returns a bool if passed in annotation exists
func HasAnnotation(obj api.ObjectMeta, ann string) bool {
func HasAnnotation(obj core.ObjectMeta, ann string) bool {
_, found := obj.Annotations[ann]
return found
}
// SetMetaDataAnnotation sets the annotation and value
func SetMetaDataAnnotation(obj *api.ObjectMeta, ann string, value string) {
func SetMetaDataAnnotation(obj *core.ObjectMeta, ann string, value string) {
if obj.Annotations == nil {
obj.Annotations = make(map[string]string)
}
@@ -311,7 +300,7 @@ func IsStandardFinalizerName(str string) bool {
// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice,
// only if they do not already exist
func AddToNodeAddresses(addresses *[]api.NodeAddress, addAddresses ...api.NodeAddress) {
func AddToNodeAddresses(addresses *[]core.NodeAddress, addAddresses ...core.NodeAddress) {
for _, add := range addAddresses {
exists := false
for _, existing := range *addresses {
@@ -327,11 +316,11 @@ func AddToNodeAddresses(addresses *[]api.NodeAddress, addAddresses ...api.NodeAd
}
// TODO: make method on LoadBalancerStatus?
func LoadBalancerStatusEqual(l, r *api.LoadBalancerStatus) bool {
func LoadBalancerStatusEqual(l, r *core.LoadBalancerStatus) bool {
return ingressSliceEqual(l.Ingress, r.Ingress)
}
func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool {
func ingressSliceEqual(lhs, rhs []core.LoadBalancerIngress) bool {
if len(lhs) != len(rhs) {
return false
}
@@ -343,7 +332,7 @@ func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool {
return true
}
func ingressEqual(lhs, rhs *api.LoadBalancerIngress) bool {
func ingressEqual(lhs, rhs *core.LoadBalancerIngress) bool {
if lhs.IP != rhs.IP {
return false
}
@@ -354,9 +343,9 @@ func ingressEqual(lhs, rhs *api.LoadBalancerIngress) bool {
}
// TODO: make method on LoadBalancerStatus?
func LoadBalancerStatusDeepCopy(lb *api.LoadBalancerStatus) *api.LoadBalancerStatus {
c := &api.LoadBalancerStatus{}
c.Ingress = make([]api.LoadBalancerIngress, len(lb.Ingress))
func LoadBalancerStatusDeepCopy(lb *core.LoadBalancerStatus) *core.LoadBalancerStatus {
c := &core.LoadBalancerStatus{}
c.Ingress = make([]core.LoadBalancerIngress, len(lb.Ingress))
for i := range lb.Ingress {
c.Ingress[i] = lb.Ingress[i]
}
@@ -365,42 +354,42 @@ func LoadBalancerStatusDeepCopy(lb *api.LoadBalancerStatus) *api.LoadBalancerSta
// GetAccessModesAsString returns a string representation of an array of access modes.
// modes, when present, are always in the same order: RWO,ROX,RWX.
func GetAccessModesAsString(modes []api.PersistentVolumeAccessMode) string {
func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string {
modes = removeDuplicateAccessModes(modes)
modesStr := []string{}
if containsAccessMode(modes, api.ReadWriteOnce) {
if containsAccessMode(modes, core.ReadWriteOnce) {
modesStr = append(modesStr, "RWO")
}
if containsAccessMode(modes, api.ReadOnlyMany) {
if containsAccessMode(modes, core.ReadOnlyMany) {
modesStr = append(modesStr, "ROX")
}
if containsAccessMode(modes, api.ReadWriteMany) {
if containsAccessMode(modes, core.ReadWriteMany) {
modesStr = append(modesStr, "RWX")
}
return strings.Join(modesStr, ",")
}
// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString
func GetAccessModesFromString(modes string) []api.PersistentVolumeAccessMode {
func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode {
strmodes := strings.Split(modes, ",")
accessModes := []api.PersistentVolumeAccessMode{}
accessModes := []core.PersistentVolumeAccessMode{}
for _, s := range strmodes {
s = strings.Trim(s, " ")
switch {
case s == "RWO":
accessModes = append(accessModes, api.ReadWriteOnce)
accessModes = append(accessModes, core.ReadWriteOnce)
case s == "ROX":
accessModes = append(accessModes, api.ReadOnlyMany)
accessModes = append(accessModes, core.ReadOnlyMany)
case s == "RWX":
accessModes = append(accessModes, api.ReadWriteMany)
accessModes = append(accessModes, core.ReadWriteMany)
}
}
return accessModes
}
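
A hedged round-trip example for the two access-mode helpers above, again as a same-package sketch with illustrative values.

func exampleAccessModes() {
	modes := GetAccessModesFromString("RWO, ROX") // -> ReadWriteOnce, ReadOnlyMany
	fmt.Println(GetAccessModesAsString(modes))    // "RWO,ROX"
}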
// removeDuplicateAccessModes returns an array of access modes without any duplicates
func removeDuplicateAccessModes(modes []api.PersistentVolumeAccessMode) []api.PersistentVolumeAccessMode {
accessModes := []api.PersistentVolumeAccessMode{}
func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode {
accessModes := []core.PersistentVolumeAccessMode{}
for _, m := range modes {
if !containsAccessMode(accessModes, m) {
accessModes = append(accessModes, m)
@@ -409,7 +398,7 @@ func removeDuplicateAccessModes(modes []api.PersistentVolumeAccessMode) []api.Pe
return accessModes
}
func containsAccessMode(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
func containsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
@@ -418,9 +407,9 @@ func containsAccessMode(modes []api.PersistentVolumeAccessMode, mode api.Persist
return false
}
// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement core type into a struct that implements
// labels.Selector.
func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labels.Selector, error) {
func NodeSelectorRequirementsAsSelector(nsm []core.NodeSelectorRequirement) (labels.Selector, error) {
if len(nsm) == 0 {
return labels.Nothing(), nil
}
@ -428,17 +417,17 @@ func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labe
for _, expr := range nsm {
var op selection.Operator
switch expr.Operator {
case api.NodeSelectorOpIn:
case core.NodeSelectorOpIn:
op = selection.In
case api.NodeSelectorOpNotIn:
case core.NodeSelectorOpNotIn:
op = selection.NotIn
case api.NodeSelectorOpExists:
case core.NodeSelectorOpExists:
op = selection.Exists
case api.NodeSelectorOpDoesNotExist:
case core.NodeSelectorOpDoesNotExist:
op = selection.DoesNotExist
case api.NodeSelectorOpGt:
case core.NodeSelectorOpGt:
op = selection.GreaterThan
case api.NodeSelectorOpLt:
case core.NodeSelectorOpLt:
op = selection.LessThan
default:
return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
@ -453,11 +442,11 @@ func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labe
}
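For illustration, a sketch of turning node selector requirements into a labels.Selector (same assumed helper import path as above):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	core "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	reqs := []core.NodeSelectorRequirement{{
		Key:      "kubernetes.io/hostname",
		Operator: core.NodeSelectorOpIn,
		Values:   []string{"node-1", "node-2"},
	}}

	// Each requirement becomes a labels.Requirement with the mapped selection operator.
	sel, err := helper.NodeSelectorRequirementsAsSelector(reqs)
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(labels.Set{"kubernetes.io/hostname": "node-1"})) // true
	fmt.Println(sel.Matches(labels.Set{"kubernetes.io/hostname": "node-3"})) // false
}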
// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations
// and converts it to the []Toleration type in api.
func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]api.Toleration, error) {
var tolerations []api.Toleration
if len(annotations) > 0 && annotations[api.TolerationsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[api.TolerationsAnnotationKey]), &tolerations)
// and converts it to the []Toleration type in core.
func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) {
var tolerations []core.Toleration
if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations)
if err != nil {
return tolerations, err
}
@ -467,10 +456,10 @@ func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]api.Tole
// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
// Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPod(pod *api.Pod, toleration *api.Toleration) bool {
func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool {
podTolerations := pod.Spec.Tolerations
var newTolerations []api.Toleration
var newTolerations []core.Toleration
updated := false
for i := range podTolerations {
if toleration.MatchToleration(&podTolerations[i]) {
@ -494,7 +483,7 @@ func AddOrUpdateTolerationInPod(pod *api.Pod, toleration *api.Toleration) bool {
}
// TolerationToleratesTaint checks if the toleration tolerates the taint.
func TolerationToleratesTaint(toleration *api.Toleration, taint *api.Taint) bool {
func TolerationToleratesTaint(toleration *core.Toleration, taint *core.Taint) bool {
if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
return false
}
@ -503,17 +492,17 @@ func TolerationToleratesTaint(toleration *api.Toleration, taint *api.Taint) bool
return false
}
// TODO: Use proper defaulting when Toleration becomes a field of PodSpec
if (len(toleration.Operator) == 0 || toleration.Operator == api.TolerationOpEqual) && toleration.Value == taint.Value {
if (len(toleration.Operator) == 0 || toleration.Operator == core.TolerationOpEqual) && toleration.Value == taint.Value {
return true
}
if toleration.Operator == api.TolerationOpExists {
if toleration.Operator == core.TolerationOpExists {
return true
}
return false
}
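For illustration, a sketch of the matching rules above: an empty operator behaves like Equal, Exists ignores the value, and an empty effect tolerates any effect (assumed helper import path as above):

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	taint := &core.Taint{Key: "dedicated", Value: "gpu", Effect: core.TaintEffectNoSchedule}

	// Operator defaults to Equal, so key, value and effect must all match.
	equal := &core.Toleration{Key: "dedicated", Value: "gpu", Effect: core.TaintEffectNoSchedule}
	fmt.Println(helper.TolerationToleratesTaint(equal, taint)) // true

	// Exists ignores the value; the empty effect tolerates every effect.
	exists := &core.Toleration{Key: "dedicated", Operator: core.TolerationOpExists}
	fmt.Println(helper.TolerationToleratesTaint(exists, taint)) // true
}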
// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
func TaintToleratedByTolerations(taint *api.Taint, tolerations []api.Toleration) bool {
func TaintToleratedByTolerations(taint *core.Taint, tolerations []core.Toleration) bool {
tolerated := false
for i := range tolerations {
if TolerationToleratesTaint(&tolerations[i], taint) {
@ -525,13 +514,13 @@ func TaintToleratedByTolerations(taint *api.Taint, tolerations []api.Toleration)
}
// GetTaintsFromNodeAnnotations gets the json serialized taints data from Node.Annotations
// and converts it to the []Taint type in api.
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]api.Taint, error) {
var taints []api.Taint
if len(annotations) > 0 && annotations[api.TaintsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[api.TaintsAnnotationKey]), &taints)
// and converts it to the []Taint type in core.
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) {
var taints []core.Taint
if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints)
if err != nil {
return []api.Taint{}, err
return []core.Taint{}, err
}
}
return taints, nil
@ -540,12 +529,12 @@ func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]api.Taint, e
// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls
// and a slice of unsafe Sysctls. This is only a convenience wrapper around
// SysctlsFromPodAnnotation.
func SysctlsFromPodAnnotations(a map[string]string) ([]api.Sysctl, []api.Sysctl, error) {
safe, err := SysctlsFromPodAnnotation(a[api.SysctlsPodAnnotationKey])
func SysctlsFromPodAnnotations(a map[string]string) ([]core.Sysctl, []core.Sysctl, error) {
safe, err := SysctlsFromPodAnnotation(a[core.SysctlsPodAnnotationKey])
if err != nil {
return nil, nil, err
}
unsafe, err := SysctlsFromPodAnnotation(a[api.UnsafeSysctlsPodAnnotationKey])
unsafe, err := SysctlsFromPodAnnotation(a[core.UnsafeSysctlsPodAnnotationKey])
if err != nil {
return nil, nil, err
}
@ -554,13 +543,13 @@ func SysctlsFromPodAnnotations(a map[string]string) ([]api.Sysctl, []api.Sysctl,
}
// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls.
func SysctlsFromPodAnnotation(annotation string) ([]api.Sysctl, error) {
func SysctlsFromPodAnnotation(annotation string) ([]core.Sysctl, error) {
if len(annotation) == 0 {
return nil, nil
}
kvs := strings.Split(annotation, ",")
sysctls := make([]api.Sysctl, len(kvs))
sysctls := make([]core.Sysctl, len(kvs))
for i, kv := range kvs {
cs := strings.Split(kv, "=")
if len(cs) != 2 || len(cs[0]) == 0 {
@ -573,7 +562,7 @@ func SysctlsFromPodAnnotation(annotation string) ([]api.Sysctl, error) {
}
// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls.
func PodAnnotationsFromSysctls(sysctls []api.Sysctl) string {
func PodAnnotationsFromSysctls(sysctls []core.Sysctl) string {
if len(sysctls) == 0 {
return ""
}
@ -586,9 +575,9 @@ func PodAnnotationsFromSysctls(sysctls []api.Sysctl) string {
}
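For illustration, a sketch of the annotation format these two functions round-trip, a comma-separated list of name=value pairs (assumed helper import path as above):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	// The annotation value is a comma-separated list of name=value pairs.
	sysctls, err := helper.SysctlsFromPodAnnotation("kernel.shm_rmid_forced=1,kernel.msgmax=8192")
	if err != nil {
		panic(err)
	}
	for _, s := range sysctls {
		fmt.Printf("%s = %s\n", s.Name, s.Value)
	}

	// PodAnnotationsFromSysctls produces the same comma-separated form again.
	fmt.Println(helper.PodAnnotationsFromSysctls(sysctls))
}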
// GetPersistentVolumeClass returns StorageClassName.
func GetPersistentVolumeClass(volume *api.PersistentVolume) string {
func GetPersistentVolumeClass(volume *core.PersistentVolume) string {
// Use beta annotation first
if class, found := volume.Annotations[api.BetaStorageClassAnnotation]; found {
if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found {
return class
}
@ -597,9 +586,9 @@ func GetPersistentVolumeClass(volume *api.PersistentVolume) string {
// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was
// requested, it returns "".
func GetPersistentVolumeClaimClass(claim *api.PersistentVolumeClaim) string {
func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string {
// Use beta annotation first
if class, found := claim.Annotations[api.BetaStorageClassAnnotation]; found {
if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found {
return class
}
@ -611,9 +600,9 @@ func GetPersistentVolumeClaimClass(claim *api.PersistentVolumeClaim) string {
}
// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field.
func PersistentVolumeClaimHasClass(claim *api.PersistentVolumeClaim) bool {
func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool {
// Use beta annotation first
if _, found := claim.Annotations[api.BetaStorageClassAnnotation]; found {
if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found {
return true
}
@ -625,12 +614,12 @@ func PersistentVolumeClaimHasClass(claim *api.PersistentVolumeClaim) bool {
}
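For illustration, a sketch of how the storage class of a claim is resolved: the deprecated beta annotation still takes precedence over Spec.StorageClassName (assumed helper import path as above; class names are made up):

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	className := "fast-ssd"
	claim := &core.PersistentVolumeClaim{}
	claim.Spec.StorageClassName = &className

	// With no annotation set, the spec field is used.
	fmt.Println(helper.GetPersistentVolumeClaimClass(claim)) // fast-ssd

	// The legacy beta annotation, when present, still wins for compatibility.
	claim.Annotations = map[string]string{core.BetaStorageClassAnnotation: "slow-hdd"}
	fmt.Println(helper.GetPersistentVolumeClaimClass(claim)) // slow-hdd
}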
// GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations
// and converts it to the NodeAffinity type in api.
// and converts it to the NodeAffinity type in core.
// TODO: update when storage node affinity graduates to beta
func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*api.NodeAffinity, error) {
if len(annotations) > 0 && annotations[api.AlphaStorageNodeAffinityAnnotation] != "" {
var affinity api.NodeAffinity
err := json.Unmarshal([]byte(annotations[api.AlphaStorageNodeAffinityAnnotation]), &affinity)
func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*core.NodeAffinity, error) {
if len(annotations) > 0 && annotations[core.AlphaStorageNodeAffinityAnnotation] != "" {
var affinity core.NodeAffinity
err := json.Unmarshal([]byte(annotations[core.AlphaStorageNodeAffinityAnnotation]), &affinity)
if err != nil {
return nil, err
}
@ -641,7 +630,7 @@ func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*api.N
// Converts NodeAffinity type to Alpha annotation for use in PersistentVolumes
// TODO: update when storage node affinity graduates to beta
func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *api.NodeAffinity) error {
func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *core.NodeAffinity) error {
if affinity == nil {
return nil
}
@ -650,6 +639,6 @@ func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinit
if err != nil {
return err
}
annotations[api.AlphaStorageNodeAffinityAnnotation] = string(json)
annotations[core.AlphaStorageNodeAffinityAnnotation] = string(json)
return nil
}

View file

@ -23,9 +23,9 @@ import (
"k8s.io/apimachinery/pkg/apimachinery/registered"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/v1"
)
func init() {
@ -36,9 +36,9 @@ func init() {
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
if err := announced.NewGroupMetaFactory(
&announced.GroupMetaFactoryArgs{
GroupName: api.GroupName,
GroupName: core.GroupName,
VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version},
AddInternalObjectsToScheme: api.AddToScheme,
AddInternalObjectsToScheme: core.AddToScheme,
RootScopedKinds: sets.NewString(
"Node",
"Namespace",
@ -56,9 +56,6 @@ func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *r
"PodProxyOptions",
"NodeProxyOptions",
"ServiceProxyOptions",
"ThirdPartyResource",
"ThirdPartyResourceData",
"ThirdPartyResourceList",
),
},
announced.VersionToSchemeFunc{

View file

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package api
package core
import "encoding/json"

View file

@ -17,7 +17,7 @@ limitations under the License.
//TODO: consider making these methods functions, because we don't want helper
//functions in the k8s.io/api repo.
package api
package core
import (
"k8s.io/apimachinery/pkg/runtime/schema"

vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go generated vendored Normal file (63 lines added)
View file

@ -0,0 +1,63 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pods
import (
"fmt"
"k8s.io/kubernetes/pkg/fieldpath"
)
// ConvertDownwardAPIFieldLabel converts the specified downward API field label
// and its value in the pod of the specified version to the internal version,
// and returns the converted label and value. This function returns an error if
// the conversion fails.
func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string, error) {
if version != "v1" {
return "", "", fmt.Errorf("unsupported pod version: %s", version)
}
if path, _, ok := fieldpath.SplitMaybeSubscriptedPath(label); ok {
switch path {
case "metadata.annotations", "metadata.labels":
return label, value, nil
default:
return "", "", fmt.Errorf("field label does not support subscript: %s", label)
}
}
switch label {
case "metadata.annotations",
"metadata.labels",
"metadata.name",
"metadata.namespace",
"metadata.uid",
"spec.nodeName",
"spec.restartPolicy",
"spec.serviceAccountName",
"spec.schedulerName",
"status.phase",
"status.hostIP",
"status.podIP":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":
return "spec.nodeName", value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
}
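For illustration, a short sketch of calling the new helper; the package path follows the file header above:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core/pods"
)

func main() {
	// Supported labels pass through unchanged.
	label, value, _ := pods.ConvertDownwardAPIFieldLabel("v1", "metadata.name", "example-pod")
	fmt.Println(label, value) // metadata.name example-pod

	// The legacy spec.host label sent by old v1 clients is rewritten.
	label, _, _ = pods.ConvertDownwardAPIFieldLabel("v1", "spec.host", "")
	fmt.Println(label) // spec.nodeName

	// Subscripted paths are accepted only for annotations and labels.
	if _, _, err := pods.ConvertDownwardAPIFieldLabel("v1", "status.phase['x']", ""); err != nil {
		fmt.Println(err) // field label does not support subscript: status.phase['x']
	}
}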

View file

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package api
package core
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View file

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package api
package core
import (
"k8s.io/apimachinery/pkg/api/resource"

View file

@ -17,7 +17,7 @@ limitations under the License.
//TODO: consider making these methods functions, because we don't want helper
//functions in the k8s.io/api repo.
package api
package core
import "fmt"

View file

@ -17,7 +17,7 @@ limitations under the License.
//TODO: consider making these methods functions, because we don't want helper
//functions in the k8s.io/api repo.
package api
package core
// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
// if the two tolerations have same <key,effect,operator,value> combination, regard as they match.

View file

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package api
package core
import (
"k8s.io/apimachinery/pkg/api/resource"
@ -347,10 +347,10 @@ type PersistentVolumeSource struct {
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource
// ISCSIVolumeSource represents an ISCSI resource that is attached to a
// ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// +optional
ISCSI *ISCSIVolumeSource
ISCSI *ISCSIPersistentVolumeSource
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
// +optional
@ -391,6 +391,9 @@ type PersistentVolumeSource struct {
// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource
// CSI (Container Storage Interface) represents storage that is handled by an external CSI driver
// +optional
CSI *CSIPersistentVolumeSource
}
type PersistentVolumeClaimVolumeSource struct {
@ -404,7 +407,7 @@ type PersistentVolumeClaimVolumeSource struct {
const (
// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
// It's currently still used and will be held for backwards compatibility
// It's deprecated and will be removed in a future release. (#51440)
BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
// MountOptionAnnotation defines mount option annotation used in PVs
@ -459,6 +462,11 @@ type PersistentVolumeSpec struct {
// simply fail if one is invalid.
// +optional
MountOptions []string
// volumeMode defines if a volume is intended to be used with a formatted filesystem
// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
// This is an alpha feature and may change in the future.
// +optional
VolumeMode *PersistentVolumeMode
}
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes
@ -476,6 +484,16 @@ const (
PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
)
// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
type PersistentVolumeMode string
const (
// PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
PersistentVolumeBlock PersistentVolumeMode = "Block"
// PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
)
type PersistentVolumeStatus struct {
// Phase indicates if a volume is available, bound to a claim, or released by a claim
// +optional
@ -545,6 +563,11 @@ type PersistentVolumeClaimSpec struct {
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
// +optional
StorageClassName *string
// volumeMode defines what type of volume is required by the claim.
// Value of Filesystem is implied when not included in claim spec.
// This is an alpha feature and may change in the future.
// +optional
VolumeMode *PersistentVolumeMode
}
type PersistentVolumeClaimConditionType string
@ -770,6 +793,54 @@ type ISCSIVolumeSource struct {
InitiatorName *string
}
// ISCSIPersistentVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
type ISCSIPersistentVolumeSource struct {
// Required: iSCSI target portal
// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
// +optional
TargetPortal string
// Required: target iSCSI Qualified Name
// +optional
IQN string
// Required: iSCSI target lun number
// +optional
Lun int32
// Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
// +optional
ISCSIInterface string
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool
// Optional: list of iSCSI target portal ips for high availability.
// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
// +optional
Portals []string
// Optional: whether to support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool
// Optional: whether to support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool
// Optional: CHAP secret for iSCSI target and initiator authentication.
// The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
// +optional
SecretRef *SecretReference
// Optional: Custom initiator name per volume.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string
}
// Represents a Fibre Channel volume.
// Fibre Channel volumes can only be mounted as read/write once.
// Fibre Channel volumes support ownership management and SELinux relabeling.
@ -1503,6 +1574,23 @@ type LocalVolumeSource struct {
Path string
}
// Represents storage that is managed by an external CSI volume driver
type CSIPersistentVolumeSource struct {
// Driver is the name of the driver to use for this volume.
// Required.
Driver string
// VolumeHandle is the unique volume name returned by the CSI volume
// plugins CreateVolume to refer to the volume on all subsequent calls.
// Required.
VolumeHandle string
// Optional: The value to pass to ControllerPublishVolumeRequest.
// Defaults to false (read/write).
// +optional
ReadOnly bool
}
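For illustration, a sketch of a PersistentVolume backed by the new CSI source; the driver name and volume handle are hypothetical:

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	pv := core.PersistentVolume{
		Spec: core.PersistentVolumeSpec{
			PersistentVolumeSource: core.PersistentVolumeSource{
				CSI: &core.CSIPersistentVolumeSource{
					Driver:       "com.example.csi-driver", // hypothetical driver name
					VolumeHandle: "vol-0123456789",         // ID returned by the driver's CreateVolume
					ReadOnly:     false,
				},
			},
		},
	}
	fmt.Println(pv.Spec.CSI.Driver)
}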
// ContainerPort represents a network port in a single container
type ContainerPort struct {
// Optional: If specified, this must be an IANA_SVC_NAME Each named port
@ -1530,7 +1618,9 @@ type VolumeMount struct {
// Optional: Defaults to false (read-write).
// +optional
ReadOnly bool
// Required. Must not contain ':'.
// Required. If the path is not an absolute path (e.g. some/path) it
// will be prepended with the appropriate root prefix for the operating
// system. On Linux this is '/', on Windows this is 'C:\'.
MountPath string
// Path within the volume from which the container's volume should be mounted.
// Defaults to "" (volume's root).
@ -1564,6 +1654,14 @@ const (
MountPropagationBidirectional MountPropagationMode = "Bidirectional"
)
// VolumeDevice describes a mapping of a raw block device within a container.
type VolumeDevice struct {
// name must match the name of a persistentVolumeClaim in the pod
Name string
// devicePath is the path inside of the container that the device will be mapped to.
DevicePath string
}
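For illustration, a sketch of the new raw-block plumbing: a claim that requests VolumeMode Block and a container that consumes it through volumeDevices (image and device path are made up):

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	block := core.PersistentVolumeBlock
	claim := core.PersistentVolumeClaim{
		Spec: core.PersistentVolumeClaimSpec{
			// Ask for a raw block device instead of a formatted filesystem (alpha).
			VolumeMode: &block,
		},
	}

	container := core.Container{
		Name:  "consumer",
		Image: "busybox", // hypothetical image
		VolumeDevices: []core.VolumeDevice{{
			Name:       "data",      // must match a persistentVolumeClaim volume name in the pod
			DevicePath: "/dev/xvda", // where the raw device appears inside the container
		}},
	}
	fmt.Println(*claim.Spec.VolumeMode, container.VolumeDevices[0].DevicePath)
}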
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
// Required: This must be a C_IDENTIFIER.
@ -1857,6 +1955,10 @@ type Container struct {
Resources ResourceRequirements
// +optional
VolumeMounts []VolumeMount
// volumeDevices is the list of block devices to be used by the container.
// This is an alpha feature and may change in the future.
// +optional
VolumeDevices []VolumeDevice
// +optional
LivenessProbe *Probe
// +optional
@ -2081,6 +2183,11 @@ const (
// DNSDefault indicates that the pod should use the default (as
// determined by kubelet) DNS settings.
DNSDefault DNSPolicy = "Default"
// DNSNone indicates that the pod should use empty DNS settings. DNS
// parameters such as nameservers and search paths should be defined via
// DNSConfig.
DNSNone DNSPolicy = "None"
)
// A node selector represents the union of the results of one or more label queries
@ -2380,7 +2487,12 @@ type PodSpec struct {
// before the system actively tries to terminate the pod; value must be positive integer
// +optional
ActiveDeadlineSeconds *int64
// Required: Set DNS policy.
// Set DNS policy for the pod.
// Defaults to "ClusterFirst".
// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
// To have DNS options set along with hostNetwork, you have to specify DNS policy
// explicitly to 'ClusterFirstWithHostNet'.
// +optional
DNSPolicy DNSPolicy
// NodeSelector is a selector which must be true for the pod to fit on a node
@ -2444,6 +2556,11 @@ type PodSpec struct {
// The higher the value, the higher the priority.
// +optional
Priority *int32
// Specifies the DNS parameters of a pod.
// Parameters specified here will be merged to the generated DNS
// configuration based on DNSPolicy.
// +optional
DNSConfig *PodDNSConfig
}
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
@ -2533,6 +2650,35 @@ const (
PodQOSBestEffort PodQOSClass = "BestEffort"
)
// PodDNSConfig defines the DNS parameters of a pod in addition to
// those generated from DNSPolicy.
type PodDNSConfig struct {
// A list of DNS name server IP addresses.
// This will be appended to the base nameservers generated from DNSPolicy.
// Duplicated nameservers will be removed.
// +optional
Nameservers []string
// A list of DNS search domains for host-name lookup.
// This will be appended to the base search paths generated from DNSPolicy.
// Duplicated search paths will be removed.
// +optional
Searches []string
// A list of DNS resolver options.
// This will be merged with the base options generated from DNSPolicy.
// Duplicated entries will be removed. Resolution options given in Options
// will override those that appear in the base DNSPolicy.
// +optional
Options []PodDNSConfigOption
}
// PodDNSConfigOption defines DNS resolver options of a pod.
type PodDNSConfigOption struct {
// Required.
Name string
// +optional
Value *string
}
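For illustration, a sketch of the new DNS fields: with DNSPolicy None the pod's resolver configuration comes entirely from DNSConfig (values are made up):

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	ndots := "2"
	spec := core.PodSpec{
		// With DNSNone the pod gets only what DNSConfig provides.
		DNSPolicy: core.DNSNone,
		DNSConfig: &core.PodDNSConfig{
			Nameservers: []string{"1.2.3.4"},
			Searches:    []string{"ns1.svc.cluster.local"},
			Options:     []core.PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
		},
	}
	fmt.Println(spec.DNSPolicy, spec.DNSConfig.Nameservers)
}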
// PodStatus represents information about the status of a pod. Status may trail the actual
// state of a system.
type PodStatus struct {
@ -2725,8 +2871,8 @@ type ReplicationControllerCondition struct {
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/extensions.Scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicationController represents the configuration of a replication controller.
@ -2916,7 +3062,8 @@ type ServiceSpec struct {
// ExternalName is the external reference that kubedns or equivalent will
// return as a CNAME record for this service. No proxying will be involved.
// Must be a valid DNS name and requires Type to be ExternalName.
// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
// and requires Type to be ExternalName.
ExternalName string
// ExternalIPs are used by external load balancers, or can be set by
@ -3406,8 +3553,6 @@ const (
)
const (
// Namespace prefix for opaque counted resources (alpha).
ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-"
// Default namespace prefix.
ResourceDefaultNamespacePrefix = "kubernetes.io/"
// Name prefix for huge page resources (alpha).
@ -3575,6 +3720,10 @@ type DeleteOptions struct {
// Either this field or OrphanDependents may be set, but not both.
// The default policy is decided by the existing finalizer set in the
// metadata.finalizers and the resource-specific default policy.
// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
// allow the garbage collector to delete the dependents in the background;
// 'Foreground' - a cascading policy that deletes all dependents in the
// foreground.
// +optional
PropagationPolicy *DeletionPropagation
}
@ -3809,7 +3958,7 @@ type Event struct {
// +optional
metav1.ObjectMeta
// Required. The object that this event is about.
// Required. The object that this event is about. Mapped to events.Event.regarding
// +optional
InvolvedObject ObjectReference
@ -3821,7 +3970,7 @@ type Event struct {
Reason string
// Optional. A human-readable description of the status of this operation.
// TODO: decide on maximum length.
// TODO: decide on maximum length. Mapped to events.Event.note
// +optional
Message string
@ -3844,8 +3993,49 @@ type Event struct {
// Type of this event (Normal, Warning), new types could be added in the future.
// +optional
Type string
// Time when this Event was first observed.
// +optional
EventTime metav1.MicroTime
// Data about the Event series this event represents or nil if it's a singleton Event.
// +optional
Series *EventSeries
// What action was taken/failed regarding the Regarding object.
// +optional
Action string
// Optional secondary object for more complex actions.
// +optional
Related *ObjectReference
// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
// +optional
ReportingController string
// ID of the controller instance, e.g. `kubelet-xyzf`.
// +optional
ReportingInstance string
}
type EventSeries struct {
// Number of occurrences in this series up to the last heartbeat time
Count int32
// Time of the last occurrence observed
LastObservedTime metav1.MicroTime
// State of this Series: Ongoing or Finished
State EventSeriesState
}
type EventSeriesState string
const (
EventSeriesStateOngoing EventSeriesState = "Ongoing"
EventSeriesStateFinished EventSeriesState = "Finished"
EventSeriesStateUnknown EventSeriesState = "Unknown"
)
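For illustration, a sketch of a "new-style" Event populated with the fields added above; the controller names, reason and message are made up:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	ev := core.Event{
		ObjectMeta:     metav1.ObjectMeta{Name: "example-event", Namespace: "default"},
		InvolvedObject: core.ObjectReference{Kind: "Pod", Namespace: "default", Name: "example-pod"},
		Reason:         "Scheduled",
		Message:        "successfully assigned to a node",
		Type:           "Normal",
		// New in this API revision: a microsecond timestamp, series data and reporter identity.
		EventTime:           metav1.NewMicroTime(time.Now()),
		Action:              "Binding",
		ReportingController: "example.com/sample-scheduler", // hypothetical controller name
		ReportingInstance:   "sample-scheduler-1",
		Series: &core.EventSeries{
			Count:            1,
			LastObservedTime: metav1.NewMicroTime(time.Now()),
			State:            core.EventSeriesStateOngoing,
		},
	}
	fmt.Println(ev.Reason, ev.ReportingController)
}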
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EventList is a list of events.
@ -3964,6 +4154,13 @@ const (
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
)
// The following identify resource prefix for Kubernetes object types
const (
// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
ResourceRequestsHugePagesPrefix = "requests.hugepages-"
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
type ResourceQuotaScope string
@ -4350,5 +4547,5 @@ const (
// corresponding to every RequiredDuringScheduling affinity rule.
// When the --hard-pod-affinity-weight scheduler flag is not specified,
// DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
DefaultHardPodAffinitySymmetricWeight int = 1
DefaultHardPodAffinitySymmetricWeight int32 = 1
)

View file

@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/extensions"
)
@ -38,79 +38,79 @@ func addFastPathConversionFuncs(scheme *runtime.Scheme) error {
switch a := objA.(type) {
case *v1.Pod:
switch b := objB.(type) {
case *api.Pod:
return true, Convert_v1_Pod_To_api_Pod(a, b, s)
case *core.Pod:
return true, Convert_v1_Pod_To_core_Pod(a, b, s)
}
case *api.Pod:
case *core.Pod:
switch b := objB.(type) {
case *v1.Pod:
return true, Convert_api_Pod_To_v1_Pod(a, b, s)
return true, Convert_core_Pod_To_v1_Pod(a, b, s)
}
case *v1.Event:
switch b := objB.(type) {
case *api.Event:
return true, Convert_v1_Event_To_api_Event(a, b, s)
case *core.Event:
return true, Convert_v1_Event_To_core_Event(a, b, s)
}
case *api.Event:
case *core.Event:
switch b := objB.(type) {
case *v1.Event:
return true, Convert_api_Event_To_v1_Event(a, b, s)
return true, Convert_core_Event_To_v1_Event(a, b, s)
}
case *v1.ReplicationController:
switch b := objB.(type) {
case *api.ReplicationController:
return true, Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s)
case *core.ReplicationController:
return true, Convert_v1_ReplicationController_To_core_ReplicationController(a, b, s)
}
case *api.ReplicationController:
case *core.ReplicationController:
switch b := objB.(type) {
case *v1.ReplicationController:
return true, Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s)
return true, Convert_core_ReplicationController_To_v1_ReplicationController(a, b, s)
}
case *v1.Node:
switch b := objB.(type) {
case *api.Node:
return true, Convert_v1_Node_To_api_Node(a, b, s)
case *core.Node:
return true, Convert_v1_Node_To_core_Node(a, b, s)
}
case *api.Node:
case *core.Node:
switch b := objB.(type) {
case *v1.Node:
return true, Convert_api_Node_To_v1_Node(a, b, s)
return true, Convert_core_Node_To_v1_Node(a, b, s)
}
case *v1.Namespace:
switch b := objB.(type) {
case *api.Namespace:
return true, Convert_v1_Namespace_To_api_Namespace(a, b, s)
case *core.Namespace:
return true, Convert_v1_Namespace_To_core_Namespace(a, b, s)
}
case *api.Namespace:
case *core.Namespace:
switch b := objB.(type) {
case *v1.Namespace:
return true, Convert_api_Namespace_To_v1_Namespace(a, b, s)
return true, Convert_core_Namespace_To_v1_Namespace(a, b, s)
}
case *v1.Service:
switch b := objB.(type) {
case *api.Service:
return true, Convert_v1_Service_To_api_Service(a, b, s)
case *core.Service:
return true, Convert_v1_Service_To_core_Service(a, b, s)
}
case *api.Service:
case *core.Service:
switch b := objB.(type) {
case *v1.Service:
return true, Convert_api_Service_To_v1_Service(a, b, s)
return true, Convert_core_Service_To_v1_Service(a, b, s)
}
case *v1.Endpoints:
switch b := objB.(type) {
case *api.Endpoints:
return true, Convert_v1_Endpoints_To_api_Endpoints(a, b, s)
case *core.Endpoints:
return true, Convert_v1_Endpoints_To_core_Endpoints(a, b, s)
}
case *api.Endpoints:
case *core.Endpoints:
switch b := objB.(type) {
case *v1.Endpoints:
return true, Convert_api_Endpoints_To_v1_Endpoints(a, b, s)
return true, Convert_core_Endpoints_To_v1_Endpoints(a, b, s)
}
case *metav1.WatchEvent:
@ -132,16 +132,16 @@ func addFastPathConversionFuncs(scheme *runtime.Scheme) error {
func addConversionFuncs(scheme *runtime.Scheme) error {
// Add non-generated conversion functions
err := scheme.AddConversionFuncs(
Convert_api_Pod_To_v1_Pod,
Convert_api_PodSpec_To_v1_PodSpec,
Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec,
Convert_api_ServiceSpec_To_v1_ServiceSpec,
Convert_v1_Pod_To_api_Pod,
Convert_v1_PodSpec_To_api_PodSpec,
Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec,
Convert_v1_Secret_To_api_Secret,
Convert_v1_ServiceSpec_To_api_ServiceSpec,
Convert_v1_ResourceList_To_api_ResourceList,
Convert_core_Pod_To_v1_Pod,
Convert_core_PodSpec_To_v1_PodSpec,
Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec,
Convert_core_ServiceSpec_To_v1_ServiceSpec,
Convert_v1_Pod_To_core_Pod,
Convert_v1_PodSpec_To_core_PodSpec,
Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec,
Convert_v1_Secret_To_core_Secret,
Convert_v1_ServiceSpec_To_core_ServiceSpec,
Convert_v1_ResourceList_To_core_ResourceList,
Convert_v1_ReplicationController_to_extensions_ReplicaSet,
Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec,
Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus,
@ -157,20 +157,15 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
err = scheme.AddFieldLabelConversionFunc("v1", "Pod",
func(label, value string) (string, string, error) {
switch label {
case "metadata.annotations",
"metadata.labels",
"metadata.name",
case "metadata.name",
"metadata.namespace",
"metadata.uid",
"spec.nodeName",
"spec.restartPolicy",
"spec.serviceAccountName",
"spec.schedulerName",
"status.phase",
"status.hostIP",
"status.podIP":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":
return "spec.nodeName", value, nil
default:
@ -241,7 +236,7 @@ func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *v1.Re
metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s)
}
if in.Template != nil {
if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, &out.Template, s); err != nil {
if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, &out.Template, s); err != nil {
return err
}
}
@ -257,7 +252,7 @@ func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *v
for _, cond := range in.Conditions {
out.Conditions = append(out.Conditions, extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetConditionType(cond.Type),
Status: api.ConditionStatus(cond.Status),
Status: core.ConditionStatus(cond.Status),
LastTransitionTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
@ -293,7 +288,7 @@ func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *exten
invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s)
}
out.Template = new(v1.PodTemplateSpec)
if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil {
if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil {
return err
}
return invalidErr
@ -317,13 +312,13 @@ func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *e
return nil
}
func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error {
func Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error {
out.Replicas = &in.Replicas
out.MinReadySeconds = in.MinReadySeconds
out.Selector = in.Selector
if in.Template != nil {
out.Template = new(v1.PodTemplateSpec)
if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil {
if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil {
return err
}
} else {
@ -332,15 +327,15 @@ func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *a
return nil
}
func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error {
func Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error {
if in.Replicas != nil {
out.Replicas = *in.Replicas
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = in.Selector
if in.Template != nil {
out.Template = new(api.PodTemplateSpec)
if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil {
out.Template = new(core.PodTemplateSpec)
if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, out.Template, s); err != nil {
return err
}
} else {
@ -349,16 +344,16 @@ func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v
return nil
}
func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error {
if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil {
func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error {
if err := autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil {
return err
}
return nil
}
func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil {
func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error {
if err := autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in, out, s); err != nil {
return err
}
@ -367,8 +362,8 @@ func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, o
// The following two v1.PodSpec conversions are done here to support v1.ServiceAccount
// as an alias for ServiceAccountName.
func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error {
if err := autoConvert_api_PodSpec_To_v1_PodSpec(in, out, s); err != nil {
func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error {
if err := autoConvert_core_PodSpec_To_v1_PodSpec(in, out, s); err != nil {
return err
}
@ -386,8 +381,8 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conve
return nil
}
func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error {
if err := autoConvert_v1_PodSpec_To_api_PodSpec(in, out, s); err != nil {
func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error {
if err := autoConvert_v1_PodSpec_To_core_PodSpec(in, out, s); err != nil {
return err
}
@ -400,7 +395,7 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve
// the host namespace fields have to be handled specially for backward compatibility
// with v1.0.0
if out.SecurityContext == nil {
out.SecurityContext = new(api.PodSecurityContext)
out.SecurityContext = new(core.PodSecurityContext)
}
out.SecurityContext.HostNetwork = in.HostNetwork
out.SecurityContext.HostPID = in.HostPID
@ -409,8 +404,8 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve
return nil
}
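For illustration, a sketch of exercising these conversions through the scheme rather than calling them directly. It assumes the core install package lives at k8s.io/kubernetes/pkg/apis/core/install and registers the group into legacyscheme.Scheme in its init, which is how the generated and hand-written conversions above normally get wired up:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	core "k8s.io/kubernetes/pkg/apis/core"
	// Assumed to register the core group (and its conversions) into legacyscheme.Scheme.
	_ "k8s.io/kubernetes/pkg/apis/core/install"
)

func main() {
	in := &v1.Pod{}
	in.Name = "example"
	in.Spec.ServiceAccountName = "builder"

	out := &core.Pod{}
	// The fast-path switch above handles *v1.Pod -> *core.Pod directly.
	if err := legacyscheme.Scheme.Convert(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Spec.ServiceAccountName)
}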
func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) error {
if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil {
func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error {
if err := autoConvert_core_Pod_To_v1_Pod(in, out, s); err != nil {
return err
}
@ -431,8 +426,8 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) err
return nil
}
func Convert_v1_Secret_To_api_Secret(in *v1.Secret, out *api.Secret, s conversion.Scope) error {
if err := autoConvert_v1_Secret_To_api_Secret(in, out, s); err != nil {
func Convert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error {
if err := autoConvert_v1_Secret_To_core_Secret(in, out, s); err != nil {
return err
}
@ -448,10 +443,10 @@ func Convert_v1_Secret_To_api_Secret(in *v1.Secret, out *api.Secret, s conversio
return nil
}
func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error {
func Convert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error {
if in.Capabilities != nil {
out.Capabilities = new(v1.Capabilities)
if err := Convert_api_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil {
if err := Convert_core_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil {
return err
}
} else {
@ -460,7 +455,7 @@ func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext,
out.Privileged = in.Privileged
if in.SELinuxOptions != nil {
out.SELinuxOptions = new(v1.SELinuxOptions)
if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
return err
}
} else {
@ -473,11 +468,11 @@ func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext,
return nil
}
func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error {
func Convert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error {
out.SupplementalGroups = in.SupplementalGroups
if in.SELinuxOptions != nil {
out.SELinuxOptions = new(v1.SELinuxOptions)
if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
return err
}
} else {
@ -489,11 +484,11 @@ func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurity
return nil
}
func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error {
func Convert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error {
out.SupplementalGroups = in.SupplementalGroups
if in.SELinuxOptions != nil {
out.SELinuxOptions = new(api.SELinuxOptions)
if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
out.SELinuxOptions = new(core.SELinuxOptions)
if err := Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
return err
}
} else {
@ -506,12 +501,12 @@ func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityC
}
// +k8s:conversion-fn=copy-only
func Convert_v1_ResourceList_To_api_ResourceList(in *v1.ResourceList, out *api.ResourceList, s conversion.Scope) error {
func Convert_v1_ResourceList_To_core_ResourceList(in *v1.ResourceList, out *core.ResourceList, s conversion.Scope) error {
if *in == nil {
return nil
}
if *out == nil {
*out = make(api.ResourceList, len(*in))
*out = make(core.ResourceList, len(*in))
}
for key, val := range *in {
// Moved to defaults
@ -520,7 +515,7 @@ func Convert_v1_ResourceList_To_api_ResourceList(in *v1.ResourceList, out *api.R
// const milliScale = -3
// val.RoundUp(milliScale)
(*out)[api.ResourceName(key)] = val
(*out)[core.ResourceName(key)] = val
}
return nil
}

View file

@ -20,6 +20,8 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/parsers"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
)
@ -39,14 +41,6 @@ func SetDefaults_ResourceList(obj *v1.ResourceList) {
}
}
func SetDefaults_PodExecOptions(obj *v1.PodExecOptions) {
obj.Stdout = true
obj.Stderr = true
}
func SetDefaults_PodAttachOptions(obj *v1.PodAttachOptions) {
obj.Stdout = true
obj.Stderr = true
}
func SetDefaults_ReplicationController(obj *v1.ReplicationController) {
var labels map[string]string
if obj.Spec.Template != nil {
@ -236,17 +230,30 @@ func SetDefaults_PersistentVolume(obj *v1.PersistentVolume) {
if obj.Spec.PersistentVolumeReclaimPolicy == "" {
obj.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain
}
if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
obj.Spec.VolumeMode = new(v1.PersistentVolumeMode)
*obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem
}
}
func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) {
if obj.Status.Phase == "" {
obj.Status.Phase = v1.ClaimPending
}
if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
obj.Spec.VolumeMode = new(v1.PersistentVolumeMode)
*obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem
}
}
func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) {
if obj.ISCSIInterface == "" {
obj.ISCSIInterface = "default"
}
}
func SetDefaults_ISCSIPersistentVolumeSource(obj *v1.ISCSIPersistentVolumeSource) {
if obj.ISCSIInterface == "" {
obj.ISCSIInterface = "default"
}
}
func SetDefaults_AzureDiskVolumeSource(obj *v1.AzureDiskVolumeSource) {
if obj.CachingMode == nil {
obj.CachingMode = new(v1.AzureDataDiskCachingMode)

View file

@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/api
// +k8s:conversion-gen-external-types=../../../vendor/k8s.io/api/core/v1
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/core
// +k8s:conversion-gen-external-types=k8s.io/api/core/v1
// +k8s:defaulter-gen=TypeMeta
// +k8s:defaulter-gen-input=../../../vendor/k8s.io/api/core/v1
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/core/v1
// Package v1 is the v1 version of the API.
package v1 // import "k8s.io/kubernetes/pkg/api/v1"
package v1 // import "k8s.io/kubernetes/pkg/apis/core/v1"

View file

@ -26,14 +26,13 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/helper"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
// IsExtendedResourceName returns true if the resource name is not in the
// default namespace, or it has the opaque integer resource prefix.
// default namespace.
func IsExtendedResourceName(name v1.ResourceName) bool {
// TODO: Remove OIR part following deprecation.
return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name)
return !IsDefaultNamespaceResource(name)
}
// IsDefaultNamespaceResource returns true if the resource name is in the
@ -69,22 +68,6 @@ func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, erro
return resource.ParseQuantity(pageSize)
}
// IsOpaqueIntResourceName returns true if the resource name has the opaque
// integer resource prefix.
func IsOpaqueIntResourceName(name v1.ResourceName) bool {
return strings.HasPrefix(string(name), v1.ResourceOpaqueIntPrefix)
}
// OpaqueIntResourceName returns a ResourceName with the canonical opaque
// integer prefix prepended. If the argument already has the prefix, it is
// returned unmodified.
func OpaqueIntResourceName(name string) v1.ResourceName {
if IsOpaqueIntResourceName(v1.ResourceName(name)) {
return v1.ResourceName(name)
}
return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceOpaqueIntPrefix, name))
}
var overcommitBlacklist = sets.NewString(string(v1.ResourceNvidiaGPU))
// IsOvercommitAllowed returns true if the resource is in the default

File diff suppressed because it is too large Load diff

View file

@ -46,8 +46,6 @@ func RegisterDefaults(scheme *runtime.Scheme) error {
})
scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*v1.PersistentVolumeList)) })
scheme.AddTypeDefaultingFunc(&v1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*v1.Pod)) })
scheme.AddTypeDefaultingFunc(&v1.PodAttachOptions{}, func(obj interface{}) { SetObjectDefaults_PodAttachOptions(obj.(*v1.PodAttachOptions)) })
scheme.AddTypeDefaultingFunc(&v1.PodExecOptions{}, func(obj interface{}) { SetObjectDefaults_PodExecOptions(obj.(*v1.PodExecOptions)) })
scheme.AddTypeDefaultingFunc(&v1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*v1.PodList)) })
scheme.AddTypeDefaultingFunc(&v1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*v1.PodTemplate)) })
scheme.AddTypeDefaultingFunc(&v1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*v1.PodTemplateList)) })
@ -140,7 +138,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) {
SetDefaults_RBDPersistentVolumeSource(in.Spec.PersistentVolumeSource.RBD)
}
if in.Spec.PersistentVolumeSource.ISCSI != nil {
SetDefaults_ISCSIVolumeSource(in.Spec.PersistentVolumeSource.ISCSI)
SetDefaults_ISCSIPersistentVolumeSource(in.Spec.PersistentVolumeSource.ISCSI)
}
if in.Spec.PersistentVolumeSource.AzureDisk != nil {
SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk)
@ -308,14 +306,6 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
}
}
func SetObjectDefaults_PodAttachOptions(in *v1.PodAttachOptions) {
SetDefaults_PodAttachOptions(in)
}
func SetObjectDefaults_PodExecOptions(in *v1.PodExecOptions) {
SetDefaults_PodExecOptions(in)
}
func SetObjectDefaults_PodList(in *v1.PodList) {
for i := range in.Items {
a := &in.Items[i]

View file

@ -16,4 +16,4 @@ limitations under the License.
// Package validation has functions for validating the correctness of api
// objects and explaining what is wrong with them when they aren't valid.
package validation // import "k8s.io/kubernetes/pkg/api/validation"
package validation // import "k8s.io/kubernetes/pkg/apis/core/validation"

View file

@ -0,0 +1,129 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/core"
)
const (
ReportingInstanceLengthLimit = 128
ActionLengthLimit = 128
ReasonLengthLimit = 128
NoteLengthLimit = 1024
)
// ValidateEvent makes sure that the event makes sense.
func ValidateEvent(event *core.Event) field.ErrorList {
allErrs := field.ErrorList{}
// Go's time.Time has no nil value, so compare against the zero value.
zeroTime := time.Time{}
// "New" Events need EventTime set; if it is unset we are validating an old-style object.
if event.EventTime.Time == zeroTime {
// Make sure event.Namespace and the involvedObject.Namespace agree
if len(event.InvolvedObject.Namespace) == 0 {
// event.Namespace must also be empty (or "default", for compatibility with old clients)
if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
} else {
// event namespace must match
if event.Namespace != event.InvolvedObject.Namespace {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
}
} else {
if len(event.InvolvedObject.Namespace) == 0 && event.Namespace != metav1.NamespaceSystem {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
if len(event.ReportingController) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("reportingController"), ""))
}
for _, msg := range validation.IsQualifiedName(event.ReportingController) {
allErrs = append(allErrs, field.Invalid(field.NewPath("reportingController"), event.ReportingController, msg))
}
if len(event.ReportingInstance) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("reportingInstance"), ""))
}
if len(event.ReportingInstance) > ReportingInstanceLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("reportingInstance"), "", fmt.Sprintf("can have at most %v characters", ReportingInstanceLengthLimit)))
}
if len(event.Action) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("action"), ""))
}
if len(event.Action) > ActionLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("action"), "", fmt.Sprintf("can have at most %v characters", ActionLengthLimit)))
}
if len(event.Reason) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("reason"), ""))
}
if len(event.Reason) > ReasonLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("reason"), "", fmt.Sprintf("can have at most %v characters", ReasonLengthLimit)))
}
if len(event.Message) > NoteLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("message"), "", fmt.Sprintf("can have at most %v characters", NoteLengthLimit)))
}
}
// For kinds we recognize, make sure InvolvedObject.Namespace is set for namespaced kinds
if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil {
if namespaced && len(event.InvolvedObject.Namespace) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind)))
}
if !namespaced && len(event.InvolvedObject.Namespace) > 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind)))
}
}
for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) {
allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg))
}
return allErrs
}
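For illustration, a sketch of the new validation branch: once EventTime is set, the reporter identity, action and reason become required, so validating a half-filled event returns errors for those fields (import paths follow the package clauses shown in this commit):

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/validation"
)

func main() {
	// EventTime is set, so the stricter branch applies; reportingController,
	// reportingInstance, action and reason are left empty on purpose.
	ev := &core.Event{
		ObjectMeta:     metav1.ObjectMeta{Name: "e1", Namespace: metav1.NamespaceSystem},
		InvolvedObject: core.ObjectReference{Kind: "Node", Name: "node-1", APIVersion: "v1"},
		EventTime:      metav1.NewMicroTime(time.Now()),
	}
	for _, err := range validation.ValidateEvent(ev) {
		fmt.Println(err)
	}
}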
// Check whether the kind in groupVersion is scoped at the root of the api hierarchy
func isNamespacedKind(kind, groupVersion string) (bool, error) {
gv, err := schema.ParseGroupVersion(groupVersion)
if err != nil {
return false, err
}
g, err := legacyscheme.Registry.Group(gv.Group)
if err != nil {
return false, err
}
restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: kind}, gv.Version)
if err != nil {
return false, err
}
scopeName := restMapping.Scope.Name()
if scopeName == meta.RESTScopeNameNamespace {
return true, nil
}
return false, nil
}

View file

@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:deepcopy-gen=package
package extensions // import "k8s.io/kubernetes/pkg/apis/extensions"

View file

@ -19,6 +19,7 @@ package extensions
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/networking"
)
@ -51,19 +52,15 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&DeploymentList{},
&DeploymentRollback{},
&ReplicationControllerDummy{},
&Scale{},
&ThirdPartyResource{},
&ThirdPartyResourceList{},
&DaemonSetList{},
&DaemonSet{},
&ThirdPartyResourceData{},
&ThirdPartyResourceDataList{},
&Ingress{},
&IngressList{},
&ReplicaSet{},
&ReplicaSetList{},
&PodSecurityPolicy{},
&PodSecurityPolicyList{},
&autoscaling.Scale{},
&networking.NetworkPolicy{},
&networking.NetworkPolicyList{},
)

View file

@ -32,7 +32,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/api"
api "k8s.io/kubernetes/pkg/apis/core"
)
const (
@ -42,44 +42,6 @@ const (
SysctlsPodSecurityPolicyAnnotationKey string = "security.alpha.kubernetes.io/sysctls"
)
// describes the attributes of a scale subresource
type ScaleSpec struct {
// desired number of instances for the scaled object.
// +optional
Replicas int32
}
// represents the current status of a scale subresource.
type ScaleStatus struct {
// actual number of observed instances of the scaled object.
Replicas int32
// label query over pods that should match the replicas count.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector
}
// +genclient
// +genclient:noVerbs
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// represents a scaling request for a resource.
type Scale struct {
metav1.TypeMeta
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
// +optional
Spec ScaleSpec
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
// +optional
Status ScaleStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Dummy definition
@ -111,63 +73,8 @@ type CustomMetricCurrentStatusList struct {
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource
// types to the API. It consists of one or more Versions of the api.
type ThirdPartyResource struct {
metav1.TypeMeta
// Standard object metadata
// +optional
metav1.ObjectMeta
// Description is the description of this object.
// +optional
Description string
// Versions are versions for this third party object
Versions []APIVersion
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ThirdPartyResourceList struct {
metav1.TypeMeta
// Standard list metadata.
// +optional
metav1.ListMeta
// Items is the list of horizontal pod autoscalers.
Items []ThirdPartyResource
}
// An APIVersion represents a single concrete version of an object model.
// TODO: we should consider merging this struct with GroupVersion in metav1.go
type APIVersion struct {
// Name of this version (e.g. 'v1').
Name string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// An internal object, used for versioned storage in etcd. Not exposed to the end user.
type ThirdPartyResourceData struct {
metav1.TypeMeta
// Standard object metadata.
// +optional
metav1.ObjectMeta
// Data is the raw JSON data for this data.
// +optional
Data []byte
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Deployment struct {
@ -524,6 +431,27 @@ type DaemonSetStatus struct {
// create the name for the newest ControllerRevision.
// +optional
CollisionCount *int32
// Represents the latest available observations of a DaemonSet's current state.
Conditions []DaemonSetCondition
}
type DaemonSetConditionType string
// TODO: Add valid condition types of a DaemonSet.
// DaemonSetCondition describes the state of a DaemonSet at a certain point.
type DaemonSetCondition struct {
// Type of DaemonSet condition.
Type DaemonSetConditionType
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus
// Last time the condition transitioned from one status to another.
LastTransitionTime metav1.Time
// The reason for the condition's last transition.
Reason string
// A human readable message indicating details about the transition.
Message string
}
// +genclient
@ -573,18 +501,6 @@ type DaemonSetList struct {
Items []DaemonSet
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ThirdPartyResourceDataList struct {
metav1.TypeMeta
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta
// Items is a list of third party objects
Items []ThirdPartyResourceData
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -755,8 +671,8 @@ type IngressBackend struct {
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
@ -888,7 +804,8 @@ type PodSecurityPolicySpec struct {
Privileged bool
// DefaultAddCapabilities is the default set of capabilities that will be added to the container
// unless the pod spec specifically drops the capability. You may not list a capability in both
// DefaultAddCapabilities and RequiredDropCapabilities.
// DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly
// allowed, and need not be included in the AllowedCapabilities list.
// +optional
DefaultAddCapabilities []api.Capability
// RequiredDropCapabilities are the capabilities that will be dropped from the container. These
@ -943,6 +860,11 @@ type PodSecurityPolicySpec struct {
// AllowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.
// +optional
AllowedHostPaths []AllowedHostPath
// AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
// is allowed in the "Volumes" field.
// +optional
AllowedFlexVolumes []AllowedFlexVolume
}
// AllowedHostPath defines the host volume conditions that will be enabled by a policy
@ -962,9 +884,9 @@ type AllowedHostPath struct {
// for pods to use. It requires both the start and end to be defined.
type HostPortRange struct {
// Min is the start of the range, inclusive.
Min int
Min int32
// Max is the end of the range, inclusive.
Max int
Max int32
}
// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities
@ -1002,9 +924,16 @@ var (
Projected FSType = "projected"
PortworxVolume FSType = "portworxVolume"
ScaleIO FSType = "scaleIO"
CSI FSType = "csi"
All FSType = "*"
)
// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
type AllowedFlexVolume struct {
// Driver is the name of the Flexvolume driver.
Driver string
}
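To make the new whitelist concrete, here is a minimal sketch (not part of the vendored diff; the driver name is a placeholder) of a PodSecurityPolicySpec that admits exactly one Flexvolume driver:
package main
import (
	"fmt"
	"k8s.io/kubernetes/pkg/apis/extensions"
)
func main() {
	// Hypothetical policy: non-privileged pods may only use the single listed
	// Flexvolume driver; leaving AllowedFlexVolumes empty or nil would allow all drivers.
	spec := extensions.PodSecurityPolicySpec{
		Privileged: false,
		AllowedFlexVolumes: []extensions.AllowedFlexVolume{
			{Driver: "example.com/lvm"},
		},
	}
	fmt.Printf("policy allows %d Flexvolume driver(s)\n", len(spec.AllowedFlexVolumes))
}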
// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
type SELinuxStrategyOptions struct {
// Rule is the strategy that will dictate the allowable labels that may be set.

View file

@ -22,253 +22,22 @@ package extensions
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
api "k8s.io/kubernetes/pkg/api"
reflect "reflect"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
SchemeBuilder.Register(RegisterDeepCopies)
}
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
//
// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
return scheme.AddGeneratedDeepCopyFuncs(
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*APIVersion).DeepCopyInto(out.(*APIVersion))
return nil
}, InType: reflect.TypeOf(&APIVersion{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*AllowedHostPath).DeepCopyInto(out.(*AllowedHostPath))
return nil
}, InType: reflect.TypeOf(&AllowedHostPath{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*CustomMetricCurrentStatus).DeepCopyInto(out.(*CustomMetricCurrentStatus))
return nil
}, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*CustomMetricCurrentStatusList).DeepCopyInto(out.(*CustomMetricCurrentStatusList))
return nil
}, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*CustomMetricTarget).DeepCopyInto(out.(*CustomMetricTarget))
return nil
}, InType: reflect.TypeOf(&CustomMetricTarget{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*CustomMetricTargetList).DeepCopyInto(out.(*CustomMetricTargetList))
return nil
}, InType: reflect.TypeOf(&CustomMetricTargetList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet))
return nil
}, InType: reflect.TypeOf(&DaemonSet{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList))
return nil
}, InType: reflect.TypeOf(&DaemonSetList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec))
return nil
}, InType: reflect.TypeOf(&DaemonSetSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus))
return nil
}, InType: reflect.TypeOf(&DaemonSetStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy))
return nil
}, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Deployment).DeepCopyInto(out.(*Deployment))
return nil
}, InType: reflect.TypeOf(&Deployment{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition))
return nil
}, InType: reflect.TypeOf(&DeploymentCondition{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList))
return nil
}, InType: reflect.TypeOf(&DeploymentList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback))
return nil
}, InType: reflect.TypeOf(&DeploymentRollback{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec))
return nil
}, InType: reflect.TypeOf(&DeploymentSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus))
return nil
}, InType: reflect.TypeOf(&DeploymentStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy))
return nil
}, InType: reflect.TypeOf(&DeploymentStrategy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*FSGroupStrategyOptions).DeepCopyInto(out.(*FSGroupStrategyOptions))
return nil
}, InType: reflect.TypeOf(&FSGroupStrategyOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*GroupIDRange).DeepCopyInto(out.(*GroupIDRange))
return nil
}, InType: reflect.TypeOf(&GroupIDRange{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*HTTPIngressPath).DeepCopyInto(out.(*HTTPIngressPath))
return nil
}, InType: reflect.TypeOf(&HTTPIngressPath{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*HTTPIngressRuleValue).DeepCopyInto(out.(*HTTPIngressRuleValue))
return nil
}, InType: reflect.TypeOf(&HTTPIngressRuleValue{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*HostPortRange).DeepCopyInto(out.(*HostPortRange))
return nil
}, InType: reflect.TypeOf(&HostPortRange{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Ingress).DeepCopyInto(out.(*Ingress))
return nil
}, InType: reflect.TypeOf(&Ingress{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressBackend).DeepCopyInto(out.(*IngressBackend))
return nil
}, InType: reflect.TypeOf(&IngressBackend{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressList).DeepCopyInto(out.(*IngressList))
return nil
}, InType: reflect.TypeOf(&IngressList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressRule).DeepCopyInto(out.(*IngressRule))
return nil
}, InType: reflect.TypeOf(&IngressRule{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressRuleValue).DeepCopyInto(out.(*IngressRuleValue))
return nil
}, InType: reflect.TypeOf(&IngressRuleValue{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressSpec).DeepCopyInto(out.(*IngressSpec))
return nil
}, InType: reflect.TypeOf(&IngressSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressStatus).DeepCopyInto(out.(*IngressStatus))
return nil
}, InType: reflect.TypeOf(&IngressStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IngressTLS).DeepCopyInto(out.(*IngressTLS))
return nil
}, InType: reflect.TypeOf(&IngressTLS{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*PodSecurityPolicy).DeepCopyInto(out.(*PodSecurityPolicy))
return nil
}, InType: reflect.TypeOf(&PodSecurityPolicy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*PodSecurityPolicyList).DeepCopyInto(out.(*PodSecurityPolicyList))
return nil
}, InType: reflect.TypeOf(&PodSecurityPolicyList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*PodSecurityPolicySpec).DeepCopyInto(out.(*PodSecurityPolicySpec))
return nil
}, InType: reflect.TypeOf(&PodSecurityPolicySpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet))
return nil
}, InType: reflect.TypeOf(&ReplicaSet{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition))
return nil
}, InType: reflect.TypeOf(&ReplicaSetCondition{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList))
return nil
}, InType: reflect.TypeOf(&ReplicaSetList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec))
return nil
}, InType: reflect.TypeOf(&ReplicaSetSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus))
return nil
}, InType: reflect.TypeOf(&ReplicaSetStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ReplicationControllerDummy).DeepCopyInto(out.(*ReplicationControllerDummy))
return nil
}, InType: reflect.TypeOf(&ReplicationControllerDummy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig))
return nil
}, InType: reflect.TypeOf(&RollbackConfig{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet))
return nil
}, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment))
return nil
}, InType: reflect.TypeOf(&RollingUpdateDeployment{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RunAsUserStrategyOptions).DeepCopyInto(out.(*RunAsUserStrategyOptions))
return nil
}, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*SELinuxStrategyOptions).DeepCopyInto(out.(*SELinuxStrategyOptions))
return nil
}, InType: reflect.TypeOf(&SELinuxStrategyOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Scale).DeepCopyInto(out.(*Scale))
return nil
}, InType: reflect.TypeOf(&Scale{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec))
return nil
}, InType: reflect.TypeOf(&ScaleSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus))
return nil
}, InType: reflect.TypeOf(&ScaleStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*SupplementalGroupsStrategyOptions).DeepCopyInto(out.(*SupplementalGroupsStrategyOptions))
return nil
}, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ThirdPartyResource).DeepCopyInto(out.(*ThirdPartyResource))
return nil
}, InType: reflect.TypeOf(&ThirdPartyResource{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ThirdPartyResourceData).DeepCopyInto(out.(*ThirdPartyResourceData))
return nil
}, InType: reflect.TypeOf(&ThirdPartyResourceData{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ThirdPartyResourceDataList).DeepCopyInto(out.(*ThirdPartyResourceDataList))
return nil
}, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ThirdPartyResourceList).DeepCopyInto(out.(*ThirdPartyResourceList))
return nil
}, InType: reflect.TypeOf(&ThirdPartyResourceList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*UserIDRange).DeepCopyInto(out.(*UserIDRange))
return nil
}, InType: reflect.TypeOf(&UserIDRange{})},
)
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIVersion) DeepCopyInto(out *APIVersion) {
func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersion.
func (in *APIVersion) DeepCopy() *APIVersion {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
if in == nil {
return nil
}
out := new(APIVersion)
out := new(AllowedFlexVolume)
in.DeepCopyInto(out)
return out
}
@ -398,6 +167,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object {
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
if in == nil {
return nil
}
out := new(DaemonSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
*out = *in
@ -480,6 +266,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
**out = **in
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DaemonSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@ -1101,17 +894,17 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
*out = *in
if in.DefaultAddCapabilities != nil {
in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
*out = make([]api.Capability, len(*in))
*out = make([]core.Capability, len(*in))
copy(*out, *in)
}
if in.RequiredDropCapabilities != nil {
in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
*out = make([]api.Capability, len(*in))
*out = make([]core.Capability, len(*in))
copy(*out, *in)
}
if in.AllowedCapabilities != nil {
in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
*out = make([]api.Capability, len(*in))
*out = make([]core.Capability, len(*in))
copy(*out, *in)
}
if in.Volumes != nil {
@ -1142,6 +935,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
*out = make([]AllowedHostPath, len(*in))
copy(*out, *in)
}
if in.AllowedFlexVolumes != nil {
in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
*out = make([]AllowedFlexVolume, len(*in))
copy(*out, *in)
}
return
}
@ -1390,7 +1188,7 @@ func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) {
if *in == nil {
*out = nil
} else {
*out = new(api.SELinuxOptions)
*out = new(core.SELinuxOptions)
**out = **in
}
}
@ -1407,76 +1205,6 @@ func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scale) DeepCopyInto(out *Scale) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
func (in *Scale) DeepCopy() *Scale {
if in == nil {
return nil
}
out := new(Scale)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
func (in *ScaleSpec) DeepCopy() *ScaleSpec {
if in == nil {
return nil
}
out := new(ScaleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
func (in *ScaleStatus) DeepCopy() *ScaleStatus {
if in == nil {
return nil
}
out := new(ScaleStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) {
*out = *in
@ -1498,138 +1226,6 @@ func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrat
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThirdPartyResource) DeepCopyInto(out *ThirdPartyResource) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Versions != nil {
in, out := &in.Versions, &out.Versions
*out = make([]APIVersion, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResource.
func (in *ThirdPartyResource) DeepCopy() *ThirdPartyResource {
if in == nil {
return nil
}
out := new(ThirdPartyResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ThirdPartyResource) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThirdPartyResourceData) DeepCopyInto(out *ThirdPartyResourceData) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceData.
func (in *ThirdPartyResourceData) DeepCopy() *ThirdPartyResourceData {
if in == nil {
return nil
}
out := new(ThirdPartyResourceData)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ThirdPartyResourceData) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThirdPartyResourceDataList) DeepCopyInto(out *ThirdPartyResourceDataList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ThirdPartyResourceData, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceDataList.
func (in *ThirdPartyResourceDataList) DeepCopy() *ThirdPartyResourceDataList {
if in == nil {
return nil
}
out := new(ThirdPartyResourceDataList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ThirdPartyResourceDataList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThirdPartyResourceList) DeepCopyInto(out *ThirdPartyResourceList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ThirdPartyResource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceList.
func (in *ThirdPartyResourceList) DeepCopy() *ThirdPartyResourceList {
if in == nil {
return nil
}
out := new(ThirdPartyResourceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ThirdPartyResourceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserIDRange) DeepCopyInto(out *UserIDRange) {
*out = *in

View file

@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:deepcopy-gen=package
// +groupName=networking.k8s.io
package networking // import "k8s.io/kubernetes/pkg/apis/networking"

View file

@ -19,7 +19,7 @@ package networking
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/api"
api "k8s.io/kubernetes/pkg/apis/core"
)
// +genclient

View file

@ -22,58 +22,11 @@ package networking
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/api"
reflect "reflect"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
SchemeBuilder.Register(RegisterDeepCopies)
}
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
//
// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
return scheme.AddGeneratedDeepCopyFuncs(
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*IPBlock).DeepCopyInto(out.(*IPBlock))
return nil
}, InType: reflect.TypeOf(&IPBlock{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy))
return nil
}, InType: reflect.TypeOf(&NetworkPolicy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyPeer{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort))
return nil
}, InType: reflect.TypeOf(&NetworkPolicyPort{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec))
return nil
}, InType: reflect.TypeOf(&NetworkPolicySpec{})},
)
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPBlock) DeepCopyInto(out *IPBlock) {
*out = *in
@ -268,7 +221,7 @@ func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) {
if *in == nil {
*out = nil
} else {
*out = new(api.Protocol)
*out = new(core.Protocol)
**out = **in
}
}

View file

@ -4,7 +4,7 @@
The mechanism for supporting cloud providers is currently in transition: the original method of implementing cloud provider-specific functionality within the main kubernetes tree (here) is no longer advised; however, the proposed solution is still in development.
#### Guidance for potential cloud providers:
* Support for cloud providers is currently in a state of flux. Background information on the motivation and the proposal for improving the situation is in the GitHub [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md).
* In support of this plan, a new cloud-controller-manager binary was added in 1.6. This was the first of several steps (see the proposal for more information).
* Attempts to contribute new cloud providers or (to a lesser extent) persistent volumes to the core repo will likely meet with some pushback from reviewers/approvers.
* It is understood that this is an unfortunate situation in which 'the old way is no longer supported but the new way is not ready yet', but the initial path is unsustainable, and contributors are encouraged to participate in the implementation of the proposed long-term solution, as there is risk that PRs for new cloud providers here will not be approved.
@ -13,4 +13,4 @@ The mechanism for supporting cloud providers is currently in transition: the or
#### Some additional context on status / direction:
* 1.6 added a new cloud-controller-manager binary that may be used for testing the new out-of-core cloudprovider flow.
* Setting cloud-provider=external allows for creation of a separate controller-manager binary
* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization.

View file

@ -23,6 +23,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/kubernetes/pkg/controller"
)
@ -49,6 +50,11 @@ type Interface interface {
HasClusterID() bool
}
type InformerUser interface {
// SetInformers sets the informer on the cloud object.
SetInformers(informerFactory informers.SharedInformerFactory)
}
// Clusters is an abstract, pluggable interface for clusters of containers.
type Clusters interface {
// ListClusters lists the names of the available clusters.

View file

@ -28,14 +28,13 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
clientgoclientset "k8s.io/client-go/kubernetes"
clientset "k8s.io/client-go/kubernetes"
v1authentication "k8s.io/client-go/kubernetes/typed/authentication/v1"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/serviceaccount"
"github.com/golang/glog"
@ -47,8 +46,8 @@ type ControllerClientBuilder interface {
ConfigOrDie(name string) *restclient.Config
Client(name string) (clientset.Interface, error)
ClientOrDie(name string) clientset.Interface
ClientGoClient(name string) (clientgoclientset.Interface, error)
ClientGoClientOrDie(name string) clientgoclientset.Interface
ClientGoClient(name string) (clientset.Interface, error)
ClientGoClientOrDie(name string) clientset.Interface
}
// SimpleControllerClientBuilder returns a fixed client with different user agents
@ -86,15 +85,15 @@ func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interf
return client
}
func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) {
func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
return clientgoclientset.NewForConfig(clientConfig)
return clientset.NewForConfig(clientConfig)
}
func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface {
func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface {
client, err := b.ClientGoClient(name)
if err != nil {
glog.Fatal(err)
@ -276,15 +275,15 @@ func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface
return client
}
func (b SAControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) {
func (b SAControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
return clientgoclientset.NewForConfig(clientConfig)
return clientset.NewForConfig(clientConfig)
}
func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface {
func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface {
client, err := b.ClientGoClient(name)
if err != nil {
glog.Fatal(err)

View file

@ -42,9 +42,9 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/integer"
clientretry "k8s.io/client-go/util/retry"
_ "k8s.io/kubernetes/pkg/api/install"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/validation"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/apis/core/validation"
hashutil "k8s.io/kubernetes/pkg/util/hash"
taintutils "k8s.io/kubernetes/pkg/util/taints"
@ -410,7 +410,7 @@ type RealRSControl struct {
var _ RSControlInterface = &RealRSControl{}
func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
_, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
_, err := r.KubeClient.ExtensionsV1beta1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
return err
}
@ -1008,7 +1008,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
}
_, err = c.CoreV1().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes)
_, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes)
return err
}

View file

@ -98,7 +98,7 @@ const (
// the API server as the certificate approaches expiration.
RotateKubeletClientCertificate utilfeature.Feature = "RotateKubeletClientCertificate"
// owner: @msau
// owner: @msau42
// alpha: v1.7
//
// A new volume type that supports local disks on a node.
@ -140,12 +140,6 @@ const (
// 'MemoryPressure', 'OutOfDisk' and 'DiskPressure'.
TaintNodesByCondition utilfeature.Feature = "TaintNodesByCondition"
// owner: @haibinxie
// alpha: v1.8
//
// Implement IPVS-based in-cluster service load balancing
SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode"
// owner: @jsafrane
// alpha: v1.8
//
@ -169,6 +163,55 @@ const (
//
// Enable nodes to exclude themselves from service load balancers
ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion"
// owner: @jsafrane
// alpha: v1.9
//
// Enable running mount utilities in containers.
MountContainers utilfeature.Feature = "MountContainers"
// owner: @msau42
// alpha: v1.9
//
// Extend the default scheduler to be aware of PV topology and handle PV binding
// Before moving to beta, resolve Kubernetes issue #56180
VolumeScheduling utilfeature.Feature = "VolumeScheduling"
// owner: @vladimirvivien
// alpha: v1.9
//
// Enable mount/attachment of Container Storage Interface (CSI) backed PVs
CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume"
// owner @MrHohn
// alpha: v1.9
//
// Support configurable pod DNS parameters.
CustomPodDNS utilfeature.Feature = "CustomPodDNS"
// owner: @screeley44
// alpha: v1.9
//
// Enable Block volume support in containers.
BlockVolume utilfeature.Feature = "BlockVolume"
// owner: @pospispa
//
// alpha: v1.9
// Postpone deletion of a persistent volume claim in case it is used by a pod
PVCProtection utilfeature.Feature = "PVCProtection"
// owner: @aveshagarwal
// alpha: v1.9
//
// Enable resource limits priority function
ResourceLimitsPriorityFunction utilfeature.Feature = "ResourceLimitsPriorityFunction"
// owner: @m1093782566
// beta: v1.9
//
// Implement IPVS-based in-cluster service load balancing
SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode"
)
func init() {
@ -201,6 +244,14 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
ExpandPersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha},
CPUManager: {Default: false, PreRelease: utilfeature.Alpha},
ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha},
MountContainers: {Default: false, PreRelease: utilfeature.Alpha},
VolumeScheduling: {Default: false, PreRelease: utilfeature.Alpha},
CSIPersistentVolume: {Default: false, PreRelease: utilfeature.Alpha},
CustomPodDNS: {Default: false, PreRelease: utilfeature.Alpha},
BlockVolume: {Default: false, PreRelease: utilfeature.Alpha},
PVCProtection: {Default: false, PreRelease: utilfeature.Alpha},
ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha},
SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Beta},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
@ -212,6 +263,5 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
apiextensionsfeatures.CustomResourceValidation: {Default: false, PreRelease: utilfeature.Alpha},
SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Alpha},
apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
}
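For orientation, the gates added above are normally flipped through a component's --feature-gates flag; the same can be done programmatically. A minimal sketch, assuming the vendored feature packages are importable from this tree:
package main
import (
	"fmt"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)
func main() {
	// Enable one of the new v1.9 alpha gates, as "--feature-gates=BlockVolume=true" would.
	if err := utilfeature.DefaultFeatureGate.Set("BlockVolume=true"); err != nil {
		panic(err)
	}
	fmt.Println("BlockVolume enabled:", utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume))
}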

19 vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package fieldpath supplies methods for extracting fields from objects
// given a path to a field.
package fieldpath // import "k8s.io/kubernetes/pkg/fieldpath"

103 vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go generated vendored Normal file
View file

@ -0,0 +1,103 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fieldpath
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/util/validation"
)
// FormatMap formats map[string]string to a string.
func FormatMap(m map[string]string) (fmtStr string) {
for key, value := range m {
fmtStr += fmt.Sprintf("%v=%q\n", key, value)
}
fmtStr = strings.TrimSuffix(fmtStr, "\n")
return
}
// ExtractFieldPathAsString extracts the field from the given object
// and returns it as a string. The object must be a pointer to an
// API type.
func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) {
accessor, err := meta.Accessor(obj)
if err != nil {
return "", nil
}
if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok {
switch path {
case "metadata.annotations":
if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 {
return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
}
return accessor.GetAnnotations()[subscript], nil
case "metadata.labels":
if errs := validation.IsQualifiedName(subscript); len(errs) != 0 {
return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
}
return accessor.GetLabels()[subscript], nil
default:
return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath)
}
}
switch fieldPath {
case "metadata.annotations":
return FormatMap(accessor.GetAnnotations()), nil
case "metadata.labels":
return FormatMap(accessor.GetLabels()), nil
case "metadata.name":
return accessor.GetName(), nil
case "metadata.namespace":
return accessor.GetNamespace(), nil
case "metadata.uid":
return string(accessor.GetUID()), nil
}
return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath)
}
// SplitMaybeSubscriptedPath checks whether the specified fieldPath is
// subscripted, and
// - if yes, this function splits the fieldPath into path and subscript, and
// returns (path, subscript, true).
// - if no, this function returns (fieldPath, "", false).
//
// Example inputs and outputs:
// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true)
// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true)
// - "metadata.labels['']" --> ("metadata.labels", "", true)
// - "metadata.labels" --> ("metadata.labels", "", false)
func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) {
if !strings.HasSuffix(fieldPath, "']") {
return fieldPath, "", false
}
s := strings.TrimSuffix(fieldPath, "']")
parts := strings.SplitN(s, "['", 2)
if len(parts) < 2 {
return fieldPath, "", false
}
if len(parts[0]) == 0 {
return fieldPath, "", false
}
return parts[0], parts[1], true
}
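A small usage sketch for the helpers above (illustrative only; the pod name and label are invented): a subscripted field path is split by SplitMaybeSubscriptedPath and the subscript is looked up in the matching map.
package main
import (
	"fmt"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/fieldpath"
)
func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:   "web-0",
		Labels: map[string]string{"app": "web"},
	}}
	// "metadata.labels['app']" is split into ("metadata.labels", "app", true)
	// and resolved against the object's labels.
	value, err := fieldpath.ExtractFieldPathAsString(pod, "metadata.labels['app']")
	fmt.Println(value, err) // prints: web <nil>
}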

View file

@ -25,3 +25,31 @@ const (
// NetworkReady means the runtime network is up and ready to accept containers which require network.
NetworkReady = "NetworkReady"
)
// LogStreamType is the type of the stream in CRI container log.
type LogStreamType string
const (
// Stdout is the stream type for stdout.
Stdout LogStreamType = "stdout"
// Stderr is the stream type for stderr.
Stderr LogStreamType = "stderr"
)
// LogTag is the tag of a log line in CRI container log.
// Currently defined log tags:
// * First tag: Partial/Full - P/F.
// The field in the container log format can be extended to include multiple
// tags by using a delimiter, but changes should be rare. If it becomes clear
// that better extensibility is desired, a more extensible format (e.g., json)
// should be adopted as a replacement and/or addition.
type LogTag string
const (
// LogTagPartial means the line is part of multiple lines.
LogTagPartial LogTag = "P"
// LogTagFull means the line is a single full line or the end of multiple lines.
LogTagFull LogTag = "F"
// LogTagDelimiter is the delimiter for different log tags.
LogTagDelimiter = ":"
)
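To make the layout concrete, a CRI log line carries a timestamp, the stream type, the tag field, and the payload. The following is only a sketch with a made-up line; the real parsing lives in the kubelet's runtime log code.
package main
import (
	"fmt"
	"strings"
)
func main() {
	// A line as a CRI runtime would write it; the content itself is invented.
	line := `2017-12-11T16:45:48.000000000Z stdout P this entry continues on the next line`
	parts := strings.SplitN(line, " ", 4)
	timestamp, stream, tags, payload := parts[0], parts[1], parts[2], parts[3]
	// Multiple tags would be joined with LogTagDelimiter (":"); only the
	// Partial/Full tag is defined so far.
	partial := strings.Split(tags, ":")[0] == "P"
	fmt.Println(timestamp, stream, partial, payload)
}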

View file

@ -46,8 +46,8 @@ type HandlerRunner interface {
// RuntimeHelper wraps kubelet to make container runtime
// able to get necessary information such as the RunContainerOptions, DNS settings, and Host IP.
type RuntimeHelper interface {
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error)
GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, useClusterFirstPolicy bool, err error)
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, err error)
GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error)
// GetPodCgroupParent returns the CgroupName identifer, and its literal cgroupfs form on the host
// of a pod.
GetPodCgroupParent(pod *v1.Pod) string

View file

@ -428,10 +428,6 @@ type RunContainerOptions struct {
// this directory will be used to create and mount the log file to
// container.TerminationMessagePath
PodContainerDir string
// The list of DNS servers for the container to use.
DNS []string
// The list of DNS search domains.
DNSSearch []string
// The parent cgroup to pass to Docker
CgroupParent string
// The type of container rootfs
@ -450,9 +446,14 @@ type RunContainerOptions struct {
type VolumeInfo struct {
// Mounter is the volume's mounter
Mounter volume.Mounter
// BlockVolumeMapper is the Block volume's mapper
BlockVolumeMapper volume.BlockVolumeMapper
// SELinuxLabeled indicates whether this volume has had the
// pod's SELinux label applied to it or not
SELinuxLabeled bool
// Whether the volume permission is set to read-only or not
// This value is passed from volume.spec
ReadOnly bool
}
type VolumeMap map[string]VolumeInfo

View file

@ -22,6 +22,7 @@ import (
"net"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
@ -36,12 +37,18 @@ type fakeTable struct {
}
type fakeIPTables struct {
tables map[string]*fakeTable
tables map[string]*fakeTable
builtinChains map[string]sets.String
}
func NewFakeIPTables() *fakeIPTables {
return &fakeIPTables{
tables: make(map[string]*fakeTable, 0),
builtinChains: map[string]sets.String{
string(utiliptables.TableFilter): sets.NewString("INPUT", "FORWARD", "OUTPUT"),
string(utiliptables.TableNAT): sets.NewString("PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"),
string(utiliptables.TableMangle): sets.NewString("PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"),
},
}
}
@ -246,6 +253,7 @@ func (f *fakeIPTables) SaveInto(tableName utiliptables.Table, buffer *bytes.Buff
}
func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, flush utiliptables.FlushFlag) error {
allLines := string(data)
buf := bytes.NewBuffer(data)
var tableName utiliptables.Table
for {
@ -274,6 +282,13 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte,
}
}
_, _ = f.ensureChain(tableName, chainName)
// The --noflush option for iptables-restore doesn't work for user-defined chains, only builtin chains.
// We should flush user-defined chains if the chain is not to be deleted
if !f.isBuiltinChain(tableName, chainName) && !strings.Contains(allLines, "-X "+string(chainName)) {
if err := f.FlushChain(tableName, chainName); err != nil {
return err
}
}
} else if strings.HasPrefix(line, "-A") {
parts := strings.Split(line, " ")
if len(parts) < 3 {
@ -329,3 +344,10 @@ func (f *fakeIPTables) AddReloadFunc(reloadFunc func()) {
func (f *fakeIPTables) Destroy() {
}
func (f *fakeIPTables) isBuiltinChain(tableName utiliptables.Table, chainName utiliptables.Chain) bool {
if builtinChains, ok := f.builtinChains[string(tableName)]; ok && builtinChains.Has(string(chainName)) {
return true
}
return false
}

View file

@ -21,6 +21,7 @@ import (
"crypto/sha256"
"encoding/base32"
"fmt"
"strconv"
"strings"
"sync"
@ -177,6 +178,8 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er
chainsToRemove := []utiliptables.Chain{}
for _, pm := range hostportMappings {
chainsToRemove = append(chainsToRemove, getHostportChain(id, pm))
// TODO: remove this after release 1.9; please refer to https://github.com/kubernetes/kubernetes/pull/55153
chainsToRemove = append(chainsToRemove, getBuggyHostportChain(id, pm))
}
// remove rules that consist of target chains
@ -247,6 +250,16 @@ func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error
// WARNING: Please do not change this function. Otherwise, HostportManager may not be able to
// identify existing iptables chains.
func getHostportChain(id string, pm *PortMapping) utiliptables.Chain {
hash := sha256.Sum256([]byte(id + strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol)))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}
// This buggy func does a bad conversion of HostPort from int32 to string.
// It can generate the same chain name for different ports of the same pod, e.g. ports 57119/55429/56833.
// `getHostportChain` fixed this bug. This function is temporarily kept so the legacy chains/rules can be cleaned up.
// TODO: remove this after release 1.9; please refer to https://github.com/kubernetes/kubernetes/pull/55153
func getBuggyHostportChain(id string, pm *PortMapping) utiliptables.Chain {
hash := sha256.Sum256([]byte(id + string(pm.HostPort) + string(pm.Protocol)))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
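The collision comes from Go's rune conversion: the ports named above all fall in the UTF-16 surrogate range, so string(int32) collapses each of them to the same replacement character and the hashes agree. A standalone sketch (pod ID and protocol are placeholders) contrasting the old and fixed inputs:
package main
import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strconv"
)
func main() {
	for _, port := range []int32{57119, 55429, 56833} {
		// Old input: string(port) yields "\ufffd" for every surrogate-range port,
		// so all three chains hash to the same name.
		buggy := sha256.Sum256([]byte("pod-id" + string(port) + "tcp"))
		// Fixed input: strconv.Itoa keeps the ports distinct.
		fixed := sha256.Sum256([]byte("pod-id" + strconv.Itoa(int(port)) + "tcp"))
		fmt.Println(
			base32.StdEncoding.EncodeToString(buggy[:])[:16],
			base32.StdEncoding.EncodeToString(fixed[:])[:16],
		)
	}
}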

View file

@ -21,6 +21,7 @@ import (
"crypto/sha256"
"encoding/base32"
"fmt"
"strconv"
"strings"
"time"
@ -142,7 +143,7 @@ func writeLine(buf *bytes.Buffer, words ...string) {
// this because IPTables Chain Names must be <= 28 chars long, and the longer
// they are the harder they are to read.
func hostportChainName(pm *PortMapping, podFullName string) utiliptables.Chain {
hash := sha256.Sum256([]byte(string(pm.HostPort) + string(pm.Protocol) + podFullName))
hash := sha256.Sum256([]byte(strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol) + podFullName))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}

View file

@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/httpstream/spdy"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api"
api "k8s.io/kubernetes/pkg/apis/core"
"github.com/golang/glog"
)

View file

@ -32,7 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/server/httplog"
"k8s.io/apiserver/pkg/util/wsstream"
"k8s.io/kubernetes/pkg/api"
api "k8s.io/kubernetes/pkg/apis/core"
)
const (

View file

@ -32,7 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/util/wsstream"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubernetes/pkg/api"
api "k8s.io/kubernetes/pkg/apis/core"
"github.com/golang/glog"
)

View file

@ -21,7 +21,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeapi "k8s.io/kubernetes/pkg/api"
kubeapi "k8s.io/kubernetes/pkg/apis/core"
)
const (
@ -49,6 +49,8 @@ const (
// Pods with the given ids have unexpected status in this source,
// kubelet should reconcile status with this source
RECONCILE
// Pods with the given ids have been restored from a checkpoint.
RESTORE
// These constants identify the sources of pods
// Updates from a file

View file

@ -33,7 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
api "k8s.io/kubernetes/pkg/apis/core"
)
var nodeHealthzRetryInterval = 60 * time.Second

View file

@ -41,9 +41,9 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/helper"
apiservice "k8s.io/kubernetes/pkg/api/service"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
@ -80,6 +80,9 @@ const (
// the mark-for-drop chain
KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
// the kubernetes forward chain
kubeForwardChain utiliptables.Chain = "KUBE-FORWARD"
)
// IPTablesVersioner can query the current iptables version.
@ -440,11 +443,6 @@ func NewProxier(ipt utiliptables.Interface,
recorder record.EventRecorder,
healthzServer healthcheck.HealthzUpdater,
) (*Proxier, error) {
// check valid user input
if minSyncPeriod > syncPeriod {
return nil, fmt.Errorf("minSyncPeriod (%v) must be <= syncPeriod (%v)", minSyncPeriod, syncPeriod)
}
// Set the route_localnet sysctl we need for
if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil {
return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err)
@ -458,9 +456,6 @@ func NewProxier(ipt utiliptables.Interface,
}
// Generate the masquerade mark to use for SNAT rules.
if masqueradeBit < 0 || masqueradeBit > 31 {
return nil, fmt.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", masqueradeBit)
}
masqueradeValue := 1 << uint(masqueradeBit)
masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
@ -543,6 +538,18 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
}
}
// Unlink the forwarding chain.
args = []string{
"-m", "comment", "--comment", "kubernetes forwarding rules",
"-j", string(kubeForwardChain),
}
if err := ipt.DeleteRule(utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil {
if !utiliptables.IsNotFoundError(err) {
glog.Errorf("Error removing pure-iptables proxy rule: %v", err)
encounteredError = true
}
}
// Flush and remove all of our chains.
iptablesData := bytes.NewBuffer(nil)
if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil {
@ -578,14 +585,28 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
encounteredError = true
}
}
{
filterBuf := bytes.NewBuffer(nil)
writeLine(filterBuf, "*filter")
writeLine(filterBuf, fmt.Sprintf(":%s - [0:0]", kubeServicesChain))
writeLine(filterBuf, fmt.Sprintf("-X %s", kubeServicesChain))
writeLine(filterBuf, "COMMIT")
// Flush and remove all of our chains.
iptablesData = bytes.NewBuffer(nil)
if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil {
glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err)
encounteredError = true
} else {
existingFilterChains := utiliptables.GetChainLines(utiliptables.TableFilter, iptablesData.Bytes())
filterChains := bytes.NewBuffer(nil)
filterRules := bytes.NewBuffer(nil)
writeLine(filterChains, "*filter")
for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeForwardChain} {
if _, found := existingFilterChains[chain]; found {
chainString := string(chain)
writeLine(filterChains, existingFilterChains[chain])
writeLine(filterRules, "-X", chainString)
}
}
writeLine(filterRules, "COMMIT")
filterLines := append(filterChains.Bytes(), filterRules.Bytes()...)
// Write it.
if err := ipt.Restore(utiliptables.TableFilter, filterBuf.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil {
if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil {
glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err)
encounteredError = true
}
@ -798,7 +819,7 @@ func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.S
for svcPortName := range endpointsMap {
for _, ep := range endpointsMap[svcPortName] {
if ep.isLocal {
// If the endpoint has a bad format, ipPart() will log an
// If the endpoint has a bad format, utilproxy.IPPart() will log an
// error and ep.IPPart() will return a null string.
if ip := ep.IPPart(); ip != "" {
nsn := svcPortName.NamespacedName
@ -1027,6 +1048,21 @@ func (proxier *Proxier) syncProxyRules() {
}
}
// Create and link the kube forward chain.
{
if _, err := proxier.iptables.EnsureChain(utiliptables.TableFilter, kubeForwardChain); err != nil {
glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, kubeForwardChain, err)
return
}
comment := "kubernetes forward rules"
args := []string{"-m", "comment", "--comment", comment, "-j", string(kubeForwardChain)}
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil {
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainForward, kubeForwardChain, err)
return
}
}
//
// Below this point we will not return until we try to write the iptables rules.
//
@ -1069,6 +1105,11 @@ func (proxier *Proxier) syncProxyRules() {
} else {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeServicesChain))
}
if chain, ok := existingFilterChains[kubeForwardChain]; ok {
writeLine(proxier.filterChains, chain)
} else {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeForwardChain))
}
if chain, ok := existingNATChains[kubeServicesChain]; ok {
writeLine(proxier.natChains, chain)
} else {
@ -1516,6 +1557,18 @@ func (proxier *Proxier) syncProxyRules() {
)
writeLine(proxier.natRules, args...)
} else {
// First write session affinity rules only over local endpoints, if applicable.
if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP {
for _, endpointChain := range localEndpointChains {
writeLine(proxier.natRules,
"-A", string(svcXlbChain),
"-m", "comment", "--comment", svcNameString,
"-m", "recent", "--name", string(endpointChain),
"--rcheck", "--seconds", strconv.Itoa(svcInfo.stickyMaxAgeSeconds), "--reap",
"-j", string(endpointChain))
}
}
// Setup probability filter rules only over local endpoints
for i, endpointChain := range localEndpointChains {
// Balancing rules in the per-service chain.
@ -1562,6 +1615,40 @@ func (proxier *Proxier) syncProxyRules() {
"-m", "addrtype", "--dst-type", "LOCAL",
"-j", string(kubeNodePortsChain))
// If the masqueradeMark has been added then we want to forward that same
// traffic; this allows NodePort traffic to be forwarded even if the default
// FORWARD policy is not ACCEPT.
writeLine(proxier.filterRules,
"-A", string(kubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-j", "ACCEPT",
)
// The following rules can only be set if clusterCIDR has been defined.
if len(proxier.clusterCIDR) != 0 {
// The following two rules ensure that traffic after the initial packet
// accepted by the "kubernetes forwarding rules" rule above will also be
// accepted. To be as specific as possible, the traffic must be sourced
// from or destined to the clusterCIDR (i.e. to/from a pod).
writeLine(proxier.filterRules,
"-A", string(kubeForwardChain),
"-s", proxier.clusterCIDR,
"-m", "comment", "--comment", `"kubernetes forwarding conntrack pod source rule"`,
"-m", "conntrack",
"--ctstate", "RELATED,ESTABLISHED",
"-j", "ACCEPT",
)
writeLine(proxier.filterRules,
"-A", string(kubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding conntrack pod destination rule"`,
"-d", proxier.clusterCIDR,
"-m", "conntrack",
"--ctstate", "RELATED,ESTABLISHED",
"-j", "ACCEPT",
)
}
// Write the end-of-table markers.
writeLine(proxier.filterRules, "COMMIT")
writeLine(proxier.natRules, "COMMIT")
@ -1609,7 +1696,7 @@ func (proxier *Proxier) syncProxyRules() {
// Finish housekeeping.
// TODO: these could be made more consistent.
for _, svcIP := range staleServices.List() {
for _, svcIP := range staleServices.UnsortedList() {
if err := utilproxy.ClearUDPConntrackForIP(proxier.exec, svcIP); err != nil {
glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
}
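For reference, a rough standalone sketch (not the vendored proxier) of the filter-table fragment that the KUBE-FORWARD rules above produce for iptables-restore. The cluster CIDR and masquerade mark are assumed placeholders, and the real proxier emits additional bookkeeping around these lines.

package main

import (
	"bytes"
	"fmt"
	"strings"
)

// writeLine mirrors the proxier helper: join the words with spaces and
// terminate the line.
func writeLine(buf *bytes.Buffer, words ...string) {
	buf.WriteString(strings.Join(words, " ") + "\n")
}

func main() {
	const kubeForwardChain = "KUBE-FORWARD"
	clusterCIDR := "10.244.0.0/16"            // assumed pod CIDR
	masqueradeMark := "0x00004000/0x00004000" // assumed masquerade mark

	filter := bytes.NewBuffer(nil)
	writeLine(filter, "*filter")
	writeLine(filter, fmt.Sprintf(":%s - [0:0]", kubeForwardChain))
	writeLine(filter, "-A", kubeForwardChain,
		"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
		"-m", "mark", "--mark", masqueradeMark, "-j", "ACCEPT")
	writeLine(filter, "-A", kubeForwardChain, "-s", clusterCIDR,
		"-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT")
	writeLine(filter, "-A", kubeForwardChain, "-d", clusterCIDR,
		"-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT")
	writeLine(filter, "COMMIT")

	// Roughly the filter-table fragment handed to iptables-restore.
	fmt.Print(filter.String())
}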

View file

@ -27,7 +27,7 @@ import (
// Utilities for dealing with conntrack
const noConnectionToDelete = "0 flow entries have been deleted"
const NoConnectionToDelete = "0 flow entries have been deleted"
func IsIPv6(netIP net.IP) bool {
return netIP != nil && netIP.To4() == nil
@ -50,7 +50,7 @@ func parametersWithFamily(isIPv6 bool, parameters ...string) []string {
func ClearUDPConntrackForIP(execer exec.Interface, ip string) error {
parameters := parametersWithFamily(IsIPv6String(ip), "-D", "--orig-dst", ip, "-p", "udp")
err := ExecConntrackTool(execer, parameters...)
if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
// TODO: Better handling for deletion failure. When a failure occurs, stale udp connections may not get flushed.
// These stale udp connections will keep black-holing traffic. Making this a best effort operation for now, since it
// is expensive to baby-sit all udp connections to kubernetes services.
@ -84,7 +84,7 @@ func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) erro
}
parameters := parametersWithFamily(isIPv6, "-D", "-p", "udp", "--dport", strconv.Itoa(port))
err := ExecConntrackTool(execer, parameters...)
if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
return fmt.Errorf("error deleting conntrack entries for UDP port: %d, error: %v", port, err)
}
return nil
@ -95,7 +95,7 @@ func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) erro
func ClearUDPConntrackForPeers(execer exec.Interface, origin, dest string) error {
parameters := parametersWithFamily(IsIPv6String(origin), "-D", "--orig-dst", origin, "--dst-nat", dest, "-p", "udp")
err := ExecConntrackTool(execer, parameters...)
if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
// TODO: Better handling for deletion failure. When a failure occurs, stale udp connections may not get flushed.
// These stale udp connections will keep black-holing traffic. Making this a best effort operation for now, since it
// is expensive to baby-sit all udp connections to kubernetes services.

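A minimal standalone sketch of how these helpers drive the conntrack binary and why the now-exported NoConnectionToDelete sentinel matters. buildArgs is a hypothetical stand-in and may order the family flag differently than the real parametersWithFamily.

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

const noConnectionToDelete = "0 flow entries have been deleted"

// isIPv6String mirrors the IsIPv6/IsIPv6String checks in the diff above.
func isIPv6String(ip string) bool {
	netIP := net.ParseIP(ip)
	return netIP != nil && netIP.To4() == nil
}

// buildArgs adds the IPv6 family flag when needed (hypothetical helper).
func buildArgs(isIPv6 bool, parameters ...string) []string {
	if isIPv6 {
		parameters = append(parameters, "-f", "ipv6")
	}
	return parameters
}

func main() {
	// Argument lists that would be passed to the conntrack binary.
	fmt.Println(buildArgs(isIPv6String("10.96.0.10"), "-D", "--orig-dst", "10.96.0.10", "-p", "udp"))
	fmt.Println(buildArgs(isIPv6String("fd00::10"), "-D", "-p", "udp", "--dport", strconv.Itoa(53)))

	// conntrack exits non-zero when nothing matched; callers treat that
	// specific message as success rather than an error.
	err := fmt.Errorf("conntrack v1.4.4: %s", noConnectionToDelete)
	fmt.Println("ignore:", strings.Contains(err.Error(), noConnectionToDelete))
}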
View file

@ -19,6 +19,7 @@ package util
import (
"fmt"
"net"
"strconv"
"github.com/golang/glog"
)
@ -32,12 +33,33 @@ func IPPart(s string) string {
return s
}
// Must be IP:port
ip, _, err := net.SplitHostPort(s)
host, _, err := net.SplitHostPort(s)
if err != nil {
glog.Errorf("Error parsing '%s': %v", s, err)
return ""
}
return ip
// Check if host string is a valid IP address
if ip := net.ParseIP(host); ip != nil {
return ip.String()
} else {
glog.Errorf("invalid IP part '%s'", host)
}
return ""
}
func PortPart(s string) (int, error) {
// Must be IP:port
_, port, err := net.SplitHostPort(s)
if err != nil {
glog.Errorf("Error parsing '%s': %v", s, err)
return -1, err
}
portNumber, err := strconv.Atoi(port)
if err != nil {
glog.Errorf("Error parsing '%s': %v", port, err)
return -1, err
}
return portNumber, nil
}
// ToCIDR returns a host address of the form <ip-address>/32 for

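A self-contained sketch of the host/port splitting that the reworked IPPart and the new PortPart perform, assuming the net.SplitHostPort plus net.ParseIP validation shown above; the function and endpoint names here are illustrative only.

package main

import (
	"fmt"
	"net"
	"strconv"
)

// splitEndpoint mirrors the IPPart/PortPart logic: SplitHostPort handles
// both "1.2.3.4:9376" and "[fd00::1]:9376", and ParseIP rejects garbage
// host strings instead of passing them through.
func splitEndpoint(s string) (string, int, error) {
	host, port, err := net.SplitHostPort(s)
	if err != nil {
		return "", -1, err
	}
	ip := net.ParseIP(host)
	if ip == nil {
		return "", -1, fmt.Errorf("invalid IP part %q", host)
	}
	p, err := strconv.Atoi(port)
	if err != nil {
		return "", -1, err
	}
	return ip.String(), p, nil
}

func main() {
	for _, ep := range []string{"10.1.2.3:9376", "[2001:db8::2]:53", "not-an-ip:80"} {
		ip, port, err := splitEndpoint(ep)
		fmt.Println(ep, "->", ip, port, err)
	}
}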
View file

@ -20,8 +20,8 @@ import (
"net"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/helper"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"github.com/golang/glog"
)

View file

@ -20,7 +20,7 @@ import (
"k8s.io/api/core/v1"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/kubernetes/pkg/api"
api "k8s.io/kubernetes/pkg/apis/core"
)
// UserInfo returns a user.Info interface for the given namespace, service account name and UID

View file

@ -82,6 +82,7 @@ type Table string
const (
TableNAT Table = "nat"
TableFilter Table = "filter"
TableMangle Table = "mangle"
)
type Chain string
@ -91,6 +92,7 @@ const (
ChainPrerouting Chain = "PREROUTING"
ChainOutput Chain = "OUTPUT"
ChainInput Chain = "INPUT"
ChainForward Chain = "FORWARD"
)
const (
@ -592,7 +594,7 @@ func getIPTablesRestoreWaitFlag(exec utilexec.Interface, protocol Protocol) []st
return nil
}
return []string{"--wait=2"}
return []string{WaitSecondsString}
}
// getIPTablesRestoreVersionString runs "iptables-restore --version" to get the version string

140
vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go generated vendored Normal file
View file

@ -0,0 +1,140 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"fmt"
"github.com/golang/glog"
)
// ExecMounter is a mounter that uses provided Exec interface to mount and
// unmount a filesystem. For all other calls it uses a wrapped mounter.
type execMounter struct {
wrappedMounter Interface
exec Exec
}
func NewExecMounter(exec Exec, wrapped Interface) Interface {
return &execMounter{
wrappedMounter: wrapped,
exec: exec,
}
}
// execMounter implements mount.Interface
var _ Interface = &execMounter{}
// Mount runs mount(8) using given exec interface.
func (m *execMounter) Mount(source string, target string, fstype string, options []string) error {
bind, bindRemountOpts := isBind(options)
if bind {
err := m.doExecMount(source, target, fstype, []string{"bind"})
if err != nil {
return err
}
return m.doExecMount(source, target, fstype, bindRemountOpts)
}
return m.doExecMount(source, target, fstype, options)
}
// doExecMount calls exec(mount <what> <where>) using the given exec interface.
func (m *execMounter) doExecMount(source, target, fstype string, options []string) error {
glog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options)
mountArgs := makeMountArgs(source, target, fstype, options)
output, err := m.exec.Run("mount", mountArgs...)
glog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output))
if err != nil {
return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n",
err, "mount", source, target, fstype, options, string(output))
}
return err
}
// Unmount runs umount(8) using given exec interface.
func (m *execMounter) Unmount(target string) error {
outputBytes, err := m.exec.Run("umount", target)
if err == nil {
glog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes))
} else {
glog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes))
}
return err
}
// List returns a list of all mounted filesystems.
func (m *execMounter) List() ([]MountPoint, error) {
return m.wrappedMounter.List()
}
// IsLikelyNotMountPoint determines whether a path is a mountpoint.
func (m *execMounter) IsLikelyNotMountPoint(file string) (bool, error) {
return m.wrappedMounter.IsLikelyNotMountPoint(file)
}
// DeviceOpened checks if a block device is in use by calling Open with the O_EXCL flag.
// Returns true if open returns errno EBUSY, and false if errno is nil.
// Returns an error if errno is any error other than EBUSY.
// Returns with error if pathname is not a device.
func (m *execMounter) DeviceOpened(pathname string) (bool, error) {
return m.wrappedMounter.DeviceOpened(pathname)
}
// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device.
func (m *execMounter) PathIsDevice(pathname string) (bool, error) {
return m.wrappedMounter.PathIsDevice(pathname)
}
// GetDeviceNameFromMount, given a mount point, finds the volume id by checking /proc/mounts
func (m *execMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
return m.wrappedMounter.GetDeviceNameFromMount(mountPath, pluginDir)
}
func (m *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool {
return m.wrappedMounter.IsMountPointMatch(mp, dir)
}
func (m *execMounter) IsNotMountPoint(dir string) (bool, error) {
return m.wrappedMounter.IsNotMountPoint(dir)
}
func (m *execMounter) MakeRShared(path string) error {
return m.wrappedMounter.MakeRShared(path)
}
func (m *execMounter) GetFileType(pathname string) (FileType, error) {
return m.wrappedMounter.GetFileType(pathname)
}
func (m *execMounter) MakeFile(pathname string) error {
return m.wrappedMounter.MakeFile(pathname)
}
func (m *execMounter) MakeDir(pathname string) error {
return m.wrappedMounter.MakeDir(pathname)
}
func (m *execMounter) ExistsPath(pathname string) bool {
return m.wrappedMounter.ExistsPath(pathname)
}

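A rough sketch of the argument list an exec-based mounter hands to mount(8), including the two-step bind-then-remount flow described above. makeArgs is a simplified stand-in for the real makeMountArgs/isBind helpers, and the exact option split may differ.

package main

import (
	"fmt"
	"strings"
)

// makeArgs builds the argument list for mount(8):
// mount [-t fstype] [-o option,option] source target.
func makeArgs(source, target, fstype string, options []string) []string {
	args := []string{}
	if fstype != "" {
		args = append(args, "-t", fstype)
	}
	if len(options) > 0 {
		args = append(args, "-o", strings.Join(options, ","))
	}
	return append(args, source, target)
}

func main() {
	// A bind mount is performed in two steps: first the bind itself,
	// then a remount that applies the remaining options (e.g. ro).
	fmt.Println("mount", makeArgs("/var/data", "/mnt/vol", "", []string{"bind"}))
	fmt.Println("mount", makeArgs("/var/data", "/mnt/vol", "", []string{"bind", "remount", "ro"}))
}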
View file

@ -0,0 +1,87 @@
// +build !linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"errors"
)
type execMounter struct{}
// ExecMounter is a mounter that uses provided Exec interface to mount and
// unmount a filesystem. For all other calls it uses a wrapped mounter.
func NewExecMounter(exec Exec, wrapped Interface) Interface {
return &execMounter{}
}
func (mounter *execMounter) Mount(source string, target string, fstype string, options []string) error {
return nil
}
func (mounter *execMounter) Unmount(target string) error {
return nil
}
func (mounter *execMounter) List() ([]MountPoint, error) {
return []MountPoint{}, nil
}
func (mounter *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool {
return (mp.Path == dir)
}
func (mounter *execMounter) IsNotMountPoint(dir string) (bool, error) {
return IsNotMountPoint(mounter, dir)
}
func (mounter *execMounter) IsLikelyNotMountPoint(file string) (bool, error) {
return true, nil
}
func (mounter *execMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
return "", nil
}
func (mounter *execMounter) DeviceOpened(pathname string) (bool, error) {
return false, nil
}
func (mounter *execMounter) PathIsDevice(pathname string) (bool, error) {
return true, nil
}
func (mounter *execMounter) MakeRShared(path string) error {
return nil
}
func (mounter *execMounter) GetFileType(pathname string) (FileType, error) {
return FileType("fake"), errors.New("not implemented")
}
func (mounter *execMounter) MakeDir(pathname string) error {
return nil
}
func (mounter *execMounter) MakeFile(pathname string) error {
return nil
}
func (mounter *execMounter) ExistsPath(pathname string) bool {
return true
}

View file

@ -498,7 +498,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
if mountErr != nil {
// Mount failed. This indicates either that the disk is unformatted or
// it contains an unexpected filesystem.
existingFormat, err := mounter.getDiskFormat(source)
existingFormat, err := mounter.GetDiskFormat(source)
if err != nil {
return err
}
@ -536,8 +536,8 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
return mountErr
}
// getDiskFormat uses 'lsblk' to see if the given disk is unformated
func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) {
// GetDiskFormat uses 'lsblk' to see if the given disk is unformatted
func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
args := []string{"-n", "-o", "FSTYPE", disk}
glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args)
dataOut, err := mounter.Exec.Run("lsblk", args...)

View file

@ -86,7 +86,7 @@ func (mounter *Mounter) MakeRShared(path string) error {
}
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
return nil
return mounter.Interface.Mount(source, target, fstype, options)
}
func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) {

View file

@ -60,3 +60,9 @@ func Int32PtrDerefOr(ptr *int32, def int32) int32 {
}
return def
}
// BoolPtr returns a pointer to a bool
func BoolPtr(b bool) *bool {
o := b
return &o
}

View file

@ -25,8 +25,8 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/helper"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
const (

View file

@ -208,6 +208,26 @@ type ExpandableVolumePlugin interface {
RequiresFSResize() bool
}
// BlockVolumePlugin is an extend interface of VolumePlugin and is used for block volumes support.
type BlockVolumePlugin interface {
VolumePlugin
// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
// Ownership of the spec pointer is *not* transferred.
// - spec: The v1.Volume spec
// - pod: The enclosing pod
NewBlockVolumeMapper(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (BlockVolumeMapper, error)
// NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state.
// - name: The volume name, as per the v1.Volume spec.
// - podUID: The UID of the enclosing pod
NewBlockVolumeUnmapper(name string, podUID types.UID) (BlockVolumeUnmapper, error)
// ConstructBlockVolumeSpec constructs a volume spec based on the given
// podUID, volume name and a pod device map path.
// The spec may have incomplete information due to limited information
// from input. This function is used by volume manager to reconstruct
// volume spec by reading the volume directories from disk.
ConstructBlockVolumeSpec(podUID types.UID, volumeName, mountPath string) (*Spec, error)
}
// VolumeHost is an interface that plugins can use to access the kubelet.
type VolumeHost interface {
// GetPluginDir returns the absolute path to a directory under which
@ -216,6 +236,11 @@ type VolumeHost interface {
// GetPodPluginDir().
GetPluginDir(pluginName string) string
// GetVolumeDevicePluginDir returns the absolute path to a directory
// under which a given plugin may store data.
// ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/
GetVolumeDevicePluginDir(pluginName string) string
// GetPodVolumeDir returns the absolute path a directory which
// represents the named volume under the named plugin for the given
// pod. If the specified pod does not exist, the result of this call
@ -228,6 +253,13 @@ type VolumeHost interface {
// directory might not actually exist on disk yet.
GetPodPluginDir(podUID types.UID, pluginName string) string
// GetPodVolumeDeviceDir returns the absolute path of a directory which
// represents the named plugin for the given pod.
// If the specified pod does not exist, the result of this call
// might not exist.
// ex. pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/
GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string
// GetKubeClient returns a client interface
GetKubeClient() clientset.Interface
@ -271,6 +303,9 @@ type VolumeHost interface {
// Returns the labels on the node
GetNodeLabels() (map[string]string, error)
// Returns the name of the node
GetNodeName() types.NodeName
}
// VolumePluginMgr tracks registered plugins.
@ -675,6 +710,32 @@ func (pm *VolumePluginMgr) FindExpandablePluginByName(name string) (ExpandableVo
return nil, nil
}
// FindMapperPluginBySpec fetches a block volume plugin by spec.
func (pm *VolumePluginMgr) FindMapperPluginBySpec(spec *Spec) (BlockVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok {
return blockVolumePlugin, nil
}
return nil, nil
}
// FindMapperPluginByName fetches a block volume plugin by name.
func (pm *VolumePluginMgr) FindMapperPluginByName(name string) (BlockVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok {
return blockVolumePlugin, nil
}
return nil, nil
}
// NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler
// pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests
// for emptiness. Most attributes of the template will be correct for most

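The FindMapperPlugin* helpers rely on a plain Go type assertion to discover optional block support on a plugin. A minimal sketch of that pattern follows, with made-up plugin types standing in for the real VolumePlugin/BlockVolumePlugin interfaces.

package main

import "fmt"

// Minimal stand-ins for the plugin interfaces.
type VolumePlugin interface {
	GetPluginName() string
}

type BlockVolumePlugin interface {
	VolumePlugin
	NewBlockVolumeMapper(volumeName string) (string, error)
}

type fsOnlyPlugin struct{}

func (fsOnlyPlugin) GetPluginName() string { return "example/fs-only" }

type blockPlugin struct{}

func (blockPlugin) GetPluginName() string { return "example/block" }
func (blockPlugin) NewBlockVolumeMapper(volumeName string) (string, error) {
	return "/dev/mapper/" + volumeName, nil
}

// findMapperPlugin checks, via a type assertion, whether a plugin also
// implements the block interface; nil means "no block support", not error.
func findMapperPlugin(p VolumePlugin) BlockVolumePlugin {
	if bp, ok := p.(BlockVolumePlugin); ok {
		return bp
	}
	return nil
}

func main() {
	for _, p := range []VolumePlugin{fsOnlyPlugin{}, blockPlugin{}} {
		fmt.Printf("%s: block support = %v\n", p.GetPluginName(), findMapperPlugin(p) != nil)
	}
}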
41
vendor/k8s.io/kubernetes/pkg/volume/util/error.go generated vendored Normal file
View file

@ -0,0 +1,41 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
k8stypes "k8s.io/apimachinery/pkg/types"
)
// This error on attach indicates volume is attached to a different node
// than we expected.
type DanglingAttachError struct {
msg string
CurrentNode k8stypes.NodeName
DevicePath string
}
func (err *DanglingAttachError) Error() string {
return err.msg
}
func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error {
return &DanglingAttachError{
msg: msg,
CurrentNode: node,
DevicePath: devicePath,
}
}

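A small sketch of how a caller might consume DanglingAttachError via a type assertion; the attach function, node names, and device path below are invented for illustration.

package main

import "fmt"

// DanglingAttachError mirrors the vendored type: the volume is attached,
// but to a different node than the attach call expected.
type DanglingAttachError struct {
	msg         string
	CurrentNode string
	DevicePath  string
}

func (e *DanglingAttachError) Error() string { return e.msg }

// attach is a hypothetical operation that discovers an existing attachment.
func attach(volume, node string) error {
	return &DanglingAttachError{
		msg:         fmt.Sprintf("volume %s is attached to another node", volume),
		CurrentNode: "node-b",
		DevicePath:  "/dev/xvdf",
	}
}

func main() {
	err := attach("vol-1", "node-a")
	if dangling, ok := err.(*DanglingAttachError); ok {
		// A caller can use CurrentNode/DevicePath to detach or adopt
		// the existing attachment instead of failing outright.
		fmt.Println("dangling attach on", dangling.CurrentNode, "at", dangling.DevicePath)
	}
}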
68
vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go generated vendored Normal file
View file

@ -0,0 +1,68 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"k8s.io/api/core/v1"
)
const (
// Name of finalizer on PVCs that have a running pod.
PVCProtectionFinalizer = "kubernetes.io/pvc-protection"
)
// IsPVCBeingDeleted returns:
// true: in case PVC is being deleted, i.e. ObjectMeta.DeletionTimestamp is set
// false: in case PVC is not being deleted, i.e. ObjectMeta.DeletionTimestamp is nil
func IsPVCBeingDeleted(pvc *v1.PersistentVolumeClaim) bool {
return pvc.ObjectMeta.DeletionTimestamp != nil
}
// IsProtectionFinalizerPresent returns true in case PVCProtectionFinalizer is
// present among the pvc.Finalizers
func IsProtectionFinalizerPresent(pvc *v1.PersistentVolumeClaim) bool {
for _, finalizer := range pvc.Finalizers {
if finalizer == PVCProtectionFinalizer {
return true
}
}
return false
}
// RemoveProtectionFinalizer returns pvc without PVCProtectionFinalizer in case
// it's present in pvc.Finalizers. It expects that pvc is writable (i.e. is not
// informer's cached copy.)
func RemoveProtectionFinalizer(pvc *v1.PersistentVolumeClaim) {
newFinalizers := make([]string, 0)
for _, finalizer := range pvc.Finalizers {
if finalizer != PVCProtectionFinalizer {
newFinalizers = append(newFinalizers, finalizer)
}
}
if len(newFinalizers) == 0 {
// Sanitize for unit tests so we don't need to distinguish empty array
// and nil.
newFinalizers = nil
}
pvc.Finalizers = newFinalizers
}
// AddProtectionFinalizer adds PVCProtectionFinalizer to pvc. It expects that
// pvc is writable (i.e. is not informer's cached copy.)
func AddProtectionFinalizer(pvc *v1.PersistentVolumeClaim) {
pvc.Finalizers = append(pvc.Finalizers, PVCProtectionFinalizer)
}

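A standalone sketch of the finalizer bookkeeping these helpers implement, using a minimal stand-in struct instead of v1.PersistentVolumeClaim so the example stays self-contained.

package main

import (
	"fmt"
	"time"
)

const pvcProtectionFinalizer = "kubernetes.io/pvc-protection"

// claim carries only the fields the helpers above look at.
type claim struct {
	Finalizers        []string
	DeletionTimestamp *time.Time
}

func isBeingDeleted(c *claim) bool { return c.DeletionTimestamp != nil }

func hasProtectionFinalizer(c *claim) bool {
	for _, f := range c.Finalizers {
		if f == pvcProtectionFinalizer {
			return true
		}
	}
	return false
}

func removeProtectionFinalizer(c *claim) {
	var kept []string
	for _, f := range c.Finalizers {
		if f != pvcProtectionFinalizer {
			kept = append(kept, f)
		}
	}
	c.Finalizers = kept // nil when nothing else remains, matching the helper above
}

func main() {
	now := time.Now()
	c := &claim{
		Finalizers:        []string{pvcProtectionFinalizer, "example.com/other"},
		DeletionTimestamp: &now,
	}
	fmt.Println(isBeingDeleted(c), hasProtectionFinalizer(c)) // true true
	removeProtectionFinalizer(c)
	fmt.Println(c.Finalizers) // [example.com/other]
}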
View file

@ -21,7 +21,7 @@ import (
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"github.com/golang/glog"
@ -30,15 +30,23 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/legacyscheme"
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
)
const readyFileName = "ready"
const (
readyFileName = "ready"
losetupPath = "losetup"
ErrDeviceNotFound = "device not found"
ErrDeviceNotSupported = "device not supported"
ErrNotAvailable = "not available"
)
// IsReady checks for the existence of a regular file
// called 'ready' in the given directory and returns
@ -233,7 +241,7 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) {
}
pod := &v1.Pod{}
codec := legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion)
codec := legacyscheme.Codecs.UniversalDecoder()
if err := runtime.DecodeInto(codec, podDef, pod); err != nil {
return nil, fmt.Errorf("failed decoding file: %v", err)
}
@ -270,3 +278,201 @@ func stringToSet(str, delimiter string) (sets.String, error) {
}
return zonesSet, nil
}
// BlockVolumePathHandler defines a set of operations for handling block volume-related operations
type BlockVolumePathHandler interface {
// MapDevice creates a symbolic link to block device under specified map path
MapDevice(devicePath string, mapPath string, linkName string) error
// UnmapDevice removes a symbolic link to block device under specified map path
UnmapDevice(mapPath string, linkName string) error
// RemoveMapPath removes a file or directory on the specified map path
RemoveMapPath(mapPath string) error
// IsSymlinkExist returns true if the specified symbolic link exists
IsSymlinkExist(mapPath string) (bool, error)
// GetDeviceSymlinkRefs searches symbolic links under global map path
GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error)
// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
AttachFileDevice(path string) (string, error)
// GetLoopDevice returns the full path to the loop device associated with the given path.
GetLoopDevice(path string) (string, error)
// RemoveLoopDevice removes specified loopback device
RemoveLoopDevice(device string) error
}
// NewBlockVolumePathHandler returns a new instance of BlockVolumePathHandler.
func NewBlockVolumePathHandler() BlockVolumePathHandler {
var volumePathHandler VolumePathHandler
return volumePathHandler
}
// VolumePathHandler is path related operation handlers for block volume
type VolumePathHandler struct {
}
// MapDevice creates a symbolic link to block device under specified map path
func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error {
// Example of global map path:
// globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid}
// linkName: {podUid}
//
// Example of pod device map path:
// podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
// linkName: {volumeName}
if len(devicePath) == 0 {
return fmt.Errorf("Failed to map device to map path. devicePath is empty")
}
if len(mapPath) == 0 {
return fmt.Errorf("Failed to map device to map path. mapPath is empty")
}
if !filepath.IsAbs(mapPath) {
return fmt.Errorf("The map path should be absolute: map path: %s", mapPath)
}
glog.V(5).Infof("MapDevice: devicePath %s", devicePath)
glog.V(5).Infof("MapDevice: mapPath %s", mapPath)
glog.V(5).Infof("MapDevice: linkName %s", linkName)
// Check and create mapPath
_, err := os.Stat(mapPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate map path: %s", mapPath)
return err
}
if err = os.MkdirAll(mapPath, 0750); err != nil {
return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err)
}
// Remove the old symbolic link (or file), then create a new one.
// This should be done because the current symbolic link is
// stale across node reboots.
linkPath := path.Join(mapPath, string(linkName))
if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
return err
}
err = os.Symlink(devicePath, linkPath)
return err
}
// UnmapDevice removes a symbolic link associated to block device under specified map path
func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to unmap device from map path. mapPath is empty")
}
glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath)
glog.V(5).Infof("UnmapDevice: linkName %s", linkName)
// Check symbolic link exists
linkPath := path.Join(mapPath, string(linkName))
if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil {
return checkErr
} else if !islinkExist {
glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath)
return nil
}
err := os.Remove(linkPath)
return err
}
// RemoveMapPath removes a file or directory on specified map path
func (v VolumePathHandler) RemoveMapPath(mapPath string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to remove map path. mapPath is empty")
}
glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath)
err := os.RemoveAll(mapPath)
if err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// IsSymlinkExist returns true if the specified file exists and is a symbolic link.
// If the file doesn't exist, or exists but is not a symbolic link, it returns false with no error.
// In other cases, it returns false with the error from Lstat().
func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) {
fi, err := os.Lstat(mapPath)
if err == nil {
// If the file exists and is a symbolic link, return true and no error
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return true, nil
}
// If the file exists but is not a symbolic link, return false and no error
return false, nil
}
// If file doesn't exist, return false and no error
if os.IsNotExist(err) {
return false, nil
}
// Return error from Lstat()
return false, err
}
// GetDeviceSymlinkRefs searches symbolic links under global map path
func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) {
var refs []string
files, err := ioutil.ReadDir(mapPath)
if err != nil {
return nil, fmt.Errorf("Directory cannot read %v", err)
}
for _, file := range files {
if file.Mode()&os.ModeSymlink != os.ModeSymlink {
continue
}
filename := file.Name()
filepath, err := os.Readlink(path.Join(mapPath, filename))
if err != nil {
return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err)
}
glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath)
if filepath == devPath {
refs = append(refs, path.Join(mapPath, filename))
}
}
glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs)
return refs, nil
}
// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
// ex. mapPath symlink: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX
// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX
func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) {
var globalMapPathUUID string
// Find symbolic link named pod uuid under plugin dir
err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) {
glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath)
if res, err := compareSymlinks(path, mapPath); err == nil && res {
globalMapPathUUID = path
}
}
return nil
})
if err != nil {
return "", err
}
glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID)
// Return path contains global map path + {pod uuid}
return globalMapPathUUID, nil
}
func compareSymlinks(global, pod string) (bool, error) {
devGlobal, err := os.Readlink(global)
if err != nil {
return false, err
}
devPod, err := os.Readlink(pod)
if err != nil {
return false, err
}
glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod)
if devGlobal == devPod {
return true, nil
}
return false, nil
}

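A simplified, Linux-oriented sketch of the MapDevice flow above (ensure the map directory exists, drop a stale link left from before a reboot, create the symlink); the device path and pod UID are placeholders and error handling is trimmed.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// mapDevice creates a symlink named linkName under mapPath that points at
// devicePath, replacing any stale link from a previous boot.
func mapDevice(devicePath, mapPath, linkName string) error {
	if devicePath == "" || mapPath == "" {
		return fmt.Errorf("devicePath and mapPath must not be empty")
	}
	if !filepath.IsAbs(mapPath) {
		return fmt.Errorf("map path must be absolute: %s", mapPath)
	}
	if err := os.MkdirAll(mapPath, 0750); err != nil {
		return err
	}
	link := filepath.Join(mapPath, linkName)
	if err := os.Remove(link); err != nil && !os.IsNotExist(err) {
		return err
	}
	return os.Symlink(devicePath, link)
}

func main() {
	dir, err := ioutil.TempDir("", "mapdevice")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.RemoveAll(dir)

	if err := mapDevice("/dev/loop7", dir, "pod-uid-1234"); err != nil {
		fmt.Println("map failed:", err)
		return
	}
	target, _ := os.Readlink(filepath.Join(dir, "pod-uid-1234"))
	fmt.Println("symlink points at", target) // /dev/loop7
}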
106
vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go generated vendored Normal file
View file

@ -0,0 +1,106 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"fmt"
"os"
"os/exec"
"strings"
"github.com/golang/glog"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
blockDevicePath, err := v.GetLoopDevice(path)
if err != nil && err.Error() != ErrDeviceNotFound {
return "", err
}
// If no existing loop device for the path, create one
if blockDevicePath == "" {
glog.V(4).Infof("Creating device for path: %s", path)
blockDevicePath, err = makeLoopDevice(path)
if err != nil {
return "", err
}
}
return blockDevicePath, nil
}
// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return "", errors.New(ErrNotAvailable)
}
if err != nil {
return "", fmt.Errorf("not attachable: %v", err)
}
args := []string{"-j", path}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device discover command for path %s: %v", path, err)
return "", err
}
return parseLosetupOutputForDevice(out)
}
func makeLoopDevice(path string) (string, error) {
args := []string{"-f", "--show", path}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device create command for path %s: %v", path, err)
return "", err
}
return parseLosetupOutputForDevice(out)
}
// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
args := []string{"-d", device}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
if !strings.Contains(string(out), "No such device or address") {
return err
}
}
return nil
}
func parseLosetupOutputForDevice(output []byte) (string, error) {
if len(output) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
// losetup returns device in the format:
// /dev/loop1: [0073]:148662 (/dev/sda)
device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0])
if len(device) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
return device, nil
}

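A self-contained sketch of the losetup output parsing used by GetLoopDevice and makeLoopDevice; the sample output line is illustrative.

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errDeviceNotFound = errors.New("device not found")

// parseLosetup extracts the device name from `losetup -j <file>` output,
// which looks like: /dev/loop1: [0073]:148662 (/var/lib/file.img)
func parseLosetup(output []byte) (string, error) {
	if len(output) == 0 {
		return "", errDeviceNotFound
	}
	device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0])
	if device == "" {
		return "", errDeviceNotFound
	}
	return device, nil
}

func main() {
	out := []byte("/dev/loop1: [0073]:148662 (/var/lib/file.img)\n")
	dev, err := parseLosetup(out)
	fmt.Println(dev, err) // /dev/loop1 <nil>

	_, err = parseLosetup(nil)
	fmt.Println(err) // device not found
}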
View file

@ -0,0 +1,39 @@
// +build !linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
return "", fmt.Errorf("AttachFileDevice not supported for this build.")
}
// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
return "", fmt.Errorf("GetLoopDevice not supported for this build.")
}
// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
return fmt.Errorf("RemoveLoopDevice not supported for this build.")
}

View file

@ -37,6 +37,19 @@ type Volume interface {
MetricsProvider
}
// BlockVolume interface provides methods to generate global map path
// and pod device map path.
type BlockVolume interface {
// GetGlobalMapPath returns a global map path which contains
// symbolic links associated to a block device.
// ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid}
GetGlobalMapPath(spec *Spec) (string, error)
// GetPodDeviceMapPath returns a pod device map path
// and name of a symbolic link associated to a block device.
// ex. pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
GetPodDeviceMapPath() (string, string)
}
// MetricsProvider exposes metrics (e.g. used,available space) related to a
// Volume.
type MetricsProvider interface {
@ -132,6 +145,34 @@ type Unmounter interface {
TearDownAt(dir string) error
}
// BlockVolumeMapper interface provides methods to set up/map the volume.
type BlockVolumeMapper interface {
BlockVolume
// SetUpDevice prepares the volume at a self-determined directory path,
// which may or may not exist yet, and returns the physical device path
// of the block volume along with an error.
// If the plugin is non-attachable, it should prepare the device
// in /dev/ (or where appropriate) and return unique device path.
// Unique device path across kubelet node reboot is required to avoid
// unexpected block volume destruction.
// If the plugin is attachable, it should not do anything here and
// just return an empty string for the device path.
// Instead, attachable plugins have to return a unique device path
// at attacher.Attach() and attacher.WaitForAttach().
// This may be called more than once, so implementations must be idempotent.
SetUpDevice() (string, error)
}
// BlockVolumeUnmapper interface provides methods to cleanup/unmap the volumes.
type BlockVolumeUnmapper interface {
BlockVolume
// TearDownDevice removes traces of the SetUpDevice procedure under
// a self-determined directory.
// If the plugin is non-attachable, this method detaches the volume
// from a node.
TearDownDevice(mapPath string, devicePath string) error
}
// Provisioner is an interface that creates templates for PersistentVolumes
// and can create the volume as a new resource in the infrastructure provider.
type Provisioner interface {
@ -195,8 +236,10 @@ type BulkVolumeVerifier interface {
// Detacher can detach a volume from a node.
type Detacher interface {
// Detach the given device from the node with the given Name.
Detach(deviceName string, nodeName types.NodeName) error
// Detach the given volume from the node with the given Name.
// volumeName is name of the volume as returned from plugin's
// GetVolumeName().
Detach(volumeName string, nodeName types.NodeName) error
// UnmountDevice unmounts the global mount of the disk. This
// should only be called once all bind mounts have been