vendor: bump to Kube 1.9/master

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-11-13 11:33:25 +01:00
parent 7076c73172
commit 7a675ccd92
GPG key ID: B2BEAD150DE936B9
202 changed files with 8543 additions and 7270 deletions


@ -45,12 +45,6 @@ const (
// to one container of a pod.
SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
// CreatedByAnnotation represents the key used to store the spec(json)
// used to create the resource.
// This field is deprecated in favor of ControllerRef (see #44407).
// TODO(#50720): Remove this field in v1.9.
CreatedByAnnotation = "kubernetes.io/created-by"
// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
// in the Annotations of a Node.
PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"


@ -265,6 +265,11 @@ func IsIntegerResourceName(str string) bool {
return integerResources.Has(str) || IsExtendedResourceName(api.ResourceName(str))
}
// Extended and HugePages resources
func IsScalarResourceName(name api.ResourceName) bool {
return IsExtendedResourceName(name) || IsHugePageResourceName(name)
}
// this function aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *api.Service) bool {


@ -24,11 +24,12 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/v1"
)
func init() {
Install(api.GroupFactoryRegistry, api.Registry, api.Scheme)
Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme


@ -0,0 +1,46 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package legacyscheme
import (
"os"
"k8s.io/apimachinery/pkg/apimachinery/announced"
"k8s.io/apimachinery/pkg/apimachinery/registered"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
)
// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details)
var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global
// API registry.
var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS"))
// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
// NOTE: If you are copying this file to start a new api group, STOP! Copy the
// extensions group instead. This Scheme is special and should appear ONLY in
// the api group, unless you really know what you're doing.
// TODO(lavalamp): make the above error impossible.
var Scheme = runtime.NewScheme()
// Codecs provides access to encoding and decoding for the scheme
var Codecs = serializer.NewCodecFactory(Scheme)
// ParameterCodec handles versioning of objects that are converted to query parameters.
var ParameterCodec = runtime.NewParameterCodec(Scheme)
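The new legacyscheme package simply re-homes the global registry, scheme, and codec factory that previously lived in pkg/api, which is why the Install call earlier in this commit now takes legacyscheme.* arguments. A minimal sketch of a caller after this bump (the helper name decodeInto is invented for illustration):

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
)

// decodeInto decodes serialized bytes into obj using the relocated codec
// factory, exactly as callers previously reached for api.Codecs.
func decodeInto(data []byte, obj runtime.Object) error {
	return runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, obj)
}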


@ -17,42 +17,17 @@ limitations under the License.
package api
import (
"os"
"k8s.io/apimachinery/pkg/apimachinery/announced"
"k8s.io/apimachinery/pkg/apimachinery/registered"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
)
// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details)
var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global
// API registry.
var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS"))
// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
// NOTE: If you are copying this file to start a new api group, STOP! Copy the
// extensions group instead. This Scheme is special and should appear ONLY in
// the api group, unless you really know what you're doing.
// TODO(lavalamp): make the above error impossible.
var Scheme = runtime.NewScheme()
// Codecs provides access to encoding and decoding for the scheme
var Codecs = serializer.NewCodecFactory(Scheme)
// GroupName is the group name used in this package
const GroupName = ""
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// ParameterCodec handles versioning of objects that are converted to query parameters.
var ParameterCodec = runtime.NewParameterCodec(Scheme)
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()


@ -343,7 +343,7 @@ type PersistentVolumeSource struct {
NFS *NFSVolumeSource
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
// +optional
RBD *RBDVolumeSource
RBD *RBDPersistentVolumeSource
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource
@ -383,7 +383,7 @@ type PersistentVolumeSource struct {
PortworxVolume *PortworxVolumeSource
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// +optional
ScaleIO *ScaleIOVolumeSource
ScaleIO *ScaleIOPersistentVolumeSource
// Local represents directly-attached storage with node affinity
// +optional
Local *LocalVolumeSource
@ -1006,6 +1006,37 @@ type RBDVolumeSource struct {
ReadOnly bool
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
type RBDPersistentVolumeSource struct {
// Required: CephMonitors is a collection of Ceph monitors
CephMonitors []string
// Required: RBDImage is the rados image name
RBDImage string
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string
// Optional: RadosPool is the rados pool name, default is rbd
// +optional
RBDPool string
// Optional: RBDUser is the rados user name, default is admin
// +optional
RadosUser string
// Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring
// +optional
Keyring string
// Optional: SecretRef is reference to the authentication secret for User, default is empty.
// +optional
SecretRef *SecretReference
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool
}
// Represents a cinder volume resource in Openstack. A Cinder volume
// must exist before mounting to a container. The volume must also be
// in the same region as the kubelet. Cinder volumes support ownership
@ -1238,7 +1269,7 @@ type AzureDiskVolumeSource struct {
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly *bool
// Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
// Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
Kind *AzureDataDiskKind
}
@ -1254,13 +1285,13 @@ type ScaleIOVolumeSource struct {
// Flag to enable/disable SSL communication with Gateway, default false
// +optional
SSLEnabled bool
// The name of the Protection Domain for the configured storage (defaults to "default").
// The name of the ScaleIO Protection Domain for the configured storage.
// +optional
ProtectionDomain string
// The Storage Pool associated with the protection domain (defaults to "default").
// The ScaleIO Storage Pool associated with the protection domain.
// +optional
StoragePool string
// Indicates whether the storage for a volume should be thick or thin (defaults to "thin").
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional
StorageMode string
// The name of a volume already created in the ScaleIO system
@ -1277,6 +1308,42 @@ type ScaleIOVolumeSource struct {
ReadOnly bool
}
// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume that can be defined
// by an admin via a storage class, for instance.
type ScaleIOPersistentVolumeSource struct {
// The host address of the ScaleIO API Gateway.
Gateway string
// The name of the storage system as configured in ScaleIO.
System string
// SecretRef references to the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
SecretRef *SecretReference
// Flag to enable/disable SSL communication with Gateway, default false
// +optional
SSLEnabled bool
// The name of the ScaleIO Protection Domain for the configured storage.
// +optional
ProtectionDomain string
// The ScaleIO Storage Pool associated with the protection domain.
// +optional
StoragePool string
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional
StorageMode string
// The name of a volume created in the ScaleIO system
// that is associated with this volume source.
VolumeName string
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool
}
// Represents a StorageOS persistent volume resource.
type StorageOSVolumeSource struct {
// VolumeName is the human-readable name of the StorageOS volume. Volume
@ -2229,7 +2296,7 @@ type Taint struct {
// TimeAdded represents the time at which the taint was added.
// It is only written for NoExecute taints.
// +optional
TimeAdded metav1.Time
TimeAdded *metav1.Time
}
type TaintEffect string
@ -3117,7 +3184,7 @@ type NodeConfigSource struct {
type DaemonEndpoint struct {
/*
The port tag was not properly in quotes in earlier releases, so it must be
uppercased for backwards compat (since it was falling back to var name of
uppercase for backwards compatibility (since it was falling back to var name of
'Port').
*/
@ -4043,7 +4110,7 @@ const (
// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
BasicAuthPasswordKey = "password"
// SecretTypeSSHAuth contains data needed for SSH authetication.
// SecretTypeSSHAuth contains data needed for SSH authentication.
//
// Required field:
// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
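Taken together, the hunks above give persistent volumes their own RBD and ScaleIO source types, whose SecretRef fields are namespaced SecretReferences rather than pod-local references. A rough sketch of a PersistentVolumeSource built with the new internal type (function name and field values invented):

// examplePV is illustrative only; a real PersistentVolume sets exactly one source.
func examplePV() api.PersistentVolume {
	return api.PersistentVolume{
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				RBD: &api.RBDPersistentVolumeSource{
					CephMonitors: []string{"10.16.154.78:6789"},
					RBDImage:     "foo",
					SecretRef:    &api.SecretReference{Name: "ceph-secret", Namespace: "kube-system"},
				},
			},
		},
	}
}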


@ -1,48 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO: This GetVersion/GetGroup arrangement is temporary and will be replaced
// with a GroupAndVersion type.
package util
import "strings"
func GetVersion(groupVersion string) string {
s := strings.Split(groupVersion, "/")
if len(s) != 2 {
// e.g. return "v1" for groupVersion="v1"
return s[len(s)-1]
}
return s[1]
}
func GetGroup(groupVersion string) string {
s := strings.Split(groupVersion, "/")
if len(s) == 1 {
// e.g. return "" for groupVersion="v1"
return ""
}
return s[0]
}
// GetGroupVersion returns the "group/version". It returns "version" if group
// is empty. It returns "group/" if version is empty.
func GetGroupVersion(group, version string) string {
if len(group) == 0 {
return version
}
return group + "/" + version
}


@ -235,7 +235,9 @@ func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *v1.Replicatio
func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error {
out.Replicas = *in.Replicas
out.MinReadySeconds = in.MinReadySeconds
if in.Selector != nil {
out.Selector = new(metav1.LabelSelector)
metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s)
}
if in.Template != nil {
@ -252,6 +254,15 @@ func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *v
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
for _, cond := range in.Conditions {
out.Conditions = append(out.Conditions, extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetConditionType(cond.Type),
Status: api.ConditionStatus(cond.Status),
LastTransitionTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
})
}
return nil
}
@ -294,6 +305,15 @@ func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *e
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
for _, cond := range in.Conditions {
out.Conditions = append(out.Conditions, v1.ReplicationControllerCondition{
Type: v1.ReplicationControllerConditionType(cond.Type),
Status: v1.ConditionStatus(cond.Status),
LastTransitionTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
})
}
return nil
}


@ -368,13 +368,28 @@ func SetDefaults_RBDVolumeSource(obj *v1.RBDVolumeSource) {
}
}
func SetDefaults_RBDPersistentVolumeSource(obj *v1.RBDPersistentVolumeSource) {
if obj.RBDPool == "" {
obj.RBDPool = "rbd"
}
if obj.RadosUser == "" {
obj.RadosUser = "admin"
}
if obj.Keyring == "" {
obj.Keyring = "/etc/ceph/keyring"
}
}
func SetDefaults_ScaleIOVolumeSource(obj *v1.ScaleIOVolumeSource) {
if obj.ProtectionDomain == "" {
obj.ProtectionDomain = "default"
if obj.StorageMode == "" {
obj.StorageMode = "ThinProvisioned"
}
if obj.StoragePool == "" {
obj.StoragePool = "default"
if obj.FSType == "" {
obj.FSType = "xfs"
}
}
func SetDefaults_ScaleIOPersistentVolumeSource(obj *v1.ScaleIOPersistentVolumeSource) {
if obj.StorageMode == "" {
obj.StorageMode = "ThinProvisioned"
}
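For reference, the net effect of the new defaulting functions when driven through the generated SetObjectDefaults_PersistentVolume entry point updated later in this diff; the function name exampleRBDDefaults and the field values are illustrative:

import (
	corev1 "k8s.io/api/core/v1"
	apiv1 "k8s.io/kubernetes/pkg/api/v1"
)

func exampleRBDDefaults() {
	pv := &corev1.PersistentVolume{}
	pv.Spec.RBD = &corev1.RBDPersistentVolumeSource{
		CephMonitors: []string{"10.16.154.78:6789"},
		RBDImage:     "foo",
	}
	// Fills RBDPool="rbd", RadosUser="admin", Keyring="/etc/ceph/keyring",
	// per SetDefaults_RBDPersistentVolumeSource above.
	apiv1.SetObjectDefaults_PersistentVolume(pv)
}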


@ -1,65 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"k8s.io/api/core/v1"
utilrand "k8s.io/apimachinery/pkg/util/rand"
)
// NameGenerator generates names for objects. Some backends may have more information
// available to guide selection of new names and this interface hides those details.
type NameGenerator interface {
// GenerateName generates a valid name from the base name, adding a random suffix to the
// the base. If base is valid, the returned name must also be valid. The generator is
// responsible for knowing the maximum valid name length.
GenerateName(base string) string
}
// GenerateName will resolve the object name of the provided v1.ObjectMeta to a generated version if
// necessary. It expects that validation for v1.ObjectMeta has already completed (that Base is a
// valid name) and that the NameGenerator generates a name that is also valid.
func GenerateName(u NameGenerator, meta *v1.ObjectMeta) {
if len(meta.GenerateName) == 0 || len(meta.Name) != 0 {
return
}
meta.Name = u.GenerateName(meta.GenerateName)
}
// simpleNameGenerator generates random names.
type simpleNameGenerator struct{}
// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics
// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes
// name (63 characters)
var SimpleNameGenerator NameGenerator = simpleNameGenerator{}
const (
// TODO: make this flexible for non-core resources with alternate naming rules.
maxNameLength = 63
randomLength = 5
maxGeneratedNameLength = maxNameLength - randomLength
)
func (simpleNameGenerator) GenerateName(base string) string {
if len(base) > maxGeneratedNameLength {
base = base[:maxGeneratedNameLength]
}
return fmt.Sprintf("%s%s", base, utilrand.String(randomLength))
}


@ -88,12 +88,18 @@ func OpaqueIntResourceName(name string) v1.ResourceName {
var overcommitBlacklist = sets.NewString(string(v1.ResourceNvidiaGPU))
// IsOvercommitAllowed returns true if the resource is in the default
// namespace and not blacklisted.
// namespace and not blacklisted and is not hugepages.
func IsOvercommitAllowed(name v1.ResourceName) bool {
return IsDefaultNamespaceResource(name) &&
!IsHugePageResourceName(name) &&
!overcommitBlacklist.Has(string(name))
}
// Extended and Hugepages resources
func IsScalarResourceName(name v1.ResourceName) bool {
return IsExtendedResourceName(name) || IsHugePageResourceName(name)
}
// this function aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *v1.Service) bool {
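The new IsScalarResourceName helper mirrors its internal-API counterpart earlier in this commit. Assuming the file lives in pkg/api/v1/helper (imported elsewhere in this diff as v1helper), a quick sketch of its behaviour on example resource names:

import v1helper "k8s.io/kubernetes/pkg/api/v1/helper"

func exampleScalarResources() {
	_ = v1helper.IsScalarResourceName("hugepages-2Mi") // true: hugepages count as scalar resources
	_ = v1helper.IsOvercommitAllowed("hugepages-2Mi")  // false: hugepages are now excluded from overcommit
	_ = v1helper.IsOvercommitAllowed("cpu")            // true: default namespace, not blacklisted
}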


@ -305,6 +305,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource,
Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource,
Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource,
Convert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource,
Convert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource,
Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource,
Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource,
Convert_v1_RangeAllocation_To_api_RangeAllocation,
@ -333,6 +335,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_api_ResourceRequirements_To_v1_ResourceRequirements,
Convert_v1_SELinuxOptions_To_api_SELinuxOptions,
Convert_api_SELinuxOptions_To_v1_SELinuxOptions,
Convert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource,
Convert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource,
Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource,
Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource,
Convert_v1_Secret_To_api_Secret,
@ -3124,7 +3128,7 @@ func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *v1.
out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs))
out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS))
out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD))
out.RBD = (*api.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD))
out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI))
out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder))
out.CephFS = (*api.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS))
@ -3137,7 +3141,7 @@ func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *v1.
out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
out.ScaleIO = (*api.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO))
out.Local = (*api.LocalVolumeSource)(unsafe.Pointer(in.Local))
out.StorageOS = (*api.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS))
return nil
@ -3154,7 +3158,7 @@ func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api
out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs))
out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS))
out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD))
out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD))
out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI))
out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume))
@ -3167,7 +3171,7 @@ func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api
out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
out.ScaleIO = (*v1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO))
out.Local = (*v1.LocalVolumeSource)(unsafe.Pointer(in.Local))
out.StorageOS = (*v1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS))
return nil
@ -4062,6 +4066,40 @@ func Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVo
return autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s)
}
func autoConvert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *api.RBDPersistentVolumeSource, s conversion.Scope) error {
out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
out.SecretRef = (*api.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *api.RBDPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource(in, out, s)
}
func autoConvert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *api.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error {
out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource is an autogenerated conversion function.
func Convert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *api.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
out.RBDImage = in.RBDImage
@ -4458,6 +4496,44 @@ func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out
return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s)
}
func autoConvert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *api.ScaleIOPersistentVolumeSource, s conversion.Scope) error {
out.Gateway = in.Gateway
out.System = in.System
out.SecretRef = (*api.SecretReference)(unsafe.Pointer(in.SecretRef))
out.SSLEnabled = in.SSLEnabled
out.ProtectionDomain = in.ProtectionDomain
out.StoragePool = in.StoragePool
out.StorageMode = in.StorageMode
out.VolumeName = in.VolumeName
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *api.ScaleIOPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource(in, out, s)
}
func autoConvert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *api.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error {
out.Gateway = in.Gateway
out.System = in.System
out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
out.SSLEnabled = in.SSLEnabled
out.ProtectionDomain = in.ProtectionDomain
out.StoragePool = in.StoragePool
out.StorageMode = in.StorageMode
out.VolumeName = in.VolumeName
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource is an autogenerated conversion function.
func Convert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *api.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error {
out.Gateway = in.Gateway
out.System = in.System
@ -5101,7 +5177,7 @@ func autoConvert_v1_Taint_To_api_Taint(in *v1.Taint, out *api.Taint, s conversio
out.Key = in.Key
out.Value = in.Value
out.Effect = api.TaintEffect(in.Effect)
out.TimeAdded = in.TimeAdded
out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
@ -5114,7 +5190,7 @@ func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *v1.Taint, s conversio
out.Key = in.Key
out.Value = in.Value
out.Effect = v1.TaintEffect(in.Effect)
out.TimeAdded = in.TimeAdded
out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}


@ -137,7 +137,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) {
SetDefaults_HostPathVolumeSource(in.Spec.PersistentVolumeSource.HostPath)
}
if in.Spec.PersistentVolumeSource.RBD != nil {
SetDefaults_RBDVolumeSource(in.Spec.PersistentVolumeSource.RBD)
SetDefaults_RBDPersistentVolumeSource(in.Spec.PersistentVolumeSource.RBD)
}
if in.Spec.PersistentVolumeSource.ISCSI != nil {
SetDefaults_ISCSIVolumeSource(in.Spec.PersistentVolumeSource.ISCSI)
@ -146,7 +146,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) {
SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk)
}
if in.Spec.PersistentVolumeSource.ScaleIO != nil {
SetDefaults_ScaleIOVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO)
SetDefaults_ScaleIOPersistentVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO)
}
}


@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/api"
apiutil "k8s.io/kubernetes/pkg/api/util"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// ValidateEvent makes sure that the event makes sense.
@ -63,12 +63,16 @@ func ValidateEvent(event *api.Event) field.ErrorList {
// Check whether the kind in groupVersion is scoped at the root of the api hierarchy
func isNamespacedKind(kind, groupVersion string) (bool, error) {
group := apiutil.GetGroup(groupVersion)
g, err := api.Registry.Group(group)
gv, err := schema.ParseGroupVersion(groupVersion)
if err != nil {
return false, err
}
restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, apiutil.GetVersion(groupVersion))
g, err := legacyscheme.Registry.Group(gv.Group)
if err != nil {
return false, err
}
restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: kind}, gv.Version)
if err != nil {
return false, err
}
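The deleted apiutil.GetGroup/GetVersion helpers are replaced here by schema.ParseGroupVersion, which covers the same two cases; a quick sketch of the values the new code relies on (function name invented):

import "k8s.io/apimachinery/pkg/runtime/schema"

func exampleParseGroupVersion() {
	gv, _ := schema.ParseGroupVersion("apps/v1beta1")
	// gv.Group == "apps", gv.Version == "v1beta1"
	gv, _ = schema.ParseGroupVersion("v1")
	// gv.Group == "", gv.Version == "v1" (the core-group case the old helpers special-cased)
	_ = gv
}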


@ -45,6 +45,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/helper"
"k8s.io/kubernetes/pkg/api/legacyscheme"
apiservice "k8s.io/kubernetes/pkg/api/service"
k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1"
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
@ -1087,6 +1088,17 @@ func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) fiel
return allErrs
}
func validateRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(rbd.CephMonitors) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
}
if len(rbd.RBDImage) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("image"), ""))
}
return allErrs
}
func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(cd.VolumeID) == 0 {
@ -1233,6 +1245,20 @@ func validateScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, fldPath *field.Pa
return allErrs
}
func validateScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if sio.Gateway == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), ""))
}
if sio.System == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("system"), ""))
}
if sio.VolumeName == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
}
return allErrs
}
func validateLocalVolumeSource(ls *api.LocalVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if ls.Path == "" {
@ -1379,7 +1405,7 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
allErrs = append(allErrs, field.Forbidden(specPath.Child("rbd"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateRBDVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...)
allErrs = append(allErrs, validateRBDPersistentVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...)
}
}
if pv.Spec.Quobyte != nil {
@ -1477,7 +1503,7 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
allErrs = append(allErrs, field.Forbidden(specPath.Child("scaleIO"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateScaleIOVolumeSource(pv.Spec.ScaleIO, specPath.Child("scaleIO"))...)
allErrs = append(allErrs, validateScaleIOPersistentVolumeSource(pv.Spec.ScaleIO, specPath.Child("scaleIO"))...)
}
}
if pv.Spec.Local != nil {
@ -1528,6 +1554,12 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = ValidatePersistentVolume(newPv)
// PersistentVolumeSource should be immutable after creation.
if !apiequality.Semantic.DeepEqual(newPv.Spec.PersistentVolumeSource, oldPv.Spec.PersistentVolumeSource) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "persistentvolumesource"), "is immutable after creation"))
}
newPv.Status = oldPv.Status
return allErrs
}
@ -1755,7 +1787,7 @@ func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.
} else if len(fs.FieldPath) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), ""))
} else {
internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
internalFieldPath, _, err := legacyscheme.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err)))
} else if !expressions.Has(internalFieldPath) {
@ -1923,7 +1955,11 @@ func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, contain
allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique"))
}
if !path.IsAbs(mnt.MountPath) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be an absolute path"))
// also allow windows absolute path
p := mnt.MountPath
if len(p) < 2 || ((p[0] < 'A' || p[0] > 'Z') && (p[0] < 'a' || p[0] > 'z')) || p[1] != ':' {
allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be an absolute path"))
}
}
mountpoints.Insert(mnt.MountPath)
if len(mnt.SubPath) > 0 {
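The relaxed mountPath check above now also accepts Windows drive-letter paths. A standalone restatement of that predicate for clarity (the name looksLikeWindowsAbsPath is invented, not the validator's own):

// looksLikeWindowsAbsPath mirrors the inline check added above:
// at least two characters, an ASCII drive letter, then ':'.
func looksLikeWindowsAbsPath(p string) bool {
	if len(p) < 2 {
		return false
	}
	isLetter := ('A' <= p[0] && p[0] <= 'Z') || ('a' <= p[0] && p[0] <= 'z')
	return isLetter && p[1] == ':'
}

// looksLikeWindowsAbsPath(`C:\data`) is true, so such mountPaths are no longer rejected;
// looksLikeWindowsAbsPath("data/logs") is false, so relative paths still fail validation.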


@ -571,6 +571,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource))
return nil
}, InType: reflect.TypeOf(&QuobyteVolumeSource{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RBDPersistentVolumeSource).DeepCopyInto(out.(*RBDPersistentVolumeSource))
return nil
}, InType: reflect.TypeOf(&RBDPersistentVolumeSource{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource))
return nil
@ -627,6 +631,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions))
return nil
}, InType: reflect.TypeOf(&SELinuxOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ScaleIOPersistentVolumeSource).DeepCopyInto(out.(*ScaleIOPersistentVolumeSource))
return nil
}, InType: reflect.TypeOf(&ScaleIOPersistentVolumeSource{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource))
return nil
@ -3747,7 +3755,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
if *in == nil {
*out = nil
} else {
*out = new(RBDVolumeSource)
*out = new(RBDPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
}
@ -3864,7 +3872,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
if *in == nil {
*out = nil
} else {
*out = new(ScaleIOVolumeSource)
*out = new(ScaleIOPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
}
@ -4815,6 +4823,36 @@ func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) {
*out = *in
if in.CephMonitors != nil {
in, out := &in.CephMonitors, &out.CephMonitors
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource.
func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource {
if in == nil {
return nil
}
out := new(RBDPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
*out = *in
@ -5196,6 +5234,31 @@ func (in *SELinuxOptions) DeepCopy() *SELinuxOptions {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource.
func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource {
if in == nil {
return nil
}
out := new(ScaleIOPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) {
*out = *in
@ -5903,7 +5966,15 @@ func (in *TCPSocketAction) DeepCopy() *TCPSocketAction {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Taint) DeepCopyInto(out *Taint) {
*out = *in
in.TimeAdded.DeepCopyInto(&out.TimeAdded)
if in.TimeAdded != nil {
in, out := &in.TimeAdded, &out.TimeAdded
if *in == nil {
*out = nil
} else {
*out = new(v1.Time)
(*in).DeepCopyInto(*out)
}
}
return
}
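Because Taint.TimeAdded became a *metav1.Time earlier in this diff, the generated deep copy now nil-checks it, and callers need the same guard; a trivial sketch (the helper taintAge is invented):

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api"
)

// taintAge shows the nil-safe access pattern the pointer field now requires.
func taintAge(t *api.Taint, now metav1.Time) time.Duration {
	if t.TimeAdded == nil {
		return 0
	}
	return now.Time.Sub(t.TimeAdded.Time)
}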