*: initial update to kube 1.8

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-09-26 16:23:09 +02:00
parent 2453222695
commit d6e819133d
1237 changed files with 84117 additions and 564982 deletions

View file

@ -20,6 +20,8 @@ import (
"fmt"
"time"
v1authenticationapi "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@ -27,14 +29,12 @@ import (
"k8s.io/apimachinery/pkg/watch"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
clientgoclientset "k8s.io/client-go/kubernetes"
clientset "k8s.io/client-go/kubernetes"
v1authentication "k8s.io/client-go/kubernetes/typed/authentication/v1"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
v1authenticationapi "k8s.io/kubernetes/pkg/apis/authentication/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
v1authentication "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
"k8s.io/kubernetes/pkg/serviceaccount"
"github.com/golang/glog"

View file

@ -21,54 +21,41 @@ import (
"sync"
"github.com/golang/glog"
appsv1beta1 "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/pkg/api/v1"
appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)
// GetControllerOf returns the controllerRef if controllee has a controller,
// otherwise returns nil.
func GetControllerOf(controllee metav1.Object) *metav1.OwnerReference {
ownerRefs := controllee.GetOwnerReferences()
for i := range ownerRefs {
owner := &ownerRefs[i]
if owner.Controller != nil && *owner.Controller == true {
return owner
}
}
return nil
}
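// Illustrative sketch (not from this change): metav1.GetControllerOf, which the updated
// code below uses in place of this package-local helper, is called the same way. A
// hypothetical check that an object is controlled by a given owner could read:
func isControlledBy(obj, owner metav1.Object) bool {
	// GetControllerOf returns nil when obj carries no controller owner reference.
	ref := metav1.GetControllerOf(obj)
	return ref != nil && ref.UID == owner.GetUID()
}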
type baseControllerRefManager struct {
controller metav1.Object
selector labels.Selector
type BaseControllerRefManager struct {
Controller metav1.Object
Selector labels.Selector
canAdoptErr error
canAdoptOnce sync.Once
canAdoptFunc func() error
CanAdoptFunc func() error
}
func (m *baseControllerRefManager) canAdopt() error {
func (m *BaseControllerRefManager) CanAdopt() error {
m.canAdoptOnce.Do(func() {
if m.canAdoptFunc != nil {
m.canAdoptErr = m.canAdoptFunc()
if m.CanAdoptFunc != nil {
m.canAdoptErr = m.CanAdoptFunc()
}
})
return m.canAdoptErr
}
// claimObject tries to take ownership of an object for this controller.
// ClaimObject tries to take ownership of an object for this controller.
//
// It will reconcile the following:
// * Adopt orphans if the match function returns true.
// * Release owned objects if the match function returns false.
//
// A non-nil error is returned if some form of reconciliation was attemped and
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
@ -77,10 +64,10 @@ func (m *baseControllerRefManager) canAdopt() error {
// own the object.
//
// No reconciliation will be attempted if the controller is being deleted.
func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
controllerRef := GetControllerOf(obj)
func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
controllerRef := metav1.GetControllerOf(obj)
if controllerRef != nil {
if controllerRef.UID != m.controller.GetUID() {
if controllerRef.UID != m.Controller.GetUID() {
// Owned by someone else. Ignore.
return false, nil
}
@ -93,7 +80,7 @@ func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(met
}
// Owned by us but selector doesn't match.
// Try to release, unless we're being deleted.
if m.controller.GetDeletionTimestamp() != nil {
if m.Controller.GetDeletionTimestamp() != nil {
return false, nil
}
if err := release(obj); err != nil {
@ -110,7 +97,7 @@ func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(met
}
// It's an orphan.
if m.controller.GetDeletionTimestamp() != nil || !match(obj) {
if m.Controller.GetDeletionTimestamp() != nil || !match(obj) {
// Ignore if we're being deleted or selector doesn't match.
return false, nil
}
@ -133,7 +120,7 @@ func (m *baseControllerRefManager) claimObject(obj metav1.Object, match func(met
}
type PodControllerRefManager struct {
baseControllerRefManager
BaseControllerRefManager
controllerKind schema.GroupVersionKind
podControl PodControlInterface
}
@ -141,14 +128,14 @@ type PodControllerRefManager struct {
// NewPodControllerRefManager returns a PodControllerRefManager that exposes
// methods to manage the controllerRef of pods.
//
// The canAdopt() function can be used to perform a potentially expensive check
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If canAdopt() returns a non-nil error, all adoptions will fail.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once canAdopt() is called, it will not be called again by the same
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// PodControllerRefManager instance. Create a new instance if it makes
// sense to check canAdopt() again (e.g. in a different sync pass).
// sense to check CanAdopt() again (e.g. in a different sync pass).
func NewPodControllerRefManager(
podControl PodControlInterface,
controller metav1.Object,
@ -157,10 +144,10 @@ func NewPodControllerRefManager(
canAdopt func() error,
) *PodControllerRefManager {
return &PodControllerRefManager{
baseControllerRefManager: baseControllerRefManager{
controller: controller,
selector: selector,
canAdoptFunc: canAdopt,
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
podControl: podControl,
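// Illustrative sketch (not from this change): a hypothetical sync pass wiring the
// manager together. getFreshOwner stands in for a live GET of the owning controller;
// RecheckDeletionTimestamp (defined later in this file) wraps it so adoption is
// refused once the owner carries a deletion timestamp.
func claimPodsSketch(podControl PodControlInterface, owner metav1.Object, selector labels.Selector,
	ownerKind schema.GroupVersionKind, getFreshOwner func() (metav1.Object, error), pods []*v1.Pod) ([]*v1.Pod, error) {
	canAdopt := RecheckDeletionTimestamp(getFreshOwner)
	cm := NewPodControllerRefManager(podControl, owner, selector, ownerKind, canAdopt)
	// ClaimPods adopts matching orphans and releases owned pods that no longer match.
	return cm.ClaimPods(pods)
}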
@ -176,7 +163,7 @@ func NewPodControllerRefManager(
// Optional: If one or more filters are specified, a Pod will only be claimed if
// all filters return true.
//
// A non-nil error is returned if some form of reconciliation was attemped and
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
@ -189,7 +176,7 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.
match := func(obj metav1.Object) bool {
pod := obj.(*v1.Pod)
// Check selector first so filters only run on potentially matching Pods.
if !m.selector.Matches(labels.Set(pod.Labels)) {
if !m.Selector.Matches(labels.Set(pod.Labels)) {
return false
}
for _, filter := range filters {
@ -207,7 +194,7 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.
}
for _, pod := range pods {
ok, err := m.claimObject(pod, match, adopt, release)
ok, err := m.ClaimObject(pod, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
@ -222,7 +209,7 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.
// AdoptPod sends a patch to take control of the pod. It returns the error if
// the patching fails.
func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
if err := m.canAdopt(); err != nil {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
@ -230,7 +217,7 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.controller.GetName(), m.controller.GetUID(), pod.UID)
m.Controller.GetName(), m.Controller.GetUID(), pod.UID)
return m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(addControllerPatch))
}
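// Illustrative values (not from this change): for an owner of controllerKind
// extensions/v1beta1 ReplicaSet named "frontend" with uid "rs-uid" adopting a pod
// with uid "pod-uid", addControllerPatch above renders roughly as:
//
//	{"metadata":{"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet",
//	"name":"frontend","uid":"rs-uid","controller":true,"blockOwnerDeletion":true}],"uid":"pod-uid"}}
//
// The outer "uid" pins the patch to the pod that was observed, so adoption fails
// instead of attaching to a different pod recreated under the same name.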
@ -238,8 +225,8 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controller.GetUID(), pod.UID)
pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID)
err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
@ -267,7 +254,7 @@ func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
// categories and accordingly adopt or release them. See comments on these functions
// for more details.
type ReplicaSetControllerRefManager struct {
baseControllerRefManager
BaseControllerRefManager
controllerKind schema.GroupVersionKind
rsControl RSControlInterface
}
@ -275,14 +262,14 @@ type ReplicaSetControllerRefManager struct {
// NewReplicaSetControllerRefManager returns a ReplicaSetControllerRefManager that exposes
// methods to manage the controllerRef of ReplicaSets.
//
// The canAdopt() function can be used to perform a potentially expensive check
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If canAdopt() returns a non-nil error, all adoptions will fail.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once canAdopt() is called, it will not be called again by the same
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// ReplicaSetControllerRefManager instance. Create a new instance if it
// makes sense to check canAdopt() again (e.g. in a different sync pass).
// makes sense to check CanAdopt() again (e.g. in a different sync pass).
func NewReplicaSetControllerRefManager(
rsControl RSControlInterface,
controller metav1.Object,
@ -291,10 +278,10 @@ func NewReplicaSetControllerRefManager(
canAdopt func() error,
) *ReplicaSetControllerRefManager {
return &ReplicaSetControllerRefManager{
baseControllerRefManager: baseControllerRefManager{
controller: controller,
selector: selector,
canAdoptFunc: canAdopt,
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
rsControl: rsControl,
@ -307,7 +294,7 @@ func NewReplicaSetControllerRefManager(
// * Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attemped and
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
@ -319,7 +306,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep
var errlist []error
match := func(obj metav1.Object) bool {
return m.selector.Matches(labels.Set(obj.GetLabels()))
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptReplicaSet(obj.(*extensions.ReplicaSet))
@ -329,7 +316,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep
}
for _, rs := range sets {
ok, err := m.claimObject(rs, match, adopt, release)
ok, err := m.ClaimObject(rs, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
@ -344,7 +331,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep
// AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
// the error if the patching fails.
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaSet) error {
if err := m.canAdopt(); err != nil {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
@ -352,7 +339,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.controller.GetName(), m.controller.GetUID(), rs.UID)
m.Controller.GetName(), m.Controller.GetUID(), rs.UID)
return m.rsControl.PatchReplicaSet(rs.Namespace, rs.Name, []byte(addControllerPatch))
}
@ -360,8 +347,8 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extensions.ReplicaSet) error {
glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controller.GetUID(), replicaSet.UID)
replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID)
err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
@ -379,9 +366,9 @@ func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extension
return err
}
// RecheckDeletionTimestamp returns a canAdopt() function to recheck deletion.
// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion.
//
// The canAdopt() function calls getObject() to fetch the latest value,
// The CanAdopt() function calls getObject() to fetch the latest value,
// and denies adoption attempts if that object has a non-nil DeletionTimestamp.
func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error {
return func() error {
@ -402,7 +389,7 @@ func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() er
// categories and accordingly adopt or release them. See comments on these functions
// for more details.
type ControllerRevisionControllerRefManager struct {
baseControllerRefManager
BaseControllerRefManager
controllerKind schema.GroupVersionKind
crControl ControllerRevisionControlInterface
}
@ -426,10 +413,10 @@ func NewControllerRevisionControllerRefManager(
canAdopt func() error,
) *ControllerRevisionControllerRefManager {
return &ControllerRevisionControllerRefManager{
baseControllerRefManager: baseControllerRefManager{
controller: controller,
selector: selector,
canAdoptFunc: canAdopt,
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
crControl: crControl,
@ -454,7 +441,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor
var errlist []error
match := func(obj metav1.Object) bool {
return m.selector.Matches(labels.Set(obj.GetLabels()))
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptControllerRevision(obj.(*appsv1beta1.ControllerRevision))
@ -464,7 +451,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor
}
for _, h := range histories {
ok, err := m.claimObject(h, match, adopt, release)
ok, err := m.ClaimObject(h, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
@ -479,7 +466,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor
// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *appsv1beta1.ControllerRevision) error {
if err := m.canAdopt(); err != nil {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
@ -487,7 +474,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.controller.GetName(), m.controller.GetUID(), history.UID)
m.Controller.GetName(), m.Controller.GetUID(), history.UID)
return m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(addControllerPatch))
}
@ -495,8 +482,8 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *appsv1beta1.ControllerRevision) error {
glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controller.GetUID(), history.UID)
history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID)
err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {

View file

@ -25,30 +25,30 @@ import (
"sync/atomic"
"time"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
ref "k8s.io/client-go/tools/reference"
"k8s.io/client-go/util/integer"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
clientretry "k8s.io/client-go/util/retry"
_ "k8s.io/kubernetes/pkg/api/install"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/ref"
"k8s.io/kubernetes/pkg/api/validation"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
clientretry "k8s.io/kubernetes/pkg/client/retry"
hashutil "k8s.io/kubernetes/pkg/util/hash"
taintutils "k8s.io/kubernetes/pkg/util/taints"
"github.com/golang/glog"
)
@ -65,6 +65,21 @@ const (
// 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s
// latency/pod at the scale of 3000 pods over 100 nodes.
ExpectationsTimeout = 5 * time.Minute
// When batching pod creates, SlowStartInitialBatchSize is the size of the
// initial batch. The size of each successive batch is twice the size of
// the previous batch. For example, for a value of 1, batch sizes would be
// 1, 2, 4, 8, ... and for a value of 10, batch sizes would be
// 10, 20, 40, 80, ... Setting the value higher means that quota denials
// will result in more doomed API calls and associated event spam. Setting
// the value lower will result in more API call round trip periods for
// large batches.
//
// Given a number of pods to start "N":
// The number of doomed calls per sync once quota is exceeded is given by:
// min(N,SlowStartInitialBatchSize)
// The number of batches is given by:
// 1+floor(log_2(ceil(N/SlowStartInitialBatchSize)))
SlowStartInitialBatchSize = 1
)
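// Illustrative sketch (not from this change): the doubling described above, as a
// hypothetical helper that lists the batch sizes used when creating n pods. For
// n=25 and an initial size of 1 this yields 1, 2, 4, 8, 10, i.e. 5 batches,
// matching 1+floor(log_2(ceil(25/1))).
func slowStartBatchSizes(n, initialBatchSize int) []int {
	var sizes []int
	for remaining, batch := n, initialBatchSize; remaining > 0; batch *= 2 {
		if batch > remaining {
			batch = remaining
		}
		sizes = append(sizes, batch)
		remaining -= batch
	}
	return sizes
}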
var UpdateTaintBackoff = wait.Backoff{
@ -464,7 +479,7 @@ func getPodsAnnotationSet(template *v1.PodTemplateSpec, object runtime.Object) (
for k, v := range template.Annotations {
desiredAnnotations[k] = v
}
createdByRef, err := ref.GetReference(api.Scheme, object)
createdByRef, err := ref.GetReference(scheme.Scheme, object)
if err != nil {
return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
}
@ -472,7 +487,7 @@ func getPodsAnnotationSet(template *v1.PodTemplateSpec, object runtime.Object) (
// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
// We need to consistently handle this case of annotation versioning.
codec := api.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
codec := scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion)
createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
Reference: *createdByRef,
@ -559,11 +574,7 @@ func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Objec
if controllerRef != nil {
pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef)
}
clone, err := api.Scheme.DeepCopy(&template.Spec)
if err != nil {
return nil, err
}
pod.Spec = *clone.(*v1.PodSpec)
pod.Spec = *template.Spec.DeepCopy()
return pod, nil
}
@ -580,7 +591,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT
}
if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil {
r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err)
return fmt.Errorf("unable to create pods: %v", err)
return err
} else {
accessor, err := meta.Accessor(object)
if err != nil {
@ -610,11 +621,13 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
type FakePodControl struct {
sync.Mutex
Templates []v1.PodTemplateSpec
ControllerRefs []metav1.OwnerReference
DeletePodName []string
Patches [][]byte
Err error
Templates []v1.PodTemplateSpec
ControllerRefs []metav1.OwnerReference
DeletePodName []string
Patches [][]byte
Err error
CreateLimit int
CreateCallCount int
}
var _ PodControlInterface = &FakePodControl{}
@ -632,6 +645,10 @@ func (f *FakePodControl) PatchPod(namespace, name string, data []byte) error {
func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec, object runtime.Object) error {
f.Lock()
defer f.Unlock()
f.CreateCallCount++
if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit {
return fmt.Errorf("Not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount)
}
f.Templates = append(f.Templates, *spec)
if f.Err != nil {
return f.Err
@ -642,6 +659,10 @@ func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec,
func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
f.Lock()
defer f.Unlock()
f.CreateCallCount++
if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit {
return fmt.Errorf("Not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount)
}
f.Templates = append(f.Templates, *spec)
f.ControllerRefs = append(f.ControllerRefs, *controllerRef)
if f.Err != nil {
@ -653,6 +674,10 @@ func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *v1.
func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
f.Lock()
defer f.Unlock()
f.CreateCallCount++
if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit {
return fmt.Errorf("Not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount)
}
f.Templates = append(f.Templates, *template)
f.ControllerRefs = append(f.ControllerRefs, *controllerRef)
if f.Err != nil {
@ -678,6 +703,8 @@ func (f *FakePodControl) Clear() {
f.Templates = []v1.PodTemplateSpec{}
f.ControllerRefs = []metav1.OwnerReference{}
f.Patches = [][]byte{}
f.CreateLimit = 0
f.CreateCallCount = 0
}
// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs.
@ -711,8 +738,8 @@ func (s ByLogging) Less(i, j int) bool {
return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
}
// 6. older pods < newer pods < empty timestamp pods
if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) {
return afterOrZero(s[j].CreationTimestamp, s[i].CreationTimestamp)
if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp)
}
return false
}
@ -751,31 +778,31 @@ func (s ActivePods) Less(i, j int) bool {
return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
}
// 6. Empty creation time pods < newer pods < older pods
if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) {
return afterOrZero(s[i].CreationTimestamp, s[j].CreationTimestamp)
if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp)
}
return false
}
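// Illustrative only (not from this change): a caller scaling down can sort with
// ActivePods so the least desirable pods (unassigned, not ready, newer) come first
// and are deleted from the front of the slice; "sort" is the caller's own import,
// and pods and diff are placeholders:
//
//	sort.Sort(ActivePods(pods))
//	podsToDelete := pods[:diff]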
// afterOrZero checks if time t1 is after time t2; if one of them
// is zero, the zero time is seen as after non-zero time.
func afterOrZero(t1, t2 metav1.Time) bool {
func afterOrZero(t1, t2 *metav1.Time) bool {
if t1.Time.IsZero() || t2.Time.IsZero() {
return t1.Time.IsZero()
}
return t1.After(t2.Time)
}
func podReadyTime(pod *v1.Pod) metav1.Time {
func podReadyTime(pod *v1.Pod) *metav1.Time {
if podutil.IsPodReady(pod) {
for _, c := range pod.Status.Conditions {
// we only care about pod ready conditions
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
return c.LastTransitionTime
return &c.LastTransitionTime
}
}
}
return metav1.Time{}
return &metav1.Time{}
}
func maxContainerRestarts(pod *v1.Pod) int {
@ -841,10 +868,10 @@ type ControllersByCreationTimestamp []*v1.ReplicationController
func (o ControllersByCreationTimestamp) Len() int { return len(o) }
func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ControllersByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}
// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
@ -853,10 +880,10 @@ type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet
func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}
// ReplicaSetsBySizeOlder sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
@ -885,50 +912,11 @@ func (o ReplicaSetsBySizeNewer) Less(i, j int) bool {
return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
}
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint *v1.Taint) error {
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
newNode, ok, err := v1helper.AddOrUpdateTaint(oldNode, taint)
if err != nil {
return fmt.Errorf("Failed to update taint annotation!")
}
if !ok {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)
})
}
// RemoveTaintOffNode is for cleaning up taints temporarily added to node,
// won't fail if target taint doesn't exist or has been removed.
// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
// any API calls.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint *v1.Taint, node *v1.Node) error {
// Short circuit for limiting amount of API calls.
if node != nil {
match := false
for i := range node.Spec.Taints {
if node.Spec.Taints[i].MatchTaint(taint) {
match = true
break
}
}
if !match {
return nil
}
// AddOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls
// to update nodes; otherwise, no API calls. Return error if any.
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
@ -945,11 +933,77 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint *v1.Taint,
if err != nil {
return err
}
newNode, ok, err := v1helper.RemoveTaint(oldNode, taint)
if err != nil {
return fmt.Errorf("Failed to update taint annotation!")
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := taintutils.AddOrUpdateTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("Failed to update taint of node!")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !ok {
if !updated {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)
})
}
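// Illustrative only (not from this change): a hypothetical caller adding a temporary
// NoSchedule taint and clearing it later; c and nodeName are placeholders, and
// passing a nil node to RemoveTaintOffNode below skips the short circuit so the
// node is fetched from the API server:
//
//	taint := &v1.Taint{Key: "node.example.com/maintenance", Effect: v1.TaintEffectNoSchedule}
//	err := AddOrUpdateTaintOnNode(c, nodeName, taint)
//	...
//	err = RemoveTaintOffNode(c, nodeName, nil, taint)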
// RemoveTaintOffNode is for cleaning up taints temporarily added to node,
// won't fail if target taint doesn't exist or has been removed.
// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
// any API calls.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
// Short circuit for limiting amount of API calls.
if node != nil {
match := false
for _, taint := range taints {
if taintutils.TaintExists(node.Spec.Taints, taint) {
match = true
break
}
}
if !match {
return nil
}
}
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := taintutils.RemoveTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("Failed to remove taint of node!")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !updated {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)
@ -964,18 +1018,11 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
}
newTaints := newNode.Spec.Taints
objCopy, err := api.Scheme.DeepCopy(oldNode)
newNodeClone := oldNode.DeepCopy()
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to copy node object %#v: %v", oldNode, err)
}
newNode, ok := (objCopy).(*v1.Node)
if !ok {
return fmt.Errorf("failed to cast copy onto node object %#v: %v", newNode, err)
}
newNode.Spec.Taints = newTaints
newData, err := json.Marshal(newNode)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNode, nodeName, err)
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
@ -1003,14 +1050,14 @@ func WaitForCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs
}
// ComputeHash returns a hash value calculated from pod template and a collisionCount to avoid hash collision
func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int64) uint32 {
func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) uint32 {
podTemplateSpecHasher := fnv.New32a()
hashutil.DeepHashObject(podTemplateSpecHasher, *template)
// Add collisionCount in the hash if it exists.
if collisionCount != nil {
collisionCountBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(collisionCountBytes, uint64(*collisionCount))
binary.LittleEndian.PutUint32(collisionCountBytes, uint32(*collisionCount))
podTemplateSpecHasher.Write(collisionCountBytes)
}