Switch to github.com/golang/dep for vendoring

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2017-01-31 16:45:59 -08:00
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions

75  vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD generated vendored Normal file

@@ -0,0 +1,75 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"daemoncontroller.go",
"doc.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
],
)
go_test(
name = "go_default_test",
srcs = ["daemoncontroller_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/securitycontext:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

4  vendor/k8s.io/kubernetes/pkg/controller/daemon/OWNERS generated vendored Executable file

@@ -0,0 +1,4 @@
reviewers:
- mikedanese
- kargakis
- lukaszo

811  vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go generated vendored Normal file

@@ -0,0 +1,811 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemon
import (
"fmt"
"reflect"
"sort"
"sync"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
"github.com/golang/glog"
)
const (
// Daemon sets will periodically check that their daemon pods are running as expected.
FullDaemonSetResyncPeriod = 30 * time.Second // TODO: Figure out if this time seems reasonable.
// Realistic value of the burstReplica field for the replication manager,
// based on performance requirements for Kubernetes 1.0.
BurstReplicas = 500
// If sending a status update to the API server fails, we retry a finite number of times.
StatusUpdateRetries = 1
)
// DaemonSetsController is responsible for synchronizing DaemonSet objects stored
// in the system with actual running pods.
type DaemonSetsController struct {
kubeClient clientset.Interface
eventRecorder record.EventRecorder
podControl controller.PodControlInterface
// A dsc is temporarily suspended after creating/deleting this many replicas.
// It resumes normal action after observing the watch events for them.
burstReplicas int
// To allow injection of syncDaemonSet for testing.
syncHandler func(dsKey string) error
// A TTLCache of pod creates/deletes each ds expects to see
expectations controller.ControllerExpectationsInterface
// A store of daemon sets
dsStore *cache.StoreToDaemonSetLister
// A store of pods
podStore *cache.StoreToPodLister
// A store of nodes
nodeStore *cache.StoreToNodeLister
// dsStoreSynced returns true if the daemonset store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
dsStoreSynced cache.InformerSynced
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podStoreSynced cache.InformerSynced
// nodeStoreSynced returns true if the node store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
nodeStoreSynced cache.InformerSynced
lookupCache *controller.MatchingCache
// DaemonSet keys that need to be synced.
queue workqueue.RateLimitingInterface
}
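// NewDaemonSetsController returns a new DaemonSetsController that watches the
// given DaemonSet, Pod, and Node informers, writes through kubeClient, and
// bounds its lookup cache at lookupCacheSize entries.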
func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podInformer informers.PodInformer, nodeInformer informers.NodeInformer, kubeClient clientset.Interface, lookupCacheSize int) *DaemonSetsController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().RESTClient().GetRateLimiter())
}
dsc := &DaemonSetsController{
kubeClient: kubeClient,
eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "daemonset-controller"}),
podControl: controller.RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "daemon-set"}),
},
burstReplicas: BurstReplicas,
expectations: controller.NewControllerExpectations(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "daemonset"),
}
daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ds := obj.(*extensions.DaemonSet)
glog.V(4).Infof("Adding daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
},
UpdateFunc: func(old, cur interface{}) {
oldDS := old.(*extensions.DaemonSet)
curDS := cur.(*extensions.DaemonSet)
// We should invalidate the whole lookup cache if a DS's selector has been updated.
//
// Imagine that you have two DaemonSets:
// * old DS1
// * new DS2
// You also have a pod that is attached to DS2 (because it doesn't match DS1's selector).
// Now imagine that you change DS1's selector so that it now matches that pod;
// in that case we must invalidate the whole cache so that the pod can be adopted by DS1.
//
// This makes the lookup cache less helpful, but selector updates are rare,
// so it's not a big problem.
if !reflect.DeepEqual(oldDS.Spec.Selector, curDS.Spec.Selector) {
dsc.lookupCache.InvalidateAll()
}
glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
dsc.enqueueDaemonSet(curDS)
},
DeleteFunc: dsc.deleteDaemonset,
})
dsc.dsStore = daemonSetInformer.Lister()
dsc.dsStoreSynced = daemonSetInformer.Informer().HasSynced
// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addPod,
UpdateFunc: dsc.updatePod,
DeleteFunc: dsc.deletePod,
})
dsc.podStore = podInformer.Lister()
dsc.podStoreSynced = podInformer.Informer().HasSynced
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addNode,
UpdateFunc: dsc.updateNode,
},
)
dsc.nodeStoreSynced = nodeInformer.Informer().HasSynced
dsc.nodeStore = nodeInformer.Lister()
dsc.syncHandler = dsc.syncDaemonSet
dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
return dsc
}
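// deleteDaemonset handles DaemonSet delete notifications, unwrapping the
// DeletedFinalStateUnknown tombstone delivered when a delete event was
// missed, and enqueues the deleted set for a final sync.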
func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
ds, ok := obj.(*extensions.DaemonSet)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
ds, ok = tombstone.Obj.(*extensions.DaemonSet)
if !ok {
glog.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj)
return
}
}
glog.V(4).Infof("Deleting daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
}
// Run begins watching and syncing daemon sets.
func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dsc.queue.ShutDown()
glog.Infof("Starting Daemon Sets controller manager")
if !cache.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.dsStoreSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(dsc.runWorker, time.Second, stopCh)
}
<-stopCh
glog.Infof("Shutting down Daemon Set Controller")
}
func (dsc *DaemonSetsController) runWorker() {
for dsc.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (dsc *DaemonSetsController) processNextWorkItem() bool {
dsKey, quit := dsc.queue.Get()
if quit {
return false
}
defer dsc.queue.Done(dsKey)
err := dsc.syncHandler(dsKey.(string))
if err == nil {
dsc.queue.Forget(dsKey)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err))
dsc.queue.AddRateLimited(dsKey)
return true
}
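// enqueueDaemonSet adds the DaemonSet's key to the work queue.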
func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) {
key, err := controller.KeyFunc(ds)
if err != nil {
glog.Errorf("Couldn't get key for object %#v: %v", ds, err)
return
}
// TODO: Handle overlapping controllers better. See comment in ReplicationManager.
dsc.queue.Add(key)
}
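// getPodDaemonSet returns the DaemonSet that should manage the given pod,
// consulting the lookup cache first and falling back to a store lookup on a
// miss or an invalid cache entry.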
func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.DaemonSet {
// look up in the cache, if cached and the cache is valid, just return cached value
if obj, cached := dsc.lookupCache.GetMatchingObject(pod); cached {
ds, ok := obj.(*extensions.DaemonSet)
if !ok {
// This should not happen
glog.Errorf("lookup cache does not return a DaemonSet object")
return nil
}
if dsc.isCacheValid(pod, ds) {
return ds
}
}
sets, err := dsc.dsStore.GetPodDaemonSets(pod)
if err != nil {
glog.V(4).Infof("No daemon sets found for pod %v, daemon set controller will avoid syncing", pod.Name)
return nil
}
if len(sets) > 1 {
// More than one item in this list indicates user error. If two daemon
// sets overlap, sort by creation timestamp, subsort by name, then pick
// the first.
glog.Errorf("user error! more than one daemon is selecting pods with labels: %+v", pod.Labels)
sort.Sort(byCreationTimestamp(sets))
}
// update lookup cache
dsc.lookupCache.Update(pod, &sets[0])
return &sets[0]
}
// isCacheValid checks whether the cached DaemonSet is still valid for the given pod.
func (dsc *DaemonSetsController) isCacheValid(pod *v1.Pod, cachedDS *extensions.DaemonSet) bool {
_, exists, err := dsc.dsStore.Get(cachedDS)
// ds has been deleted or updated, cache is invalid
if err != nil || !exists || !isDaemonSetMatch(pod, cachedDS) {
return false
}
return true
}
// isDaemonSetMatch takes a Pod and a DaemonSet and returns whether they match.
// TODO(mqliang): This logic is a copy from GetPodDaemonSets(), remove the duplication
func isDaemonSetMatch(pod *v1.Pod, ds *extensions.DaemonSet) bool {
if ds.Namespace != pod.Namespace {
return false
}
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
err = fmt.Errorf("invalid selector: %v", err)
return false
}
// If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
return false
}
return true
}
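// addPod is the pod informer's add handler: it records an observed creation
// in the owning DaemonSet's expectations and enqueues that set.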
func (dsc *DaemonSetsController) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
glog.V(4).Infof("Pod %s added.", pod.Name)
if ds := dsc.getPodDaemonSet(pod); ds != nil {
dsKey, err := controller.KeyFunc(ds)
if err != nil {
glog.Errorf("Couldn't get key for object %#v: %v", ds, err)
return
}
dsc.expectations.CreationObserved(dsKey)
dsc.enqueueDaemonSet(ds)
}
}
// When a pod is updated, figure out what sets manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old
// and new set. old and cur must be *v1.Pod types.
func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
curPod := cur.(*v1.Pod)
oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
return
}
glog.V(4).Infof("Pod %s updated.", curPod.Name)
if curDS := dsc.getPodDaemonSet(curPod); curDS != nil {
dsc.enqueueDaemonSet(curDS)
}
// If the labels have not changed, then the daemon set responsible for
// the pod is the same as it was before. In that case we have enqueued the daemon
// set above, and do not have to enqueue the set again.
if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) {
// It's ok if both oldDS and curDS are the same, because curDS will set
// the expectations on its run so oldDS will have no effect.
if oldDS := dsc.getPodDaemonSet(oldPod); oldDS != nil {
dsc.enqueueDaemonSet(oldDS)
}
}
}
func (dsc *DaemonSetsController) deletePod(obj interface{}) {
pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod
// changed labels the new daemonset will not be woken up till the periodic
// resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
return
}
}
glog.V(4).Infof("Pod %s deleted.", pod.Name)
if ds := dsc.getPodDaemonSet(pod); ds != nil {
dsKey, err := controller.KeyFunc(ds)
if err != nil {
glog.Errorf("Couldn't get key for object %#v: %v", ds, err)
return
}
dsc.expectations.DeletionObserved(dsKey)
dsc.enqueueDaemonSet(ds)
}
}
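// addNode enqueues every DaemonSet that should schedule a pod onto the newly
// added node.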
func (dsc *DaemonSetsController) addNode(obj interface{}) {
// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
dsList, err := dsc.dsStore.List()
if err != nil {
glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
return
}
node := obj.(*v1.Node)
for i := range dsList.Items {
ds := &dsList.Items[i]
_, shouldSchedule, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
if err != nil {
continue
}
if shouldSchedule {
dsc.enqueueDaemonSet(ds)
}
}
}
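// updateNode enqueues every DaemonSet whose scheduling decision may have
// changed because the node's labels changed.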
func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
oldNode := old.(*v1.Node)
curNode := cur.(*v1.Node)
if reflect.DeepEqual(oldNode.Labels, curNode.Labels) {
// If node labels didn't change, we can ignore this update.
return
}
dsList, err := dsc.dsStore.List()
if err != nil {
glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
return
}
// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the updated node (unless it has other work to do, too).
for i := range dsList.Items {
ds := &dsList.Items[i]
_, oldShouldSchedule, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds)
if err != nil {
continue
}
_, currentShouldSchedule, currentShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(curNode, ds)
if err != nil {
continue
}
if (oldShouldSchedule != currentShouldSchedule) || (oldShouldContinueRunning != currentShouldContinueRunning) {
dsc.enqueueDaemonSet(ds)
}
}
}
// getNodesToDaemonPods returns a map from node names to the daemon pods (corresponding to ds) running on those nodes.
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) {
nodeToDaemonPods := make(map[string][]*v1.Pod)
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return nil, err
}
daemonPods, err := dsc.podStore.Pods(ds.Namespace).List(selector)
if err != nil {
return nodeToDaemonPods, err
}
for i := range daemonPods {
// TODO: Do we need to copy here?
daemonPod := &(*daemonPods[i])
nodeName := daemonPod.Spec.NodeName
nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], daemonPod)
}
return nodeToDaemonPods, nil
}
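// manage reconciles a single DaemonSet: for every known node it decides
// whether a daemon pod should run there, then creates missing pods and
// deletes excess or misplaced ones (at most burstReplicas of each per sync),
// recording expectations up front so the resulting watch events are not
// double-counted.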
func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
// Find out which nodes are running the daemon pods selected by ds.
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
}
// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
nodeList, err := dsc.nodeStore.List()
if err != nil {
return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
}
var nodesNeedingDaemonPods, podsToDelete []string
for _, node := range nodeList.Items {
_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(&node, ds)
if err != nil {
continue
}
daemonPods, isRunning := nodeToDaemonPods[node.Name]
switch {
case shouldSchedule && !isRunning:
// If daemon pod is supposed to be running on node, but isn't, create daemon pod.
nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, node.Name)
case shouldContinueRunning && len(daemonPods) > 1:
// If daemon pod is supposed to be running on node, but more than 1 daemon pod is running, delete the excess daemon pods.
// Sort the daemon pods by creation time, so the oldest is preserved.
sort.Sort(podByCreationTimestamp(daemonPods))
for i := 1; i < len(daemonPods); i++ {
podsToDelete = append(podsToDelete, daemonPods[i].Name)
}
case !shouldContinueRunning && isRunning:
// If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
for i := range daemonPods {
podsToDelete = append(podsToDelete, daemonPods[i].Name)
}
}
}
// We need to set expectations before creating/deleting pods to avoid race conditions.
dsKey, err := controller.KeyFunc(ds)
if err != nil {
return fmt.Errorf("couldn't get key for object %#v: %v", ds, err)
}
createDiff := len(nodesNeedingDaemonPods)
deleteDiff := len(podsToDelete)
if createDiff > dsc.burstReplicas {
createDiff = dsc.burstReplicas
}
if deleteDiff > dsc.burstReplicas {
deleteDiff = dsc.burstReplicas
}
dsc.expectations.SetExpectations(dsKey, createDiff, deleteDiff)
// Error channel to communicate back failures; make the buffer big enough to avoid any blocking.
errCh := make(chan error, createDiff+deleteDiff)
glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
createWait := sync.WaitGroup{}
createWait.Add(createDiff)
for i := 0; i < createDiff; i++ {
go func(ix int) {
defer createWait.Done()
if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &ds.Spec.Template, ds); err != nil {
glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.CreationObserved(dsKey)
errCh <- err
utilruntime.HandleError(err)
}
}(i)
}
createWait.Wait()
glog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
deleteWait := sync.WaitGroup{}
deleteWait.Add(deleteDiff)
for i := 0; i < deleteDiff; i++ {
go func(ix int) {
defer deleteWait.Done()
if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[ix], ds); err != nil {
glog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.DeletionObserved(dsKey)
errCh <- err
utilruntime.HandleError(err)
}
}(i)
}
deleteWait.Wait()
// collect errors if any for proper reporting/retry logic in the controller
errors := []error{}
close(errCh)
for err := range errCh {
errors = append(errors, err)
}
return utilerrors.NewAggregate(errors)
}
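// storeDaemonSetStatus writes the given counters to the DaemonSet's status
// subresource, skipping the update when nothing has changed and retrying on a
// fresh copy up to StatusUpdateRetries times.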
func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int) error {
if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
int(ds.Status.NumberReady) == numberReady &&
ds.Status.ObservedGeneration >= ds.Generation {
return nil
}
clone, err := api.Scheme.DeepCopy(ds)
if err != nil {
return err
}
toUpdate := clone.(*extensions.DaemonSet)
var updateErr, getErr error
for i := 0; i < StatusUpdateRetries; i++ {
toUpdate.Status.ObservedGeneration = ds.Generation
toUpdate.Status.DesiredNumberScheduled = int32(desiredNumberScheduled)
toUpdate.Status.CurrentNumberScheduled = int32(currentNumberScheduled)
toUpdate.Status.NumberMisscheduled = int32(numberMisscheduled)
toUpdate.Status.NumberReady = int32(numberReady)
if _, updateErr = dsClient.UpdateStatus(toUpdate); updateErr == nil {
return nil
}
// Update the set with the latest resource version for the next poll
if toUpdate, getErr = dsClient.Get(ds.Name, metav1.GetOptions{}); getErr != nil {
// If the GET fails we can't trust the status counters anymore. This error
// is bound to be more interesting than the update failure.
return getErr
}
}
return updateErr
}
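// updateDaemonSetStatus recomputes the desired, current, misscheduled, and
// ready pod counts for the DaemonSet by walking all nodes, and persists them
// via storeDaemonSetStatus.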
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) error {
glog.V(4).Infof("Updating daemon set status")
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
}
nodeList, err := dsc.nodeStore.List()
if err != nil {
return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
}
var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int
for _, node := range nodeList.Items {
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(&node, ds)
if err != nil {
return err
}
scheduled := len(nodeToDaemonPods[node.Name]) > 0
if wantToRun {
desiredNumberScheduled++
if scheduled {
currentNumberScheduled++
// Sort the daemon pods by creation time, so the oldest is first.
daemonPods, _ := nodeToDaemonPods[node.Name]
sort.Sort(podByCreationTimestamp(daemonPods))
if v1.IsPodReady(daemonPods[0]) {
numberReady++
}
}
} else {
if scheduled {
numberMisscheduled++
}
}
}
err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady)
if err != nil {
return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
}
return nil
}
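// syncDaemonSet is the reconciliation entry point for a single queue key: it
// fetches the DaemonSet from the store, refuses to act on a select-everything
// selector, calls manage only once prior expectations are satisfied, and
// finally updates status.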
func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := dsc.dsStore.Store.GetByKey(key)
if err != nil {
return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
}
if !exists {
glog.V(3).Infof("daemon set has been deleted %v", key)
dsc.expectations.DeleteExpectations(key)
return nil
}
ds := obj.(*extensions.DaemonSet)
everything := metav1.LabelSelector{}
if reflect.DeepEqual(ds.Spec.Selector, &everything) {
dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, "SelectingAll", "This daemon set is selecting all pods. A non-empty selector is required.")
return nil
}
// Don't process a daemon set until all its creations and deletions have been processed.
// For example if daemon set foo asked for 3 new daemon pods in the previous call to manage,
// then we do not want to call manage on foo until the daemon pods have been created.
dsKey, err := controller.KeyFunc(ds)
if err != nil {
return fmt.Errorf("couldn't get key for object %#v: %v", ds, err)
}
dsNeedsSync := dsc.expectations.SatisfiedExpectations(dsKey)
if dsNeedsSync && ds.DeletionTimestamp == nil {
if err := dsc.manage(ds); err != nil {
return err
}
}
return dsc.updateDaemonSetStatus(ds)
}
// nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
// summary. Returned booleans are:
// * wantToRun:
// Returns true when a user would expect a pod to run on this node, ignoring conditions
// such as OutOfDisk or insufficient resources that would cause a daemonset pod not to schedule.
// This is primarily used to populate daemonset status.
// * shouldSchedule:
// Returns true when a daemonset should be scheduled to a node if a daemonset pod is not already
// running on that node.
// * shouldContinueRunning:
// Returns true when a daemonset should continue running on a node if a daemonset pod is already
// running on that node.
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) {
// Because these bools require an && of all their required conditions, we start
// with all bools set to true and set a bool to false if a condition is not met.
// A bool should probably not be set to true after this line.
wantToRun, shouldSchedule, shouldContinueRunning = true, true, true
// If the daemon set specifies a node name, check that it matches with node.Name.
if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
return false, false, false, nil
}
// TODO: Move it to the predicates
for _, c := range node.Status.Conditions {
if c.Type == v1.NodeOutOfDisk && c.Status == v1.ConditionTrue {
// The kubelet will evict this pod if it needs to. Let the kubelet
// decide whether to continue running this pod, so leave shouldContinueRunning
// set to true.
shouldSchedule = false
}
}
newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
newPod.Namespace = ds.Namespace
newPod.Spec.NodeName = node.Name
pods := []*v1.Pod{}
for _, m := range dsc.podStore.Indexer.List() {
pod := m.(*v1.Pod)
if pod.Spec.NodeName != node.Name {
continue
}
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
continue
}
// ignore pods that belong to the daemonset when taking into account whether
// a daemonset should bind to a node.
if pds := dsc.getPodDaemonSet(pod); pds != nil && ds.Name == pds.Name {
continue
}
pods = append(pods, pod)
}
nodeInfo := schedulercache.NewNodeInfo(pods...)
nodeInfo.SetNode(node)
_, reasons, err := predicates.GeneralPredicates(newPod, nil, nodeInfo)
if err != nil {
glog.Warningf("GeneralPredicates failed on ds '%s/%s' due to unexpected error: %v", ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
return false, false, false, err
}
for _, r := range reasons {
glog.V(4).Infof("GeneralPredicates failed on ds '%s/%s' for reason: %v", ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
switch reason := r.(type) {
case *predicates.InsufficientResourceError:
dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error())
shouldSchedule = false
case *predicates.PredicateFailureError:
var emitEvent bool
// We try to partition predicate failures into two classes here: those intentional on the part of the operator and those that are not.
switch reason {
// intentional
case
predicates.ErrNodeSelectorNotMatch,
predicates.ErrPodNotMatchHostName,
predicates.ErrNodeLabelPresenceViolated,
// this one is probably intentional since it's a workaround for not having
// pod hard anti affinity.
predicates.ErrPodNotFitsHostPorts:
wantToRun, shouldSchedule, shouldContinueRunning = false, false, false
// unintentional
case
predicates.ErrDiskConflict,
predicates.ErrVolumeZoneConflict,
predicates.ErrMaxVolumeCountExceeded,
predicates.ErrNodeUnderMemoryPressure,
predicates.ErrNodeUnderDiskPressure:
// wantToRun and shouldContinueRunning are likely true here. They are
// absolutely true at the time of writing the comment. See first comment
// of this method.
shouldSchedule = false
emitEvent = true
// unexpected
case
predicates.ErrPodAffinityNotMatch,
predicates.ErrServiceAffinityViolated,
predicates.ErrTaintsTolerationsNotMatch:
return false, false, false, fmt.Errorf("unexpected reason: GeneralPredicates should not return reason %s", reason.GetReason())
default:
glog.V(4).Infof("unknownd predicate failure reason: %s", reason.GetReason())
wantToRun, shouldSchedule, shouldContinueRunning = false, false, false
emitEvent = true
}
if emitEvent {
dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.GetReason())
}
}
}
return
}
// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
type byCreationTimestamp []extensions.DaemonSet
func (o byCreationTimestamp) Len() int { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}
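// podByCreationTimestamp sorts a list of pods by creation timestamp, using
// their names as a tie breaker.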
type podByCreationTimestamp []*v1.Pod
func (o podByCreationTimestamp) Len() int { return len(o) }
func (o podByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o podByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}

769  vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller_test.go generated vendored Normal file

@@ -0,0 +1,769 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemon
import (
"fmt"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/securitycontext"
)
var (
simpleDaemonSetLabel = map[string]string{"name": "simple-daemon", "type": "production"}
simpleDaemonSetLabel2 = map[string]string{"name": "simple-daemon", "type": "test"}
simpleNodeLabel = map[string]string{"color": "blue", "speed": "fast"}
simpleNodeLabel2 = map[string]string{"color": "red", "speed": "fast"}
alwaysReady = func() bool { return true }
)
func getKey(ds *extensions.DaemonSet, t *testing.T) string {
if key, err := controller.KeyFunc(ds); err != nil {
t.Errorf("Unexpected error getting key for ds %v: %v", ds.Name, err)
return ""
} else {
return key
}
}
func newDaemonSet(name string) *extensions.DaemonSet {
return &extensions.DaemonSet{
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
},
},
DNSPolicy: v1.DNSDefault,
},
},
},
}
}
func newNode(name string, label map[string]string) *v1.Node {
return &v1.Node{
TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: label,
Namespace: v1.NamespaceDefault,
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionTrue},
},
Allocatable: v1.ResourceList{
v1.ResourcePods: resource.MustParse("100"),
},
},
}
}
func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]string) {
for i := startIndex; i < startIndex+numNodes; i++ {
nodeStore.Add(newNode(fmt.Sprintf("node-%d", i), label))
}
}
func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
GenerateName: podName,
Labels: label,
Namespace: v1.NamespaceDefault,
},
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []v1.Container{
{
Image: "foo/bar",
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
},
},
DNSPolicy: v1.DNSDefault,
},
}
v1.GenerateName(v1.SimpleNameGenerator, &pod.ObjectMeta)
return pod
}
func addPods(podStore cache.Store, nodeName string, label map[string]string, number int) {
for i := 0; i < number; i++ {
podStore.Add(newPod(fmt.Sprintf("%s-", nodeName), nodeName, label))
}
}
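// newTestController builds a DaemonSetsController wired to a fake clientset
// and a FakePodControl, with all informer-synced checks stubbed out so tests
// can drive syncs directly.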
func newTestController(initialObjects ...runtime.Object) (*DaemonSetsController, *controller.FakePodControl, *fake.Clientset) {
clientset := fake.NewSimpleClientset(initialObjects...)
informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0)
manager.podStoreSynced = alwaysReady
manager.nodeStoreSynced = alwaysReady
manager.dsStoreSynced = alwaysReady
podControl := &controller.FakePodControl{}
manager.podControl = podControl
return manager, podControl, clientset
}
func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
if len(fakePodControl.Templates) != expectedCreates {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates))
}
if len(fakePodControl.DeletePodName) != expectedDeletes {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName))
}
}
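// syncAndValidateDaemonSets runs one sync of the given DaemonSet and asserts
// the expected numbers of pod creations and deletions.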
func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
key, err := controller.KeyFunc(ds)
if err != nil {
t.Errorf("Could not get key for daemon.")
}
manager.syncHandler(key)
validateSyncDaemonSets(t, podControl, expectedCreates, expectedDeletes)
}
func TestDeleteFinalStateUnknown(t *testing.T) {
manager, _, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 1, nil)
ds := newDaemonSet("foo")
// DeletedFinalStateUnknown should queue the embedded DS if found.
manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds})
enqueuedKey, _ := manager.queue.Get()
if enqueuedKey.(string) != "default/foo" {
t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey)
}
}
// DaemonSets without node selectors should launch pods on every node.
func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 5, nil)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
}
// DaemonSets should do nothing if there aren't any nodes
func TestNoNodesDoesNothing(t *testing.T) {
manager, podControl, _ := newTestController()
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
// DaemonSets without node selectors should launch on a single node in a
// single node cluster.
func TestOneNodeDaemonLaunchesPod(t *testing.T) {
manager, podControl, _ := newTestController()
manager.nodeStore.Add(newNode("only-node", nil))
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}
// DaemonSets should place onto NotReady nodes
func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
manager, podControl, _ := newTestController()
node := newNode("not-ready", nil)
node.Status.Conditions = []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionFalse},
}
manager.nodeStore.Add(node)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}
// DaemonSets should not place onto OutOfDisk nodes
func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
manager, podControl, _ := newTestController()
node := newNode("not-enough-disk", nil)
node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
manager.nodeStore.Add(node)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
return v1.PodSpec{
NodeName: nodeName,
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: allocatableResources(memory, cpu),
},
}},
}
}
func allocatableResources(memory, cpu string) v1.ResourceList {
return v1.ResourceList{
v1.ResourceMemory: resource.MustParse(memory),
v1.ResourceCPU: resource.MustParse(cpu),
v1.ResourcePods: resource.MustParse("100"),
}
}
// DaemonSets should not place onto nodes with insufficient free resources
func TestInsufficentCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
manager, podControl, _ := newTestController()
node := newNode("too-much-mem", nil)
node.Status.Allocatable = allocatableResources("100M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
})
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
// DaemonSets should not unschedule a daemonset pod from a node with insufficient free resources
func TestInsufficentCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) {
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
podSpec.NodeName = "too-much-mem"
manager, podControl, _ := newTestController()
node := newNode("too-much-mem", nil)
node.Status.Allocatable = allocatableResources("100M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
})
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
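// Terminated pods (Succeeded/Failed) should not count against a node's
// capacity, so the daemon pod still launches.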
func TestSufficentCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
manager, podControl, _ := newTestController()
node := newNode("too-much-mem", nil)
node.Status.Allocatable = allocatableResources("100M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
Status: v1.PodStatus{Phase: v1.PodSucceeded},
})
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}
// DaemonSets should place onto nodes with sufficient free resource
func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
manager, podControl, _ := newTestController()
node := newNode("not-too-much-mem", nil)
node.Status.Allocatable = allocatableResources("200M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
})
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}
// DaemonSets should not take any action while being deleted
func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
manager, podControl, _ := newTestController()
node := newNode("not-too-much-mem", nil)
node.Status.Allocatable = allocatableResources("200M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
})
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec
now := metav1.Now()
ds.DeletionTimestamp = &now
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
// DaemonSets should not place onto nodes that would cause port conflicts
func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
podSpec := v1.PodSpec{
NodeName: "port-conflict",
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 666,
}},
}},
}
manager, podControl, _ := newTestController()
node := newNode("port-conflict", nil)
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
})
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
// Test that if the node already runs a pod using a host port, but that pod
// belongs to the same daemonset, we don't delete it.
//
// Issue: https://github.com/kubernetes/kubernetes/issues/22309
func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
podSpec := v1.PodSpec{
NodeName: "port-conflict",
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 666,
}},
}},
}
manager, podControl, _ := newTestController()
node := newNode("port-conflict", nil)
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel,
Namespace: v1.NamespaceDefault,
},
Spec: podSpec,
})
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
// DaemonSets should place onto nodes that would not cause port conflicts
func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
podSpec1 := v1.PodSpec{
NodeName: "no-port-conflict",
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 6661,
}},
}},
}
podSpec2 := v1.PodSpec{
NodeName: "no-port-conflict",
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 6662,
}},
}},
}
manager, podControl, _ := newTestController()
node := newNode("no-port-conflict", nil)
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec1,
})
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec2
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}
// DaemonSetController should not sync DaemonSets with empty pod selectors.
//
// issue https://github.com/kubernetes/kubernetes/pull/23223
func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
manager, podControl, _ := newTestController()
manager.nodeStore.Store.Add(newNode("node1", nil))
// Create pod not controlled by a daemonset.
manager.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"bang": "boom"},
Namespace: v1.NamespaceDefault,
},
Spec: v1.PodSpec{
NodeName: "node1",
},
})
// Create a misconfigured DaemonSet. An empty pod selector is invalid but could happen
// if we upgrade and make a backwards incompatible change.
//
// The node selector matches no nodes which mimics the behavior of kubectl delete.
//
// The DaemonSet should not schedule pods and should not delete scheduled pods in
// this case even though its empty pod selector matches all pods. The DaemonSetController
// should detect this misconfiguration and choose not to sync the DaemonSet. We should
// not observe a deletion of the pod on node1.
ds := newDaemonSet("foo")
ls := metav1.LabelSelector{}
ds.Spec.Selector = &ls
ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"}
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
// Controller should not create pods on nodes which have daemon pods, and should remove excess daemon pods from nodes that have too many.
func TestDealsWithExistingPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 5, nil)
addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)
addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 2)
addPods(manager.podStore.Indexer, "node-3", simpleDaemonSetLabel, 5)
addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel2, 2)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5)
}
// Daemon with node selector should launch pods on nodes matching selector.
func TestSelectorDaemonLaunchesPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 4, nil)
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
daemon := newDaemonSet("foo")
daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager.dsStore.Add(daemon)
syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0)
}
// Daemon with node selector should delete pods from nodes that do not satisfy selector.
func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 5, nil)
addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel2, 2)
addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 1)
addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel, 1)
daemon := newDaemonSet("foo")
daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager.dsStore.Add(daemon)
syncAndValidateDaemonSets(t, manager, daemon, podControl, 5, 4)
}
// DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 5, nil)
addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 2)
addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 4)
addPods(manager.podStore.Indexer, "node-6", simpleDaemonSetLabel, 13)
addPods(manager.podStore.Indexer, "node-7", simpleDaemonSetLabel2, 4)
addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel, 1)
addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel2, 1)
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20)
}
// DaemonSet with node selector which does not match any node labels should not launch pods.
func TestBadSelectorDaemonDoesNothing(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 4, nil)
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
// DaemonSet with node name should launch pod on node with corresponding name.
func TestNameDaemonSetLaunchesPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 5, nil)
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeName = "node-0"
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}
// DaemonSet with node name that does not exist should not launch pods.
func TestBadNameDaemonSetDoesNothing(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 5, nil)
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeName = "node-10"
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
// DaemonSet with a node selector and a node name that both match a node should launch a pod on that node.
func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 4, nil)
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
ds.Spec.Template.Spec.NodeName = "node-6"
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}
// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 4, nil)
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
ds.Spec.Template.Spec.NodeName = "node-0"
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}
// Daemon with node affinity should launch pods on nodes matching affinity.
func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
manager, podControl, _ := newTestController()
addNodes(manager.nodeStore.Store, 0, 4, nil)
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
daemon := newDaemonSet("foo")
daemon.Spec.Template.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "color",
Operator: v1.NodeSelectorOpIn,
Values: []string{simpleNodeLabel["color"]},
},
},
},
},
},
},
}
manager.dsStore.Add(daemon)
syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0)
}
func TestNumberReadyStatus(t *testing.T) {
daemon := newDaemonSet("foo")
manager, podControl, clientset := newTestController()
var updated *extensions.DaemonSet
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
if action.GetSubresource() != "status" {
return false, nil, nil
}
if u, ok := action.(core.UpdateAction); ok {
updated = u.GetObject().(*extensions.DaemonSet)
}
return false, nil, nil
})
addNodes(manager.nodeStore.Store, 0, 2, simpleNodeLabel)
addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)
manager.dsStore.Add(daemon)
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
if updated.Status.NumberReady != 0 {
t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
}
selector, _ := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
daemonPods, _ := manager.podStore.Pods(daemon.Namespace).List(selector)
for _, pod := range daemonPods {
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
pod.Status.Conditions = append(pod.Status.Conditions, condition)
}
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
if updated.Status.NumberReady != 2 {
t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
}
}
func TestObservedGeneration(t *testing.T) {
daemon := newDaemonSet("foo")
daemon.Generation = 1
manager, podControl, clientset := newTestController()
var updated *extensions.DaemonSet
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
if action.GetSubresource() != "status" {
return false, nil, nil
}
if u, ok := action.(core.UpdateAction); ok {
updated = u.GetObject().(*extensions.DaemonSet)
}
return false, nil, nil
})
addNodes(manager.nodeStore.Store, 0, 1, simpleNodeLabel)
addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
manager.dsStore.Add(daemon)
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
if updated.Status.ObservedGeneration != daemon.Generation {
t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, daemon.Generation, updated.Status.ObservedGeneration)
}
}
func TestNodeShouldRunDaemonPod(t *testing.T) {
cases := []struct {
podsOnNode []*v1.Pod
ds *extensions.DaemonSet
wantToRun, shouldSchedule, shouldContinueRunning bool
err error
}{
{
ds: &extensions.DaemonSet{
Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel,
},
Spec: resourcePodSpec("", "50M", "0.5"),
},
},
},
wantToRun: true,
shouldSchedule: true,
shouldContinueRunning: true,
},
{
ds: &extensions.DaemonSet{
Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel,
},
Spec: resourcePodSpec("", "200M", "0.5"),
},
},
},
wantToRun: true,
shouldSchedule: false,
shouldContinueRunning: true,
},
{
ds: &extensions.DaemonSet{
Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel,
},
Spec: resourcePodSpec("other-node", "50M", "0.5"),
},
},
},
wantToRun: false,
shouldSchedule: false,
shouldContinueRunning: false,
},
{
podsOnNode: []*v1.Pod{
{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 666,
}},
}},
},
},
},
ds: &extensions.DaemonSet{
Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 666,
}},
}},
},
},
},
},
wantToRun: false,
shouldSchedule: false,
shouldContinueRunning: false,
},
}
for i, c := range cases {
node := newNode("test-node", nil)
node.Status.Allocatable = allocatableResources("100M", "1")
manager, _, _ := newTestController()
manager.nodeStore.Store.Add(node)
for _, p := range c.podsOnNode {
manager.podStore.Indexer.Add(p)
p.Spec.NodeName = "test-node"
}
wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)
if wantToRun != c.wantToRun {
t.Errorf("[%v] expected wantToRun: %v, got: %v", i, c.wantToRun, wantToRun)
}
if shouldSchedule != c.shouldSchedule {
t.Errorf("[%v] expected shouldSchedule: %v, got: %v", i, c.shouldSchedule, shouldSchedule)
}
if shouldContinueRunning != c.shouldContinueRunning {
t.Errorf("[%v] expected shouldContinueRunning: %v, got: %v", i, c.shouldContinueRunning, shouldContinueRunning)
}
if err != c.err {
t.Errorf("[%v] expected err: %v, got: %v", i, c.err, err)
}
}
}

19  vendor/k8s.io/kubernetes/pkg/controller/daemon/doc.go generated vendored Normal file

@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package daemon contains logic for watching and synchronizing
// daemons.
package daemon // import "k8s.io/kubernetes/pkg/controller/daemon"