Vendor: Update k8s version
Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>
parent dfa93414c5
commit 52baf68d50
3756 changed files with 113013 additions and 92675 deletions
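
Nearly every hunk below applies the same mechanical change from this Kubernetes bump: ObjectMeta, ListOptions, and DeleteOptions now come from k8s.io/apimachinery/pkg/apis/meta/v1 (imported as metav1) instead of the kubernetes pkg/api/v1 package, and a few utility imports (flowcontrol, clock, the store listers) move to k8s.io/client-go or k8s.io/kubernetes/pkg/client/legacylisters. A minimal before/after sketch of the metadata change, assuming the same metav1 alias used throughout the diff; the node name and field selector are illustrative values only:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// Before this update the embedded metadata type came from pkg/api/v1:
	//   node := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node0"}}
	// After it, ObjectMeta (like ListOptions and DeleteOptions) lives in apimachinery:
	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node0"}}

	// List/Delete options move the same way, e.g. the Pods().List calls in controller_utils.go:
	opts := metav1.ListOptions{FieldSelector: "spec.nodeName=node0"}

	fmt.Println(node.Name, opts.FieldSelector)
}
```
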
vendor/k8s.io/kubernetes/pkg/controller/node/BUILD (generated, vendored): 7 changed lines

@@ -26,12 +26,11 @@ go_library(
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/legacylisters:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/system:go_default_library",
@@ -40,12 +39,14 @@ go_library(
"//vendor:github.com/prometheus/client_golang/prometheus",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/util/flowcontrol",
],
)

@@ -73,7 +74,6 @@ go_test(
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/controller/node/testutil:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -81,6 +81,7 @@ go_test(
"//vendor:k8s.io/apimachinery/pkg/util/diff",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/util/flowcontrol",
],
)

vendor/k8s.io/kubernetes/pkg/controller/node/cidr_allocator_test.go (generated, vendored): 15 changed lines

@@ -21,6 +21,7 @@ import (
"testing"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
@@ -55,7 +56,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
@@ -75,7 +76,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
@@ -99,7 +100,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
@@ -185,7 +186,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
@@ -268,7 +269,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
@@ -291,7 +292,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
@@ -359,7 +360,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {

for _, cidrToRelease := range tc.cidrsToRelease {
nodeToRelease := v1.Node{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
}

vendor/k8s.io/kubernetes/pkg/controller/node/controller_utils.go (generated, vendored): 16 changed lines

@@ -21,6 +21,8 @@ import (
"strings"

"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -28,9 +30,9 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/legacylisters"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/util/node"
utilversion "k8s.io/kubernetes/pkg/util/version"
@@ -45,11 +47,11 @@ const (

// deletePods will delete all pods from master running on given node, and return true
// if any pods were deleted, or were found pending deletion.
func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore cache.StoreToDaemonSetLister) (bool, error) {
func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore listers.StoreToDaemonSetLister) (bool, error) {
remaining := false
selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
options := v1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(options)
options := metav1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(options)
var updateErrList []error

if err != nil {
@@ -120,7 +122,7 @@ func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
func forcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
var zero int64
glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &zero})
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &zero})
if err == nil {
glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
}
@@ -202,8 +204,8 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
}
nodeName := node.Name
glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
opts := v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(opts)
opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(opts)
if err != nil {
return err
}

vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go (generated, vendored): 13 changed lines

@@ -25,20 +25,21 @@ import (

"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
"k8s.io/kubernetes/pkg/client/legacylisters"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/metrics"
utilnode "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/system"
@@ -135,9 +136,9 @@ type NodeController struct {
nodeInformer informers.NodeInformer
daemonSetInformer informers.DaemonSetInformer

podStore cache.StoreToPodLister
nodeStore cache.StoreToNodeLister
daemonSetStore cache.StoreToDaemonSetLister
podStore listers.StoreToPodLister
nodeStore listers.StoreToNodeLister
daemonSetStore listers.StoreToDaemonSetLister
// allocate/recycle CIDRs for node if allocateNodeCIDRs == true
cidrAllocator CIDRAllocator

@@ -252,7 +253,7 @@ func NewNodeController(
// We must poll because apiserver might not be up. This error causes
// controller manager to restart.
if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
nodeList, err = kubeClient.Core().Nodes().List(v1.ListOptions{
nodeList, err = kubeClient.Core().Nodes().List(metav1.ListOptions{
FieldSelector: fields.Everything().String(),
LabelSelector: labels.Everything().String(),
})

vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go (generated, vendored): 116 changed lines

@@ -80,7 +80,7 @@ func NewNodeControllerFromClient(
}

func syncNodeStore(nc *NodeController, fakeNodeHandler *testutil.FakeNodeHandler) error {
nodes, err := fakeNodeHandler.List(v1.ListOptions{})
nodes, err := fakeNodeHandler.List(metav1.ListOptions{})
if err != nil {
return err
}
@@ -124,7 +124,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: fakeNow,
Labels: map[string]string{
@@ -134,7 +134,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -168,7 +168,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -188,7 +188,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -232,7 +232,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -252,7 +252,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -276,7 +276,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
&v1.PodList{
Items: []v1.Pod{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "pod0",
Namespace: "default",
Labels: map[string]string{"daemon": "yes"},
@@ -291,7 +291,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
daemonSets: []extensions.DaemonSet{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "ds0",
Namespace: "default",
},
@@ -323,7 +323,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -343,7 +343,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -387,7 +387,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -407,7 +407,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -451,7 +451,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -471,7 +471,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -594,7 +594,7 @@ func TestPodStatusChange(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -614,7 +614,7 @@ func TestPodStatusChange(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -749,7 +749,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
{
nodeList: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -769,7 +769,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -804,7 +804,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
{
nodeList: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -824,7 +824,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -866,7 +866,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
{
nodeList: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -886,7 +886,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -927,7 +927,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
{
nodeList: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -947,7 +947,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node-master",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -986,7 +986,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
{
nodeList: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -1006,7 +1006,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -1048,7 +1048,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
{
nodeList: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -1068,7 +1068,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -1088,7 +1088,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node2",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -1108,7 +1108,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node3",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -1128,7 +1128,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
},
},
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node4",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
@@ -1245,7 +1245,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
fnh := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
@@ -1309,7 +1309,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
@@ -1320,7 +1320,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
expectedRequestCount: 2, // List+Update
expectedNodes: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
@@ -1353,7 +1353,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: fakeNow,
},
@@ -1370,7 +1370,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
@@ -1429,7 +1429,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
},
expectedNodes: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
@@ -1469,7 +1469,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
@@ -1547,7 +1547,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: fakeNow,
},
@@ -1563,7 +1563,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
@@ -1597,7 +1597,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
@@ -1667,7 +1667,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
@@ -1772,7 +1772,7 @@ func TestNodeEventGeneration(t *testing.T) {
fakeNodeHandler := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
UID: "1234567890",
CreationTimestamp: metav1.Date(2015, 8, 10, 0, 0, 0, 0, time.UTC),
@@ -1839,70 +1839,70 @@ func TestCheckPod(t *testing.T) {

{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: nil},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
Spec: v1.PodSpec{NodeName: "new"},
},
prune: false,
},
{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: nil},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
Spec: v1.PodSpec{NodeName: "old"},
},
prune: false,
},
{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: nil},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
Spec: v1.PodSpec{NodeName: ""},
},
prune: false,
},
{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: nil},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
Spec: v1.PodSpec{NodeName: "nonexistant"},
},
prune: false,
},
{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
Spec: v1.PodSpec{NodeName: "new"},
},
prune: false,
},
{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
Spec: v1.PodSpec{NodeName: "old"},
},
prune: true,
},
{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
Spec: v1.PodSpec{NodeName: "older"},
},
prune: true,
},
{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
Spec: v1.PodSpec{NodeName: "oldest"},
},
prune: true,
},
{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
Spec: v1.PodSpec{NodeName: ""},
},
prune: false,
},
{
pod: v1.Pod{
ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
Spec: v1.PodSpec{NodeName: "nonexistant"},
},
prune: false,
@@ -1912,7 +1912,7 @@ func TestCheckPod(t *testing.T) {
nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false)
nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc)
nc.nodeStore.Store.Add(&v1.Node{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "new",
},
Status: v1.NodeStatus{
@@ -1922,7 +1922,7 @@ func TestCheckPod(t *testing.T) {
},
})
nc.nodeStore.Store.Add(&v1.Node{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "old",
},
Status: v1.NodeStatus{
@@ -1932,7 +1932,7 @@ func TestCheckPod(t *testing.T) {
},
})
nc.nodeStore.Store.Add(&v1.Node{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "older",
},
Status: v1.NodeStatus{
@@ -1942,7 +1942,7 @@ func TestCheckPod(t *testing.T) {
},
})
nc.nodeStore.Store.Add(&v1.Node{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "oldest",
},
Status: v1.NodeStatus{

vendor/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue.go (generated, vendored): 2 changed lines

@@ -22,7 +22,7 @@ import (
"time"

"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/client-go/util/flowcontrol"

"github.com/golang/glog"
)

vendor/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue_test.go (generated, vendored): 2 changed lines

@@ -22,7 +22,7 @@ import (
"time"

"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/client-go/util/flowcontrol"
)

func CheckQueueEq(lhs []string, rhs TimedQueue) bool {

vendor/k8s.io/kubernetes/pkg/controller/node/testutil/BUILD (generated, vendored): 3 changed lines

@@ -17,13 +17,14 @@ go_library(
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/util/clock:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/client-go/util/clock",
],
)

vendor/k8s.io/kubernetes/pkg/controller/node/testutil/test_utils.go (generated, vendored): 23 changed lines

@@ -25,14 +25,15 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/util/clock"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
"k8s.io/kubernetes/pkg/util/clock"
utilnode "k8s.io/kubernetes/pkg/util/node"
)

@@ -129,7 +130,7 @@ func (m *FakeNodeHandler) Get(name string, opts metav1.GetOptions) (*v1.Node, er
}

// List returns a list of Nodes from the fake store.
func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
func (m *FakeNodeHandler) List(opts metav1.ListOptions) (*v1.NodeList, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -159,7 +160,7 @@ func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
}

// Delete delets a Node from the fake store.
func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
func (m *FakeNodeHandler) Delete(id string, opt *metav1.DeleteOptions) error {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -173,7 +174,7 @@ func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
}

// DeleteCollection deletes a collection of Nodes from the fake store.
func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts v1.ListOptions) error {
func (m *FakeNodeHandler) DeleteCollection(opt *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
return nil
}

@@ -214,12 +215,12 @@ func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*v1.Node, e
}

// Watch watches Nodes in a fake store.
func (m *FakeNodeHandler) Watch(opts v1.ListOptions) (watch.Interface, error) {
func (m *FakeNodeHandler) Watch(opts metav1.ListOptions) (watch.Interface, error) {
return watch.NewFake(), nil
}

// Patch patches a Node in the fake store.
func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
return nil, nil
}

@@ -262,10 +263,10 @@ func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, mes
t := metav1.Time{Time: f.clock.Now()}
namespace := ref.Namespace
if namespace == "" {
namespace = v1.NamespaceDefault
namespace = metav1.NamespaceDefault
}
return &v1.Event{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
Namespace: namespace,
},
@@ -291,7 +292,7 @@ func NewFakeRecorder() *FakeRecorder {
// NewNode is a helper function for creating Nodes for testing.
func NewNode(name string) *v1.Node {
return &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: name},
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{
ExternalID: name,
},
@@ -307,7 +308,7 @@ func NewNode(name string) *v1.Node {
// NewPod is a helper function for creating Pods for testing.
func NewPod(name, host string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: name,
},
@@ -338,7 +339,7 @@ func contains(node *v1.Node, nodes []*v1.Node) bool {

// GetZones returns list of zones for all Nodes stored in FakeNodeHandler
func GetZones(nodeHandler *FakeNodeHandler) []string {
nodes, _ := nodeHandler.List(v1.ListOptions{})
nodes, _ := nodeHandler.List(metav1.ListOptions{})
zones := sets.NewString()
for _, node := range nodes.Items {
zones.Insert(utilnode.GetZoneKey(&node))

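One non-mechanical detail worth noting: in rate_limited_queue.go and its test above, only the import path of the flowcontrol package changes (k8s.io/kubernetes/pkg/util/flowcontrol becomes k8s.io/client-go/util/flowcontrol); the calls in those files are untouched. A minimal sketch of using that package behind its new path; the QPS and burst values are made-up examples:

```go
package main

import (
	"fmt"

	// Same rate-limiter API as before, now vendored from client-go.
	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Hypothetical limits for illustration: 10 requests/second with a burst of 20.
	limiter := flowcontrol.NewTokenBucketRateLimiter(10, 20)
	if limiter.TryAccept() {
		fmt.Println("token available, proceeding")
	}
}
```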