Vendor: Update k8s version

Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>

parent dfa93414c5 · commit 52baf68d50
3756 changed files with 113013 additions and 92675 deletions
vendor/k8s.io/kubernetes/pkg/controller/BUILD (18 changes; generated, vendored)

@@ -27,22 +27,24 @@ go_library(
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
         "//pkg/client/record:go_default_library",
-        "//pkg/client/restclient:go_default_library",
         "//pkg/fields:go_default_library",
         "//pkg/serviceaccount:go_default_library",
-        "//pkg/util/clock:go_default_library",
         "//pkg/util/hash:go_default_library",
-        "//pkg/util/integer:go_default_library",
         "//vendor:github.com/golang/glog",
         "//vendor:github.com/golang/groupcache/lru",
         "//vendor:k8s.io/apimachinery/pkg/api/errors",
         "//vendor:k8s.io/apimachinery/pkg/api/meta",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/fields",
         "//vendor:k8s.io/apimachinery/pkg/labels",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
         "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
+        "//vendor:k8s.io/apimachinery/pkg/types",
         "//vendor:k8s.io/apimachinery/pkg/util/sets",
         "//vendor:k8s.io/apimachinery/pkg/watch",
+        "//vendor:k8s.io/apiserver/pkg/authentication/serviceaccount",
+        "//vendor:k8s.io/client-go/rest",
+        "//vendor:k8s.io/client-go/util/clock",
+        "//vendor:k8s.io/client-go/util/integer",
     ],
 )

@@ -58,14 +60,14 @@ go_test(
         "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/record:go_default_library",
-        "//pkg/client/restclient:go_default_library",
         "//pkg/securitycontext:go_default_library",
-        "//pkg/util/clock:go_default_library",
-        "//pkg/util/testing:go_default_library",
         "//pkg/util/uuid:go_default_library",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
         "//vendor:k8s.io/apimachinery/pkg/util/sets",
+        "//vendor:k8s.io/client-go/rest",
+        "//vendor:k8s.io/client-go/util/clock",
+        "//vendor:k8s.io/client-go/util/testing",
     ],
 )

@@ -92,7 +94,6 @@ filegroup(
         "//pkg/controller/job:all-srcs",
         "//pkg/controller/namespace:all-srcs",
         "//pkg/controller/node:all-srcs",
-        "//pkg/controller/petset:all-srcs",
         "//pkg/controller/podautoscaler:all-srcs",
         "//pkg/controller/podgc:all-srcs",
         "//pkg/controller/replicaset:all-srcs",

@@ -101,6 +102,7 @@ filegroup(
         "//pkg/controller/route:all-srcs",
         "//pkg/controller/service:all-srcs",
         "//pkg/controller/serviceaccount:all-srcs",
+        "//pkg/controller/statefulset:all-srcs",
         "//pkg/controller/volume/attachdetach:all-srcs",
         "//pkg/controller/volume/persistentvolume:all-srcs",
     ],
vendor/k8s.io/kubernetes/pkg/controller/certificates/BUILD (11 changes; generated, vendored)

@@ -19,12 +19,12 @@ go_library(
     ],
     tags = ["automanaged"],
     deps = [
         "//pkg/api/v1:go_default_library",
-        "//pkg/apis/certificates/v1alpha1:go_default_library",
+        "//pkg/apis/certificates/v1beta1:go_default_library",
         "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
-        "//pkg/client/clientset_generated/clientset/typed/certificates/v1alpha1:go_default_library",
+        "//pkg/client/clientset_generated/clientset/typed/certificates/v1beta1:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
         "//pkg/client/legacylisters:go_default_library",
         "//pkg/client/record:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/workqueue:go_default_library",

@@ -33,6 +33,7 @@ go_library(
         "//vendor:github.com/cloudflare/cfssl/signer",
         "//vendor:github.com/cloudflare/cfssl/signer/local",
         "//vendor:github.com/golang/glog",
+        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
         "//vendor:k8s.io/apimachinery/pkg/util/runtime",
         "//vendor:k8s.io/apimachinery/pkg/util/wait",

@@ -67,7 +68,7 @@ go_test(
     library = ":go_default_library",
     tags = ["automanaged"],
     deps = [
-        "//pkg/apis/certificates/v1alpha1:go_default_library",
-        "//pkg/util/cert:go_default_library",
+        "//pkg/apis/certificates/v1beta1:go_default_library",
+        "//vendor:k8s.io/client-go/util/cert",
     ],
 )
vendor/k8s.io/kubernetes/pkg/controller/certificates/certificate_controller.go (11 changes; generated, vendored)

@@ -20,15 +20,16 @@ import (
 	"fmt"
 	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/kubernetes/pkg/api/v1"
-	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
+	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+	"k8s.io/kubernetes/pkg/client/legacylisters"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/workqueue"

@@ -49,7 +50,7 @@ type CertificateController struct {
 
 	// CSR framework and store
 	csrController cache.Controller
-	csrStore      cache.StoreToCertificateRequestLister
+	csrStore      listers.StoreToCertificateRequestLister
 
 	syncHandler func(csrKey string) error
 

@@ -80,10 +81,10 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
 	// Manage the addition/update of certificate requests
 	cc.csrStore.Store, cc.csrController = cache.NewInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				return cc.kubeClient.Certificates().CertificateSigningRequests().List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				return cc.kubeClient.Certificates().CertificateSigningRequests().Watch(options)
 			},
 		},
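Note: the recurring pattern across this update is that list/watch plumbing now takes metav1.ListOptions from k8s.io/apimachinery rather than the API group's own v1.ListOptions. A minimal sketch of the new ListWatch shape, using only client interfaces that appear in this diff:

```go
package certificates

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"

	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// newCSRListWatch mirrors the ListFunc/WatchFunc wiring above: both
// closures now accept metav1.ListOptions and forward it to the client.
func newCSRListWatch(c clientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return c.Certificates().CertificateSigningRequests().List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return c.Certificates().CertificateSigningRequests().Watch(options)
		},
	}
}
```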
vendor/k8s.io/kubernetes/pkg/controller/certificates/certificate_controller_utils.go (2 changes; generated, vendored)

@@ -16,7 +16,7 @@ limitations under the License.
 
 package certificates
 
-import certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
+import certificates "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
 
 // IsCertificateRequestApproved returns true if a certificate request has the
 // "Approved" condition and no "Denied" conditions; false otherwise.
vendor/k8s.io/kubernetes/pkg/controller/certificates/cfssl_signer.go (2 changes; generated, vendored)

@@ -23,7 +23,7 @@ import (
 	"io/ioutil"
 	"os"
 
-	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
+	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
 
 	"github.com/cloudflare/cfssl/config"
 	"github.com/cloudflare/cfssl/helpers"
vendor/k8s.io/kubernetes/pkg/controller/certificates/cfssl_signer_test.go (4 changes; generated, vendored)

@@ -22,8 +22,8 @@ import (
 	"reflect"
 	"testing"
 
-	capi "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
-	"k8s.io/kubernetes/pkg/util/cert"
+	"k8s.io/client-go/util/cert"
+	capi "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
 )
 
 func TestSigner(t *testing.T) {
vendor/k8s.io/kubernetes/pkg/controller/certificates/groupapprove.go (4 changes; generated, vendored)

@@ -22,8 +22,8 @@ import (
 	"strings"
 
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
-	clientcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1alpha1"
+	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
+	clientcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1"
 )
 
 // groupApprover implements AutoApprover for signing Kubelet certificates.
vendor/k8s.io/kubernetes/pkg/controller/certificates/groupapprove_test.go (2 changes; generated, vendored)

@@ -19,7 +19,7 @@ package certificates
 import (
 	"testing"
 
-	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
+	certificates "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
 )
 
 func TestHasKubeletUsages(t *testing.T) {
vendor/k8s.io/kubernetes/pkg/controller/client_builder.go (15 changes; generated, vendored)

@@ -22,15 +22,16 @@ import (
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
+	apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
-	"k8s.io/kubernetes/pkg/client/restclient"
-	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/serviceaccount"
 
 	"github.com/golang/glog"

@@ -107,25 +108,25 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 		// check to see if the namespace exists. If it isn't a NotFound, just try to create the SA.
 		// It'll probably fail, but perhaps that will have a better message.
 		if _, err := b.CoreClient.Namespaces().Get(b.Namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) {
-			_, err = b.CoreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: b.Namespace}})
+			_, err = b.CoreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: b.Namespace}})
 			if err != nil && !apierrors.IsAlreadyExists(err) {
 				return nil, err
 			}
 		}
 
 		sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create(
-			&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Namespace: b.Namespace, Name: name}})
+			&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: b.Namespace, Name: name}})
 		if err != nil {
 			return nil, err
 		}
 	}
 
 	lw := &cache.ListWatch{
-		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 			options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
 			return b.CoreClient.Secrets(b.Namespace).List(options)
 		},
-		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 			options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
 			return b.CoreClient.Secrets(b.Namespace).Watch(options)
 		},

@@ -146,7 +147,7 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 			}
 			// TODO maybe verify the token is valid
 			clientConfig.BearerToken = string(secret.Data[v1.ServiceAccountTokenKey])
-			restclient.AddUserAgent(clientConfig, serviceaccount.MakeUsername(b.Namespace, name))
+			restclient.AddUserAgent(clientConfig, apiserverserviceaccount.MakeUsername(b.Namespace, name))
 			return true, nil
 
 		default:
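Note: two things change character here: the restclient alias now resolves to k8s.io/client-go/rest, and MakeUsername moves to the apiserver's authentication package. A condensed sketch of the token hand-off shown above, assuming the same vendored packages:

```go
package controller

import (
	apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
	restclient "k8s.io/client-go/rest"

	"k8s.io/kubernetes/pkg/api/v1"
)

// applyTokenSecret copies the service-account bearer token into the rest
// config and tags it with the generated username, as Config does above.
func applyTokenSecret(cfg *restclient.Config, secret *v1.Secret, namespace, name string) {
	cfg.BearerToken = string(secret.Data[v1.ServiceAccountTokenKey])
	restclient.AddUserAgent(cfg, apiserverserviceaccount.MakeUsername(namespace, name))
}
```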
vendor/k8s.io/kubernetes/pkg/controller/cloud/nodecontroller.go (2 changes; generated, vendored)

@@ -89,7 +89,7 @@ func (cnc *CloudNodeController) Run() {
 	defer utilruntime.HandleCrash()
 
 	go wait.Until(func() {
-		nodes, err := cnc.kubeClient.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"})
+		nodes, err := cnc.kubeClient.Core().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
 		if err != nil {
 			glog.Errorf("Error monitoring node status: %v", err)
 		}
vendor/k8s.io/kubernetes/pkg/controller/cloud/nodecontroller_test.go (6 changes; generated, vendored)

@@ -39,7 +39,7 @@ import (
 // and cloud provider says node is gone
 func TestNodeDeleted(t *testing.T) {
 	pod0 := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "default",
 			Name:      "pod0",
 		},

@@ -57,7 +57,7 @@ func TestNodeDeleted(t *testing.T) {
 	}
 
 	pod1 := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "default",
 			Name:      "pod1",
 		},

@@ -77,7 +77,7 @@ func TestNodeDeleted(t *testing.T) {
 	fnh := &testutil.FakeNodeHandler{
 		Existing: []*v1.Node{
 			{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Name:              "node0",
 					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 				},
vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go (10 changes; generated, vendored)

@@ -31,7 +31,7 @@ import (
 
 type PodControllerRefManager struct {
 	podControl         PodControlInterface
-	controllerObject   v1.ObjectMeta
+	controllerObject   metav1.ObjectMeta
 	controllerSelector labels.Selector
 	controllerKind     schema.GroupVersionKind
 }

@@ -40,7 +40,7 @@ type PodControllerRefManager struct {
 // methods to manage the controllerRef of pods.
 func NewPodControllerRefManager(
 	podControl PodControlInterface,
-	controllerObject v1.ObjectMeta,
+	controllerObject metav1.ObjectMeta,
 	controllerSelector labels.Selector,
 	controllerKind schema.GroupVersionKind,
 ) *PodControllerRefManager {

@@ -93,7 +93,7 @@ func (m *PodControllerRefManager) Classify(pods []*v1.Pod) (
 
 // GetControllerOf returns the controllerRef if controllee has a controller,
 // otherwise returns nil.
-func GetControllerOf(controllee *v1.ObjectMeta) *metav1.OwnerReference {
+func GetControllerOf(controllee *metav1.ObjectMeta) *metav1.OwnerReference {
 	for i := range controllee.OwnerReferences {
 		owner := &controllee.OwnerReferences[i]
 		// controlled by other controller

@@ -153,7 +153,7 @@ func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
 // for more details.
 type ReplicaSetControllerRefManager struct {
 	rsControl          RSControlInterface
-	controllerObject   v1.ObjectMeta
+	controllerObject   metav1.ObjectMeta
 	controllerSelector labels.Selector
 	controllerKind     schema.GroupVersionKind
 }

@@ -162,7 +162,7 @@ type ReplicaSetControllerRefManager struct {
 // methods to manage the controllerRef of ReplicaSets.
 func NewReplicaSetControllerRefManager(
 	rsControl RSControlInterface,
-	controllerObject v1.ObjectMeta,
+	controllerObject metav1.ObjectMeta,
 	controllerSelector labels.Selector,
 	controllerKind schema.GroupVersionKind,
 ) *ReplicaSetControllerRefManager {
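Note: GetControllerOf now takes *metav1.ObjectMeta. Its contract restated as a self-contained sketch; the Controller-flag check is an assumption consistent with the loop the hunk above truncates:

```go
package controller

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// getControllerOf returns the owner reference whose Controller flag is
// set, or nil when the object has no managing controller.
func getControllerOf(controllee *metav1.ObjectMeta) *metav1.OwnerReference {
	for i := range controllee.OwnerReferences {
		owner := &controllee.OwnerReferences[i]
		if owner.Controller != nil && *owner.Controller {
			return owner
		}
	}
	return nil
}
```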
vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go (11 changes; generated, vendored)

@@ -28,7 +28,10 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/util/clock"
+	"k8s.io/client-go/util/integer"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/api/validation"

@@ -36,8 +39,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/record"
-	"k8s.io/kubernetes/pkg/util/clock"
-	"k8s.io/kubernetes/pkg/util/integer"
 )
 
 const (

@@ -377,7 +378,7 @@ type RealRSControl struct {
 var _ RSControlInterface = &RealRSControl{}
 
 func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
-	_, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, api.StrategicMergePatchType, data)
+	_, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
 	return err
 }
 

@@ -477,7 +478,7 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v
 }
 
 func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
-	_, err := r.KubeClient.Core().Pods(namespace).Patch(name, api.StrategicMergePatchType, data)
+	_, err := r.KubeClient.Core().Pods(namespace).Patch(name, types.StrategicMergePatchType, data)
 	return err
 }
 

@@ -495,7 +496,7 @@ func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Objec
 	prefix := getPodsPrefix(accessor.GetName())
 
 	pod := &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Labels:       desiredLabels,
 			Annotations:  desiredAnnotations,
 			GenerateName: prefix,
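Note: the strategic-merge patch constant moves from pkg/api to apimachinery's types package. A hedged usage sketch in the shape of RealPodControl.PatchPod; the label payload is illustrative only:

```go
package controller

import (
	"k8s.io/apimachinery/pkg/types"

	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// patchPodLabels issues a strategic-merge patch against a pod -- the same
// call RealPodControl.PatchPod makes with caller-supplied bytes.
func patchPodLabels(c clientset.Interface, namespace, name string) error {
	patch := []byte(`{"metadata":{"labels":{"touched":"true"}}}`)
	_, err := c.Core().Pods(namespace).Patch(name, types.StrategicMergePatchType, patch)
	return err
}
```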
vendor/k8s.io/kubernetes/pkg/controller/controller_utils_test.go (22 changes; generated, vendored)

@@ -30,16 +30,16 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/util/clock"
+	utiltesting "k8s.io/client-go/util/testing"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/record"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/securitycontext"
-	"k8s.io/kubernetes/pkg/util/clock"
-	utiltesting "k8s.io/kubernetes/pkg/util/testing"
 	"k8s.io/kubernetes/pkg/util/uuid"
 )
 

@@ -56,17 +56,17 @@ func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectat
 func newReplicationController(replicas int) *v1.ReplicationController {
 	rc := &v1.ReplicationController{
 		TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			UID:             uuid.NewUUID(),
 			Name:            "foobar",
-			Namespace:       v1.NamespaceDefault,
+			Namespace:       metav1.NamespaceDefault,
 			ResourceVersion: "18",
 		},
 		Spec: v1.ReplicationControllerSpec{
 			Replicas: func() *int32 { i := int32(replicas); return &i }(),
 			Selector: map[string]string{"foo": "bar"},
 			Template: &v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
 						"name": "foo",
 						"type": "production",

@@ -98,7 +98,7 @@ func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.Replica
 	pods := []v1.Pod{}
 	for i := 0; i < count; i++ {
 		newPod := v1.Pod{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Name:      fmt.Sprintf("pod%d", i),
 				Labels:    rc.Spec.Selector,
 				Namespace: rc.Namespace,

@@ -238,8 +238,8 @@ func TestUIDExpectations(t *testing.T) {
 }
 
 func TestCreatePods(t *testing.T) {
-	ns := v1.NamespaceDefault
-	body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "empty_pod"}})
+	ns := metav1.NamespaceDefault
+	body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "empty_pod"}})
 	fakeHandler := utiltesting.FakeHandler{
 		StatusCode:   200,
 		ResponseBody: string(body),

@@ -261,13 +261,13 @@ func TestCreatePods(t *testing.T) {
 	}
 
 	expectedPod := v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Labels:       controllerSpec.Spec.Template.Labels,
 			GenerateName: fmt.Sprintf("%s-", controllerSpec.Name),
 		},
 		Spec: controllerSpec.Spec.Template.Spec,
 	}
-	fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", v1.NamespaceDefault, ""), "POST", nil)
+	fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", metav1.NamespaceDefault, ""), "POST", nil)
 	var actualPod = &v1.Pod{}
 	err := json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod)
 	if err != nil {
vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go (7 changes; generated, vendored)

@@ -97,7 +97,7 @@ func (jm *CronJobController) Run(stopCh <-chan struct{}) {
 
 // SyncAll lists all the CronJobs and Jobs and reconciles them.
 func (jm *CronJobController) SyncAll() {
-	sjl, err := jm.kubeClient.BatchV2alpha1().CronJobs(v1.NamespaceAll).List(v1.ListOptions{})
+	sjl, err := jm.kubeClient.BatchV2alpha1().CronJobs(metav1.NamespaceAll).List(metav1.ListOptions{})
 	if err != nil {
 		glog.Errorf("Error listing cronjobs: %v", err)
 		return

@@ -105,7 +105,7 @@ func (jm *CronJobController) SyncAll() {
 	sjs := sjl.Items
 	glog.V(4).Infof("Found %d cronjobs", len(sjs))
 
-	jl, err := jm.kubeClient.BatchV2alpha1().Jobs(v1.NamespaceAll).List(v1.ListOptions{})
+	jl, err := jm.kubeClient.BatchV2alpha1().Jobs(metav1.NamespaceAll).List(metav1.ListOptions{})
 	if err != nil {
 		glog.Errorf("Error listing jobs")
 		return

@@ -174,6 +174,7 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 	}
 	times, err := getRecentUnmetScheduleTimes(sj, now)
 	if err != nil {
+		recorder.Eventf(&sj, v1.EventTypeWarning, "FailedNeedsStart", "Cannot determine if job needs to be started: %v", err)
 		glog.Errorf("Cannot determine if %s needs to be started: %v", nameForLog, err)
 	}
 	// TODO: handle multiple unmet start times, from oldest to newest, updating status as needed.

@@ -237,7 +238,7 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 	}
 	// remove all pods...
 	selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
-	options := v1.ListOptions{LabelSelector: selector.String()}
+	options := metav1.ListOptions{LabelSelector: selector.String()}
 	podList, err := pc.ListPods(job.Namespace, options)
 	if err != nil {
 		recorder.Eventf(&sj, v1.EventTypeWarning, "FailedList", "List job-pods: %v", err)
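Note: the pod cleanup above derives its metav1.ListOptions from the job's label selector. The same conversion in isolation (batch here is the v2alpha1 API this controller uses):

```go
package cronjob

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

// listOptionsForJob turns a job's selector into the options SyncOne uses
// when listing that job's pods before deleting them.
func listOptionsForJob(job *batch.Job) (metav1.ListOptions, error) {
	selector, err := metav1.LabelSelectorAsSelector(job.Spec.Selector)
	if err != nil {
		return metav1.ListOptions{}, err
	}
	return metav1.ListOptions{LabelSelector: selector.String()}, nil
}
```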
vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller_test.go (109 changes; generated, vendored)

@@ -17,6 +17,7 @@ limitations under the License.
 package cronjob
 
 import (
+	"strings"
 	"testing"
 	"time"
 

@@ -83,7 +84,7 @@ func justAfterThePriorHour() time.Time {
 // returns a cronJob with some fields filled in.
 func cronJob() batch.CronJob {
 	return batch.CronJob{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "mycronjob",
 			Namespace: "snazzycats",
 			UID:       types.UID("1a2b3c"),

@@ -94,7 +95,7 @@ func cronJob() batch.CronJob {
 			Schedule:          "* * * * ?",
 			ConcurrencyPolicy: batch.AllowConcurrent,
 			JobTemplate: batch.JobTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels:      map[string]string{"a": "b"},
 					Annotations: map[string]string{"x": "y"},
 				},

@@ -110,7 +111,7 @@ func jobSpec() batch.JobSpec {
 		Parallelism: &one,
 		Completions: &one,
 		Template: v1.PodTemplateSpec{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Labels: map[string]string{
 					"foo": "bar",
 				},

@@ -126,10 +127,10 @@ func jobSpec() batch.JobSpec {
 
 func newJob(UID string) batch.Job {
 	return batch.Job{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			UID:       types.UID(UID),
 			Name:      "foobar",
-			Namespace: v1.NamespaceDefault,
+			Namespace: metav1.NamespaceDefault,
 			SelfLink:  "/apis/batch/v1/namespaces/snazzycats/jobs/myjob",
 		},
 		Spec: jobSpec(),

@@ -177,57 +178,58 @@ func TestSyncOne_RunOrNot(t *testing.T) {
 		now time.Time
 
 		// expectations
-		expectCreate bool
-		expectDelete bool
-		expectActive int
+		expectCreate     bool
+		expectDelete     bool
+		expectActive     int
+		expectedWarnings int
 	}{
-		"never ran, not time, A": {A, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0},
-		"never ran, not time, F": {f, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0},
-		"never ran, not time, R": {R, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0},
-		"never ran, is time, A": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1},
-		"never ran, is time, F": {f, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1},
-		"never ran, is time, R": {R, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1},
-		"never ran, is time, suspended": {A, T, onTheHour, noDead, F, F, justAfterTheHour(), F, F, 0},
-		"never ran, is time, past deadline": {A, F, onTheHour, shortDead, F, F, justAfterTheHour(), F, F, 0},
-		"never ran, is time, not past deadline": {A, F, onTheHour, longDead, F, F, justAfterTheHour(), T, F, 1},
+		"never ran, not time, A": {A, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0, 0},
+		"never ran, not time, F": {f, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0, 0},
+		"never ran, not time, R": {R, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0, 0},
+		"never ran, is time, A": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1, 0},
+		"never ran, is time, F": {f, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1, 0},
+		"never ran, is time, R": {R, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1, 0},
+		"never ran, is time, suspended": {A, T, onTheHour, noDead, F, F, justAfterTheHour(), F, F, 0, 0},
+		"never ran, is time, past deadline": {A, F, onTheHour, shortDead, F, F, justAfterTheHour(), F, F, 0, 0},
+		"never ran, is time, not past deadline": {A, F, onTheHour, longDead, F, F, justAfterTheHour(), T, F, 1, 0},
 
-		"prev ran but done, not time, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0},
-		"prev ran but done, not time, F": {f, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0},
-		"prev ran but done, not time, R": {R, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0},
-		"prev ran but done, is time, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1},
-		"prev ran but done, is time, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1},
-		"prev ran but done, is time, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1},
-		"prev ran but done, is time, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), F, F, 0},
-		"prev ran but done, is time, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), F, F, 0},
-		"prev ran but done, is time, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), T, F, 1},
+		"prev ran but done, not time, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0, 0},
+		"prev ran but done, not time, F": {f, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0, 0},
+		"prev ran but done, not time, R": {R, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0, 0},
+		"prev ran but done, is time, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1, 0},
+		"prev ran but done, is time, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1, 0},
+		"prev ran but done, is time, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1, 0},
+		"prev ran but done, is time, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), F, F, 0, 0},
+		"prev ran but done, is time, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), F, F, 0, 0},
+		"prev ran but done, is time, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), T, F, 1, 0},
 
-		"still active, not time, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1},
-		"still active, not time, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1},
-		"still active, not time, R": {R, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1},
-		"still active, is time, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, 2},
-		"still active, is time, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1},
-		"still active, is time, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), T, T, 1},
-		"still active, is time, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1},
-		"still active, is time, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), F, F, 1},
-		"still active, is time, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), T, F, 2},
+		"still active, not time, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1, 0},
+		"still active, not time, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1, 0},
+		"still active, not time, R": {R, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1, 0},
+		"still active, is time, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, 2, 0},
+		"still active, is time, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1, 0},
+		"still active, is time, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), T, T, 1, 0},
+		"still active, is time, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1, 0},
+		"still active, is time, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), F, F, 1, 0},
+		"still active, is time, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), T, F, 2, 0},
 
 		// Controller should fail to schedule these, as there are too many missed starting times
 		// and either no deadline or a too long deadline.
-		"prev ran but done, long overdue, not past deadline, A": {A, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0},
-		"prev ran but done, long overdue, not past deadline, R": {R, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0},
-		"prev ran but done, long overdue, not past deadline, F": {f, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0},
-		"prev ran but done, long overdue, no deadline, A": {A, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0},
-		"prev ran but done, long overdue, no deadline, R": {R, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0},
-		"prev ran but done, long overdue, no deadline, F": {f, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0},
+		"prev ran but done, long overdue, not past deadline, A": {A, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0, 1},
+		"prev ran but done, long overdue, not past deadline, R": {R, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0, 1},
+		"prev ran but done, long overdue, not past deadline, F": {f, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0, 1},
+		"prev ran but done, long overdue, no deadline, A": {A, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0, 1},
+		"prev ran but done, long overdue, no deadline, R": {R, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0, 1},
+		"prev ran but done, long overdue, no deadline, F": {f, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0, 1},
 
-		"prev ran but done, long overdue, past medium deadline, A": {A, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1},
-		"prev ran but done, long overdue, past short deadline, A": {A, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1},
+		"prev ran but done, long overdue, past medium deadline, A": {A, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1, 0},
+		"prev ran but done, long overdue, past short deadline, A": {A, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1, 0},
 
-		"prev ran but done, long overdue, past medium deadline, R": {R, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1},
-		"prev ran but done, long overdue, past short deadline, R": {R, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1},
+		"prev ran but done, long overdue, past medium deadline, R": {R, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1, 0},
+		"prev ran but done, long overdue, past short deadline, R": {R, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1, 0},
 
-		"prev ran but done, long overdue, past medium deadline, F": {f, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1},
-		"prev ran but done, long overdue, past short deadline, F": {f, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1},
+		"prev ran but done, long overdue, past medium deadline, F": {f, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1, 0},
+		"prev ran but done, long overdue, past short deadline, F": {f, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1, 0},
 	}
 	for name, tc := range testCases {
 		sj := cronJob()

@@ -295,10 +297,23 @@ func TestSyncOne_RunOrNot(t *testing.T) {
 			if tc.expectDelete {
 				expectedEvents++
 			}
+			expectedEvents += tc.expectedWarnings
 
 			if len(recorder.Events) != expectedEvents {
 				t.Errorf("%s: expected %d event, actually %v", name, expectedEvents, len(recorder.Events))
 			}
 
+			numWarnings := 0
+			for i := 1; i <= len(recorder.Events); i++ {
+				e := <-recorder.Events
+				if strings.HasPrefix(e, v1.EventTypeWarning) {
+					numWarnings += 1
+				}
+			}
+			if numWarnings != tc.expectedWarnings {
+				t.Errorf("%s: expected %d warnings, actually %v", name, tc.expectedWarnings, numWarnings)
+			}
+
 			if tc.expectActive != len(sjc.Updates[expectUpdates-1].Status.Active) {
 				t.Errorf("%s: expected Active size %d, got %d", name, tc.expectActive, len(sjc.Updates[expectUpdates-1].Status.Active))
 			}
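Note: the new assertion drains the fake recorder's buffered channel and counts warning-prefixed events. Extracted as a helper it would look roughly like this (record.FakeRecorder exposing Events as a chan string is what the test above relies on):

```go
package cronjob

import (
	"strings"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/record"
)

// countWarnings consumes every buffered event and reports how many were
// emitted at warning level, mirroring the loop added to the test.
func countWarnings(recorder *record.FakeRecorder) int {
	n := 0
	for pending := len(recorder.Events); pending > 0; pending-- {
		if e := <-recorder.Events; strings.HasPrefix(e, v1.EventTypeWarning) {
			n++
		}
	}
	return n
}
```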
vendor/k8s.io/kubernetes/pkg/controller/cronjob/injection.go (6 changes; generated, vendored)

@@ -177,7 +177,7 @@ func (f *fakeJobControl) Clear() {
 // created as an interface to allow testing.
 type podControlInterface interface {
 	// ListPods list pods
-	ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error)
+	ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error)
 	// DeleteJob deletes the pod identified by name.
 	// TODO: delete by UID?
 	DeletePod(namespace string, name string) error

@@ -191,7 +191,7 @@ type realPodControl struct {
 
 var _ podControlInterface = &realPodControl{}
 
-func (r realPodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
+func (r realPodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) {
 	return r.KubeClient.Core().Pods(namespace).List(opts)
 }
 

@@ -208,7 +208,7 @@ type fakePodControl struct {
 
 var _ podControlInterface = &fakePodControl{}
 
-func (f *fakePodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
+func (f *fakePodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) {
 	f.Lock()
 	defer f.Unlock()
 	return &v1.PodList{Items: f.Pods}, nil
vendor/k8s.io/kubernetes/pkg/controller/cronjob/utils.go (3 changes; generated, vendored)

@@ -24,6 +24,7 @@ import (
 	"github.com/golang/glog"
 	"github.com/robfig/cron"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"

@@ -195,7 +196,7 @@ func getJobFromTemplate(sj *batch.CronJob, scheduledTime time.Time) (*batch.Job,
 	name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime))
 
 	job := &batch.Job{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Labels:      labels,
 			Annotations: annotations,
 			Name:        name,
vendor/k8s.io/kubernetes/pkg/controller/cronjob/utils_test.go (42 changes; generated, vendored)

@@ -35,7 +35,7 @@ func TestGetJobFromTemplate(t *testing.T) {
 	var no bool = false
 
 	sj := batch.CronJob{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "mycronjob",
 			Namespace: "snazzycats",
 			UID:       types.UID("1a2b3c"),

@@ -45,7 +45,7 @@ func TestGetJobFromTemplate(t *testing.T) {
 			Schedule:          "* * * * ?",
 			ConcurrencyPolicy: batch.AllowConcurrent,
 			JobTemplate: batch.JobTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels:      map[string]string{"a": "b"},
 					Annotations: map[string]string{"x": "y"},
 				},

@@ -53,7 +53,7 @@ func TestGetJobFromTemplate(t *testing.T) {
 				ActiveDeadlineSeconds: &one,
 				ManualSelector:        &no,
 				Template: v1.PodTemplateSpec{
-					ObjectMeta: v1.ObjectMeta{
+					ObjectMeta: metav1.ObjectMeta{
 						Labels: map[string]string{
 							"foo": "bar",
 						},

@@ -99,16 +99,16 @@ func TestGetJobFromTemplate(t *testing.T) {
 
 func TestGetParentUIDFromJob(t *testing.T) {
 	j := &batch.Job{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "foobar",
-			Namespace: v1.NamespaceDefault,
+			Namespace: metav1.NamespaceDefault,
 		},
 		Spec: batch.JobSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"foo": "bar"},
 			},
 			Template: v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
 						"foo": "bar",
 					},

@@ -173,7 +173,7 @@ func TestGroupJobsByParent(t *testing.T) {
 	{
 		// Case 2: there is one controller with no job.
 		sjs := []batch.CronJob{
-			{ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
 		}
 		js := []batch.Job{}
 		jobsBySj := groupJobsByParent(sjs, js)

@@ -185,10 +185,10 @@ func TestGroupJobsByParent(t *testing.T) {
 	{
 		// Case 3: there is one controller with one job it created.
 		sjs := []batch.CronJob{
-			{ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
 		}
 		js := []batch.Job{
-			{ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
 		}
 		jobsBySj := groupJobsByParent(sjs, js)
 

@@ -208,18 +208,18 @@ func TestGroupJobsByParent(t *testing.T) {
 		// Case 4: Two namespaces, one has two jobs from one controller, other has 3 jobs from two controllers.
 		// There are also two jobs with no created-by annotation.
 		js := []batch.Job{
-			{ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
-			{ObjectMeta: v1.ObjectMeta{Name: "b", Namespace: "x", Annotations: createdBy2}},
-			{ObjectMeta: v1.ObjectMeta{Name: "c", Namespace: "x", Annotations: createdBy1}},
-			{ObjectMeta: v1.ObjectMeta{Name: "d", Namespace: "x", Annotations: noCreatedBy}},
-			{ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "y", Annotations: createdBy3}},
-			{ObjectMeta: v1.ObjectMeta{Name: "b", Namespace: "y", Annotations: createdBy3}},
-			{ObjectMeta: v1.ObjectMeta{Name: "d", Namespace: "y", Annotations: noCreatedBy}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "x", Annotations: createdBy2}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "c", Namespace: "x", Annotations: createdBy1}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "d", Namespace: "x", Annotations: noCreatedBy}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "y", Annotations: createdBy3}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "y", Annotations: createdBy3}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "d", Namespace: "y", Annotations: noCreatedBy}},
 		}
 		sjs := []batch.CronJob{
-			{ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
-			{ObjectMeta: v1.ObjectMeta{Name: "f", Namespace: "x", UID: uid2}},
-			{ObjectMeta: v1.ObjectMeta{Name: "g", Namespace: "y", UID: uid3}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "f", Namespace: "x", UID: uid2}},
+			{ObjectMeta: metav1.ObjectMeta{Name: "g", Namespace: "y", UID: uid3}},
 		}
 
 		jobsBySj := groupJobsByParent(sjs, js)

@@ -267,9 +267,9 @@ func TestGetRecentUnmetScheduleTimes(t *testing.T) {
 	}
 
 	sj := batch.CronJob{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "mycronjob",
-			Namespace: v1.NamespaceDefault,
+			Namespace: metav1.NamespaceDefault,
 			UID:       types.UID("1a2b3c"),
 		},
 		Spec: batch.CronJobSpec{
vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD (2 changes; generated, vendored)

@@ -23,6 +23,7 @@ go_library(
         "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
+        "//pkg/client/legacylisters:go_default_library",
         "//pkg/client/record:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/informers:go_default_library",

@@ -58,6 +59,7 @@ go_test(
         "//pkg/securitycontext:go_default_library",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
+        "//vendor:k8s.io/apiserver/pkg/storage/names",
     ],
 )
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go (7 changes; generated, vendored)

@@ -35,6 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
 	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
+	"k8s.io/kubernetes/pkg/client/legacylisters"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/informers"

@@ -74,11 +75,11 @@ type DaemonSetsController struct {
 	// A TTLCache of pod creates/deletes each ds expects to see
 	expectations controller.ControllerExpectationsInterface
 	// A store of daemon sets
-	dsStore *cache.StoreToDaemonSetLister
+	dsStore *listers.StoreToDaemonSetLister
 	// A store of pods
-	podStore *cache.StoreToPodLister
+	podStore *listers.StoreToPodLister
 	// A store of nodes
-	nodeStore *cache.StoreToNodeLister
+	nodeStore *listers.StoreToNodeLister
 	// dsStoreSynced returns true if the daemonset store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	dsStoreSynced cache.InformerSynced
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller_test.go (33 changes; generated, vendored)

@@ -22,6 +22,7 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apiserver/pkg/storage/names"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/testapi"

@@ -55,14 +56,14 @@ func getKey(ds *extensions.DaemonSet, t *testing.T) string {
 func newDaemonSet(name string) *extensions.DaemonSet {
 	return &extensions.DaemonSet{
 		TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
-			Namespace: v1.NamespaceDefault,
+			Namespace: metav1.NamespaceDefault,
 		},
 		Spec: extensions.DaemonSetSpec{
 			Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 			Template: v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: simpleDaemonSetLabel,
 				},
 				Spec: v1.PodSpec{

@@ -84,10 +85,10 @@ func newDaemonSet(name string) *extensions.DaemonSet {
 func newNode(name string, label map[string]string) *v1.Node {
 	return &v1.Node{
 		TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Labels:    label,
-			Namespace: v1.NamespaceDefault,
+			Namespace: metav1.NamespaceDefault,
 		},
 		Status: v1.NodeStatus{
 			Conditions: []v1.NodeCondition{

@@ -109,10 +110,10 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]
 func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
 	pod := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: podName,
 			Labels:       label,
-			Namespace:    v1.NamespaceDefault,
+			Namespace:    metav1.NamespaceDefault,
 		},
 		Spec: v1.PodSpec{
 			NodeName: nodeName,

@@ -127,7 +128,7 @@ func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
 			DNSPolicy: v1.DNSDefault,
 		},
 	}
-	v1.GenerateName(v1.SimpleNameGenerator, &pod.ObjectMeta)
+	pod.Name = names.SimpleNameGenerator.GenerateName(podName)
 	return pod
 }
 

@@ -374,9 +375,9 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 	node := newNode("port-conflict", nil)
 	manager.nodeStore.Add(node)
 	manager.podStore.Indexer.Add(&v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Labels:    simpleDaemonSetLabel,
-			Namespace: v1.NamespaceDefault,
+			Namespace: metav1.NamespaceDefault,
 		},
 		Spec: podSpec,
 	})

@@ -424,9 +425,9 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 	manager.nodeStore.Store.Add(newNode("node1", nil))
 	// Create pod not controlled by a daemonset.
 	manager.podStore.Indexer.Add(&v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Labels:    map[string]string{"bang": "boom"},
-			Namespace: v1.NamespaceDefault,
+			Namespace: metav1.NamespaceDefault,
 		},
 		Spec: v1.PodSpec{
 			NodeName: "node1",

@@ -664,7 +665,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			Spec: extensions.DaemonSetSpec{
 				Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 				Template: v1.PodTemplateSpec{
-					ObjectMeta: v1.ObjectMeta{
+					ObjectMeta: metav1.ObjectMeta{
 						Labels: simpleDaemonSetLabel,
 					},
 					Spec: resourcePodSpec("", "50M", "0.5"),

@@ -680,7 +681,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			Spec: extensions.DaemonSetSpec{
 				Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 				Template: v1.PodTemplateSpec{
-					ObjectMeta: v1.ObjectMeta{
+					ObjectMeta: metav1.ObjectMeta{
 						Labels: simpleDaemonSetLabel,
 					},
 					Spec: resourcePodSpec("", "200M", "0.5"),

@@ -696,7 +697,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			Spec: extensions.DaemonSetSpec{
 				Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 				Template: v1.PodTemplateSpec{
-					ObjectMeta: v1.ObjectMeta{
+					ObjectMeta: metav1.ObjectMeta{
 						Labels: simpleDaemonSetLabel,
 					},
 					Spec: resourcePodSpec("other-node", "50M", "0.5"),

@@ -723,7 +724,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			Spec: extensions.DaemonSetSpec{
 				Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
 				Template: v1.PodTemplateSpec{
-					ObjectMeta: v1.ObjectMeta{
+					ObjectMeta: metav1.ObjectMeta{
 						Labels: simpleDaemonSetLabel,
 					},
 					Spec: v1.PodSpec{
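Note: test pod naming switches from the removed v1.GenerateName helper to the apiserver's storage/names generator. The replacement call in isolation:

```go
package daemon

import "k8s.io/apiserver/pkg/storage/names"

// generatePodName appends a random suffix to the base name, which is all
// the rewritten newPod helper above needs.
func generatePodName(base string) string {
	return names.SimpleNameGenerator.GenerateName(base)
}
```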
vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD (3 changes; generated, vendored)

@@ -26,11 +26,11 @@ go_library(
         "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
+        "//pkg/client/legacylisters:go_default_library",
         "//pkg/client/record:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/controller/informers:go_default_library",
-        "//pkg/util/integer:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//pkg/util/workqueue:go_default_library",

@@ -42,6 +42,7 @@ go_library(
         "//vendor:k8s.io/apimachinery/pkg/util/errors",
         "//vendor:k8s.io/apimachinery/pkg/util/runtime",
         "//vendor:k8s.io/apimachinery/pkg/util/wait",
+        "//vendor:k8s.io/client-go/util/integer",
     ],
 )
vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go (28 changes; generated, vendored)

@@ -40,6 +40,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+	"k8s.io/kubernetes/pkg/client/legacylisters"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/deployment/util"

@@ -77,11 +78,11 @@ type DeploymentController struct {
 	enqueueDeployment func(deployment *extensions.Deployment)
 
 	// A store of deployments, populated by the dController
-	dLister *cache.StoreToDeploymentLister
+	dLister *listers.StoreToDeploymentLister
 	// A store of ReplicaSets, populated by the rsController
-	rsLister *cache.StoreToReplicaSetLister
+	rsLister *listers.StoreToReplicaSetLister
 	// A store of pods, populated by the podController
-	podLister *cache.StoreToPodLister
+	podLister *listers.StoreToPodLister
 
 	// dListerSynced returns true if the Deployment store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.

@@ -543,7 +544,26 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 		return dc.syncStatusOnly(d)
 	}
 
-	err = dc.classifyReplicaSets(deployment)
+	// Why run the cleanup policy only when there is no rollback request?
+	// The thing with the cleanup policy currently is that it is far from smart because it takes into account
+	// the latest replica sets while it should instead retain the latest *working* replica sets. This means that
+	// you can have a cleanup policy of 1 but your last known working replica set may be 2 or 3 versions back
+	// in the history.
+	// Eventually we will want to find a way to recognize replica sets that have worked at some point in time
+	// (and chances are higher that they will work again as opposed to others that didn't) for candidates to
+	// automatically roll back to (#23211) and the cleanup policy should help.
+	if d.Spec.RollbackTo == nil {
+		_, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, false)
+		if err != nil {
+			return err
+		}
+		// So far the cleanup policy was executed once a deployment was paused, scaled up/down, or it
+		// successfully completed deploying a replica set. Decouple it from the strategies and have it
+		// run almost unconditionally - cleanupDeployment is safe by default.
+		dc.cleanupDeployment(oldRSs, d)
+	}
+
+	err = dc.classifyReplicaSets(d)
 	if err != nil {
 		return err
 	}
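Note: cleanup now happens once per sync, gated on no pending rollback, instead of inside each rollout strategy (see the matching deletions in recreate.go and rolling.go below). The retention policy itself is a keep-newest-N trim; a minimal stand-alone sketch of that shape, with plain revision numbers standing in for ReplicaSets (an illustrative simplification, not the vendored code):

```go
package deployment

import "sort"

// trimHistory keeps at most limit revisions, dropping the oldest first,
// which is the shape of cleanupDeployment's revisionHistoryLimit policy.
func trimHistory(revisions []int, limit int) []int {
	sort.Ints(revisions)
	if excess := len(revisions) - limit; excess > 0 {
		return revisions[excess:] // oldest (lowest) revisions removed
	}
	return revisions
}
```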
vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller_test.go (16 changes; generated, vendored)

@@ -44,10 +44,10 @@ var (
 
 func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *extensions.ReplicaSet {
 	return &extensions.ReplicaSet{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:              name,
 			CreationTimestamp: timestamp,
-			Namespace:         v1.NamespaceDefault,
+			Namespace:         metav1.NamespaceDefault,
 		},
 		Spec: extensions.ReplicaSetSpec{
 			Replicas: func() *int32 { i := int32(replicas); return &i }(),

@@ -68,10 +68,10 @@ func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map
 func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment {
 	d := extensions.Deployment{
 		TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(extensions.GroupName).GroupVersion.String()},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			UID:         uuid.NewUUID(),
 			Name:        name,
-			Namespace:   v1.NamespaceDefault,
+			Namespace:   metav1.NamespaceDefault,
 			Annotations: make(map[string]string),
 		},
 		Spec: extensions.DeploymentSpec{

@@ -85,7 +85,7 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
 			Replicas: func() *int32 { i := int32(replicas); return &i }(),
 			Selector: &metav1.LabelSelector{MatchLabels: selector},
 			Template: v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: selector,
 				},
 				Spec: v1.PodSpec{

@@ -110,9 +110,9 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
 
 func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet {
 	return &extensions.ReplicaSet{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
-			Namespace: v1.NamespaceDefault,
+			Namespace: metav1.NamespaceDefault,
 			Labels:    d.Spec.Selector.MatchLabels,
 		},
 		Spec: extensions.ReplicaSetSpec{

@@ -554,7 +554,7 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) {
 func generatePodFromRS(rs *extensions.ReplicaSet) *v1.Pod {
 	trueVar := true
 	return &v1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      rs.Name + "-pod",
 			Namespace: rs.Namespace,
 			Labels:    rs.Spec.Selector.MatchLabels,
2 vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go generated vendored

@@ -73,8 +73,6 @@ func (dc *DeploymentController) rolloutRecreate(deployment *extensions.Deploymen
         return dc.syncRolloutStatus(allRSs, newRS, deployment)
     }

-    dc.cleanupDeployment(oldRSs, deployment)
-
     // Sync deployment status
     return dc.syncRolloutStatus(allRSs, newRS, deployment)
 }
4 vendor/k8s.io/kubernetes/pkg/controller/deployment/rolling.go generated vendored

@@ -21,10 +21,10 @@ import (
     "sort"

     "github.com/golang/glog"
+    "k8s.io/client-go/util/integer"
     extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
     "k8s.io/kubernetes/pkg/controller"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
-    "k8s.io/kubernetes/pkg/util/integer"
 )

 // rolloutRolling implements the logic for rolling a new replica set.

@@ -55,8 +55,6 @@ func (dc *DeploymentController) rolloutRolling(deployment *extensions.Deployment
         return dc.syncRolloutStatus(allRSs, newRS, deployment)
     }

-    dc.cleanupDeployment(oldRSs, deployment)
-
     // Sync deployment status
     return dc.syncRolloutStatus(allRSs, newRS, deployment)
 }
9 vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go generated vendored

@@ -58,7 +58,6 @@ func (dc *DeploymentController) sync(deployment *extensions.Deployment) error {
         // so we can abort this resync
         return err
     }
-    dc.cleanupDeployment(oldRSs, deployment)

     allRSs := append(oldRSs, newRS)
     return dc.syncDeploymentStatus(allRSs, newRS, deployment)

@@ -128,7 +127,7 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *ext
 // rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced.
 func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) {
     rsList, err := deploymentutil.ListReplicaSets(deployment,
-        func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
+        func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) {
             parsed, err := labels.Parse(options.LabelSelector)
             if err != nil {
                 return nil, err

@@ -192,7 +191,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)
     if err != nil {
         return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err)
     }
-    options := v1.ListOptions{LabelSelector: selector.String()}
+    options := metav1.ListOptions{LabelSelector: selector.String()}
     parsed, err := labels.Parse(options.LabelSelector)
     if err != nil {
         return nil, err

@@ -241,7 +240,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)

 func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) {
     return deploymentutil.ListPods(deployment,
-        func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
+        func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
             parsed, err := labels.Parse(options.LabelSelector)
             if err != nil {
                 return nil, err

@@ -324,7 +323,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme

     // Create new ReplicaSet
     newRS := extensions.ReplicaSet{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             // Make the name deterministic, to ensure idempotence
             Name:      deployment.Name + "-" + podTemplateSpecHash,
             Namespace: namespace,
4 vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD generated vendored

@@ -22,14 +22,13 @@ go_library(
         "//pkg/api/v1:go_default_library",
         "//pkg/apis/extensions:go_default_library",
         "//pkg/apis/extensions/v1beta1:go_default_library",
-        "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
+        "//pkg/client/legacylisters:go_default_library",
         "//pkg/client/retry:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/hash:go_default_library",
-        "//pkg/util/integer:go_default_library",
         "//pkg/util/intstr:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//vendor:github.com/golang/glog",

@@ -39,6 +38,7 @@ go_library(
         "//vendor:k8s.io/apimachinery/pkg/runtime",
         "//vendor:k8s.io/apimachinery/pkg/util/errors",
         "//vendor:k8s.io/apimachinery/pkg/util/wait",
+        "//vendor:k8s.io/client-go/util/integer",
     ],
 )
18 vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go generated vendored

@@ -31,15 +31,15 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/client-go/util/integer"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/annotations"
     "k8s.io/kubernetes/pkg/api/v1"
     internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
     extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-    "k8s.io/kubernetes/pkg/client/cache"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+    "k8s.io/kubernetes/pkg/client/legacylisters"
     "k8s.io/kubernetes/pkg/controller"
-    "k8s.io/kubernetes/pkg/util/integer"
     intstrutil "k8s.io/kubernetes/pkg/util/intstr"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )

@@ -537,7 +537,7 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface)
 // listReplicaSets lists all RSes the given deployment targets with the given client interface.
 func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, error) {
     return ListReplicaSets(deployment,
-        func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
+        func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) {
             rsList, err := c.Extensions().ReplicaSets(namespace).List(options)
             if err != nil {
                 return nil, err

@@ -553,14 +553,14 @@ func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) (
 // listReplicaSets lists all Pods the given deployment targets with the given client interface.
 func listPods(deployment *extensions.Deployment, c clientset.Interface) (*v1.PodList, error) {
     return ListPods(deployment,
-        func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
+        func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
             return c.Core().Pods(namespace).List(options)
         })
 }

 // TODO: switch this to full namespacers
-type rsListFunc func(string, v1.ListOptions) ([]*extensions.ReplicaSet, error)
-type podListFunc func(string, v1.ListOptions) (*v1.PodList, error)
+type rsListFunc func(string, metav1.ListOptions) ([]*extensions.ReplicaSet, error)
+type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error)

 // ListReplicaSets returns a slice of RSes the given deployment targets.
 func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]*extensions.ReplicaSet, error) {

@@ -572,7 +572,7 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([
     if err != nil {
         return nil, err
     }
-    options := v1.ListOptions{LabelSelector: selector.String()}
+    options := metav1.ListOptions{LabelSelector: selector.String()}
     return getRSList(namespace, options)
 }

@@ -583,7 +583,7 @@ func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*v1.Po
     if err != nil {
         return nil, err
     }
-    options := v1.ListOptions{LabelSelector: selector.String()}
+    options := metav1.ListOptions{LabelSelector: selector.String()}
     return getPodList(namespace, options)
 }

@@ -684,7 +684,7 @@ func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, na

 // LabelPodsWithHash labels all pods in the given podList with the new hash label.
 // The returned bool value can be used to tell if all pods are actually labeled.
-func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister *cache.StoreToPodLister, namespace, name, hash string) error {
+func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister *listers.StoreToPodLister, namespace, name, hash string) error {
     for _, pod := range podList.Items {
         // Only label the pod that doesn't already have the new hash
         if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
```
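The hunks above narrow the list helpers to metav1.ListOptions while keeping the callback-based design: ListReplicaSets and ListPods build selector-scoped options and delegate to a caller-supplied list function. A self-contained sketch of that adapter pattern, with stand-in types in place of the real apimachinery ones:

// Sketch: a list helper takes a list function so callers can plug in any
// client (real, cached, or fake). Stand-in types keep this runnable.
package main

import "fmt"

// Minimal stand-ins for metav1.ListOptions and a ReplicaSet; the real types
// come from k8s.io/apimachinery and the extensions API group.
type listOptions struct{ LabelSelector string }
type replicaSet struct{ Name string }

type rsListFunc func(namespace string, options listOptions) ([]replicaSet, error)

// listReplicaSets builds selector-scoped options and defers to getRSList,
// mirroring the shape of ListReplicaSets in the diff above.
func listReplicaSets(namespace, selector string, getRSList rsListFunc) ([]replicaSet, error) {
	options := listOptions{LabelSelector: selector}
	return getRSList(namespace, options)
}

func main() {
	fake := func(ns string, opts listOptions) ([]replicaSet, error) {
		fmt.Printf("list in %s with selector %q\n", ns, opts.LabelSelector)
		return []replicaSet{{Name: "web-12345"}}, nil
	}
	rss, _ := listReplicaSets("default", "app=web", fake)
	fmt.Println(rss)
}

The tests in the next file exercise exactly this seam by passing fake list functions instead of a live client.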
16 vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util_test.go generated vendored

@@ -102,7 +102,7 @@ func newPod(now time.Time, ready bool, beforeSec int) v1.Pod {
 // generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template
 func generatePodFromRS(rs extensions.ReplicaSet) v1.Pod {
     return v1.Pod{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Labels: rs.Labels,
         },
         Spec: rs.Spec.Template.Spec,

@@ -111,7 +111,7 @@ func generatePodFromRS(rs extensions.ReplicaSet) v1.Pod {

 func generatePod(labels map[string]string, image string) v1.Pod {
     return v1.Pod{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Labels: labels,
         },
         Spec: v1.PodSpec{

@@ -129,7 +129,7 @@ func generatePod(labels map[string]string, image string) v1.Pod {

 func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet {
     return extensions.ReplicaSet{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:   v1.SimpleNameGenerator.GenerateName("replicaset"),
             Labels: labels,
         },

@@ -137,7 +137,7 @@ func generateRSWithLabel(labels map[string]string, image string) extensions.Repl
             Replicas: func(i int32) *int32 { return &i }(1),
             Selector: &metav1.LabelSelector{MatchLabels: labels},
             Template: v1.PodTemplateSpec{
-                ObjectMeta: v1.ObjectMeta{
+                ObjectMeta: metav1.ObjectMeta{
                     Labels: labels,
                 },
                 Spec: v1.PodSpec{

@@ -159,7 +159,7 @@ func generateRSWithLabel(labels map[string]string, image string) extensions.Repl
 func generateRS(deployment extensions.Deployment) extensions.ReplicaSet {
     template := GetNewReplicaSetTemplate(&deployment)
     return extensions.ReplicaSet{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:   v1.SimpleNameGenerator.GenerateName("replicaset"),
             Labels: template.Labels,
         },

@@ -176,7 +176,7 @@ func generateDeployment(image string) extensions.Deployment {
     podLabels := map[string]string{"name": image}
     terminationSec := int64(30)
     return extensions.Deployment{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:        image,
             Annotations: make(map[string]string),
         },

@@ -184,7 +184,7 @@ func generateDeployment(image string) extensions.Deployment {
             Replicas: func(i int32) *int32 { return &i }(1),
             Selector: &metav1.LabelSelector{MatchLabels: podLabels},
             Template: v1.PodTemplateSpec{
-                ObjectMeta: v1.ObjectMeta{
+                ObjectMeta: metav1.ObjectMeta{
                     Labels: podLabels,
                 },
                 Spec: v1.PodSpec{

@@ -364,7 +364,7 @@ func TestGetOldRCs(t *testing.T) {

 func generatePodTemplateSpec(name, nodeName string, annotations, labels map[string]string) v1.PodTemplateSpec {
     return v1.PodTemplateSpec{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:        name,
             Annotations: annotations,
             Labels:      labels,
4 vendor/k8s.io/kubernetes/pkg/controller/deployment/util/pod_util.go generated vendored

@@ -25,8 +25,8 @@ import (
     errorsutil "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
-    "k8s.io/kubernetes/pkg/client/cache"
     v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+    "k8s.io/kubernetes/pkg/client/legacylisters"
     "k8s.io/kubernetes/pkg/client/retry"
     hashutil "k8s.io/kubernetes/pkg/util/hash"
 )

@@ -56,7 +56,7 @@ type updatePodFunc func(pod *v1.Pod) error

 // UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored.
 // The returned bool value can be used to tell if the pod is actually updated.
-func UpdatePodWithRetries(podClient v1core.PodInterface, podLister *cache.StoreToPodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
+func UpdatePodWithRetries(podClient v1core.PodInterface, podLister *listers.StoreToPodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
     var pod *v1.Pod

     retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
4 vendor/k8s.io/kubernetes/pkg/controller/deployment/util/replicaset_util.go generated vendored

@@ -25,8 +25,8 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
     extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-    "k8s.io/kubernetes/pkg/client/cache"
     unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
+    "k8s.io/kubernetes/pkg/client/legacylisters"
     "k8s.io/kubernetes/pkg/client/retry"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )

@@ -37,7 +37,7 @@ type updateRSFunc func(rs *extensions.ReplicaSet) error

 // UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored.
 // The returned bool value can be used to tell if the RS is actually updated.
-func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister *cache.StoreToReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) {
+func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister *listers.StoreToReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) {
     var rs *extensions.ReplicaSet

     retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
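Both UpdatePodWithRetries and UpdateRSWithRetries wrap the write in retry.RetryOnConflict(retry.DefaultBackoff, ...): re-fetch, re-apply the mutation, retry while the API server returns a conflict. A sketch of that loop under stand-in types (the vendored helper lives in pkg/client/retry; the sentinel error and step count here are illustrative):

// Sketch: the conflict-retry update pattern — keep retrying the closure
// while it fails with a conflict, stop on success or any other error.
package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict") // stand-in for a 409 from the API server

// retryOnConflict retries fn up to steps times while it returns a conflict.
func retryOnConflict(steps int, fn func() error) error {
	var err error
	for i := 0; i < steps; i++ {
		if err = fn(); !errors.Is(err, errConflict) {
			return err // success or a non-retryable error
		}
	}
	return err
}

func main() {
	attempts := 0
	err := retryOnConflict(5, func() error {
		attempts++
		if attempts < 3 {
			return errConflict // simulate two stale-resource-version writes
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}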
2 vendor/k8s.io/kubernetes/pkg/controller/disruption/BUILD generated vendored

@@ -22,6 +22,7 @@ go_library(
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/policy/v1beta1:go_default_library",
+        "//pkg/client/legacylisters:go_default_library",
         "//pkg/client/record:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/intstr:go_default_library",

@@ -48,6 +49,7 @@ go_test(
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/apis/policy/v1beta1:go_default_library",
         "//pkg/client/cache:go_default_library",
+        "//pkg/client/legacylisters:go_default_library",
         "//pkg/client/record:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/intstr:go_default_library",
53 vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption.go generated vendored

@@ -36,6 +36,7 @@ import (
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
     v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
     policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1"
+    "k8s.io/kubernetes/pkg/client/legacylisters"
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/util/intstr"

@@ -64,26 +65,26 @@ type DisruptionController struct {

     pdbStore      cache.Store
     pdbController cache.Controller
-    pdbLister     cache.StoreToPodDisruptionBudgetLister
+    pdbLister     listers.StoreToPodDisruptionBudgetLister

     podController cache.Controller
-    podLister     cache.StoreToPodLister
+    podLister     listers.StoreToPodLister

     rcIndexer    cache.Indexer
     rcController cache.Controller
-    rcLister     cache.StoreToReplicationControllerLister
+    rcLister     listers.StoreToReplicationControllerLister

     rsStore      cache.Store
     rsController cache.Controller
-    rsLister     cache.StoreToReplicaSetLister
+    rsLister     listers.StoreToReplicaSetLister

     dIndexer    cache.Indexer
     dController cache.Controller
-    dLister     cache.StoreToDeploymentLister
+    dLister     listers.StoreToDeploymentLister

     ssStore      cache.Store
     ssController cache.Controller
-    ssLister     cache.StoreToStatefulSetLister
+    ssLister     listers.StoreToStatefulSetLister

     // PodDisruptionBudget keys that need to be synced.
     queue workqueue.RateLimitingInterface

@@ -128,11 +129,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c

     dc.pdbStore, dc.pdbController = cache.NewInformer(
         &cache.ListWatch{
-            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-                return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).List(options)
+            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+                return dc.kubeClient.Policy().PodDisruptionBudgets(metav1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-                return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).Watch(options)
+            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+                return dc.kubeClient.Policy().PodDisruptionBudgets(metav1.NamespaceAll).Watch(options)
             },
         },
         &policy.PodDisruptionBudget{},

@@ -147,11 +148,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c

     dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
         &cache.ListWatch{
-            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-                return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
+            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+                return dc.kubeClient.Core().ReplicationControllers(metav1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-                return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
+            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+                return dc.kubeClient.Core().ReplicationControllers(metav1.NamespaceAll).Watch(options)
             },
         },
         &v1.ReplicationController{},

@@ -164,11 +165,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c

     dc.rsLister.Indexer, dc.rsController = cache.NewIndexerInformer(
         &cache.ListWatch{
-            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-                return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).List(options)
+            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+                return dc.kubeClient.Extensions().ReplicaSets(metav1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-                return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options)
+            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+                return dc.kubeClient.Extensions().ReplicaSets(metav1.NamespaceAll).Watch(options)
             },
         },
         &extensions.ReplicaSet{},

@@ -180,11 +181,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c

     dc.dIndexer, dc.dController = cache.NewIndexerInformer(
         &cache.ListWatch{
-            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-                return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).List(options)
+            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+                return dc.kubeClient.Extensions().Deployments(metav1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-                return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).Watch(options)
+            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+                return dc.kubeClient.Extensions().Deployments(metav1.NamespaceAll).Watch(options)
             },
         },
         &extensions.Deployment{},

@@ -196,11 +197,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient c

     dc.ssStore, dc.ssController = cache.NewInformer(
         &cache.ListWatch{
-            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-                return dc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).List(options)
+            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+                return dc.kubeClient.Apps().StatefulSets(metav1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-                return dc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).Watch(options)
+            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+                return dc.kubeClient.Apps().StatefulSets(metav1.NamespaceAll).Watch(options)
             },
         },
         &apps.StatefulSet{},
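Every informer above follows the same shape: a cache.ListWatch whose ListFunc and WatchFunc now both take metav1.ListOptions. A stand-alone sketch of that pairing, with stand-in types so it runs on its own (the real types come from client-go's cache and watch packages):

// Sketch: a ListWatch bundles the two callbacks an informer needs; after
// this commit both callbacks accept the meta/v1 options type.
package main

import "fmt"

type listOptions struct{ ResourceVersion string }
type object interface{}
type watchInterface interface{ Stop() }

// listWatch pairs the two callbacks exactly as cache.ListWatch does.
type listWatch struct {
	ListFunc  func(options listOptions) (object, error)
	WatchFunc func(options listOptions) (watchInterface, error)
}

type fakeWatch struct{}

func (fakeWatch) Stop() {}

func main() {
	lw := listWatch{
		ListFunc: func(options listOptions) (object, error) {
			return []string{"pdb-a", "pdb-b"}, nil // pretend API list call
		},
		WatchFunc: func(options listOptions) (watchInterface, error) {
			return fakeWatch{}, nil // pretend API watch call
		},
	}
	objs, _ := lw.ListFunc(listOptions{ResourceVersion: "0"})
	fmt.Println(objs)
}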
37 vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption_test.go generated vendored

@@ -30,6 +30,7 @@ import (
     extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
     policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
     "k8s.io/kubernetes/pkg/client/cache"
+    "k8s.io/kubernetes/pkg/client/legacylisters"
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/util/intstr"

@@ -87,12 +88,12 @@ func newFakeDisruptionController() (*DisruptionController, *pdbStates) {
     ps := &pdbStates{}

     dc := &DisruptionController{
-        pdbLister:   cache.StoreToPodDisruptionBudgetLister{Store: cache.NewStore(controller.KeyFunc)},
-        podLister:   cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
-        rcLister:    cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
-        rsLister:    cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
-        dLister:     cache.StoreToDeploymentLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
-        ssLister:    cache.StoreToStatefulSetLister{Store: cache.NewStore(controller.KeyFunc)},
+        pdbLister:   listers.StoreToPodDisruptionBudgetLister{Store: cache.NewStore(controller.KeyFunc)},
+        podLister:   listers.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
+        rcLister:    listers.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
+        rsLister:    listers.StoreToReplicaSetLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
+        dLister:     listers.StoreToDeploymentLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
+        ssLister:    listers.StoreToStatefulSetLister{Store: cache.NewStore(controller.KeyFunc)},
         getUpdater:  func() updater { return ps.Set },
         broadcaster: record.NewBroadcaster(),
     }

@@ -118,10 +119,10 @@ func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*pol

     pdb := &policy.PodDisruptionBudget{
         TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             UID:             uuid.NewUUID(),
             Name:            "foobar",
-            Namespace:       v1.NamespaceDefault,
+            Namespace:       metav1.NamespaceDefault,
             ResourceVersion: "18",
         },
         Spec: policy.PodDisruptionBudgetSpec{

@@ -141,11 +142,11 @@ func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*pol
 func newPod(t *testing.T, name string) (*v1.Pod, string) {
     pod := &v1.Pod{
         TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             UID:             uuid.NewUUID(),
             Annotations:     make(map[string]string),
             Name:            name,
-            Namespace:       v1.NamespaceDefault,
+            Namespace:       metav1.NamespaceDefault,
             ResourceVersion: "18",
             Labels:          fooBar(),
         },

@@ -168,10 +169,10 @@ func newPod(t *testing.T, name string) (*v1.Pod, string) {
 func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
     rc := &v1.ReplicationController{
         TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             UID:             uuid.NewUUID(),
             Name:            "foobar",
-            Namespace:       v1.NamespaceDefault,
+            Namespace:       metav1.NamespaceDefault,
             ResourceVersion: "18",
             Labels:          fooBar(),
         },

@@ -192,10 +193,10 @@ func newReplicationController(t *testing.T, size int32) (*v1.ReplicationControll
 func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
     d := &extensions.Deployment{
         TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             UID:             uuid.NewUUID(),
             Name:            "foobar",
-            Namespace:       v1.NamespaceDefault,
+            Namespace:       metav1.NamespaceDefault,
             ResourceVersion: "18",
             Labels:          fooBar(),
         },

@@ -216,10 +217,10 @@ func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
 func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {
     rs := &extensions.ReplicaSet{
         TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             UID:             uuid.NewUUID(),
             Name:            "foobar",
-            Namespace:       v1.NamespaceDefault,
+            Namespace:       metav1.NamespaceDefault,
             ResourceVersion: "18",
             Labels:          fooBar(),
         },

@@ -240,10 +241,10 @@ func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {
 func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) {
     ss := &apps.StatefulSet{
         TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             UID:             uuid.NewUUID(),
             Name:            "foobar",
-            Namespace:       v1.NamespaceDefault,
+            Namespace:       metav1.NamespaceDefault,
             ResourceVersion: "18",
             Labels:          fooBar(),
         },
5 vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD generated vendored

@@ -21,6 +21,7 @@ go_library(
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
+        "//pkg/client/legacylisters:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/informers:go_default_library",
         "//pkg/util/metrics:go_default_library",

@@ -49,12 +50,12 @@ go_test(
         "//pkg/api/v1/endpoints:go_default_library",
         "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
-        "//pkg/client/restclient:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/intstr:go_default_library",
-        "//pkg/util/testing:go_default_library",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
+        "//vendor:k8s.io/client-go/rest",
+        "//vendor:k8s.io/client-go/util/testing",
     ],
 )
17 vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go generated vendored

@@ -36,6 +36,7 @@ import (
     utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/client/cache"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+    "k8s.io/kubernetes/pkg/client/legacylisters"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/informers"
     "k8s.io/kubernetes/pkg/util/metrics"

@@ -83,11 +84,11 @@ func NewEndpointController(podInformer cache.SharedIndexInformer, client clients

     e.serviceStore.Indexer, e.serviceController = cache.NewIndexerInformer(
         &cache.ListWatch{
-            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-                return e.client.Core().Services(v1.NamespaceAll).List(options)
+            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+                return e.client.Core().Services(metav1.NamespaceAll).List(options)
             },
-            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-                return e.client.Core().Services(v1.NamespaceAll).Watch(options)
+            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+                return e.client.Core().Services(metav1.NamespaceAll).Watch(options)
             },
         },
         &v1.Service{},

@@ -128,8 +129,8 @@ func NewEndpointControllerFromClient(client *clientset.Clientset, resyncPeriod c
 type EndpointController struct {
     client clientset.Interface

-    serviceStore cache.StoreToServiceLister
-    podStore     cache.StoreToPodLister
+    serviceStore listers.StoreToServiceLister
+    podStore     listers.StoreToPodLister

     // internalPodInformer is used to hold a personal informer. If we're using
     // a normal shared informer, then the informer will be started for us. If

@@ -451,7 +452,7 @@ func (e *EndpointController) syncService(key string) error {
     if err != nil {
         if errors.IsNotFound(err) {
             currentEndpoints = &v1.Endpoints{
-                ObjectMeta: v1.ObjectMeta{
+                ObjectMeta: metav1.ObjectMeta{
                     Name:   service.Name,
                     Labels: service.Labels,
                 },

@@ -502,7 +503,7 @@ func (e *EndpointController) syncService(key string) error {
 // some stragglers could have been left behind if the endpoint controller
 // reboots).
 func (e *EndpointController) checkLeftoverEndpoints() {
-    list, err := e.client.Core().Endpoints(v1.NamespaceAll).List(v1.ListOptions{})
+    list, err := e.client.Core().Endpoints(metav1.NamespaceAll).List(metav1.ListOptions{})
     if err != nil {
         utilruntime.HandleError(fmt.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err))
         return
82 vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller_test.go generated vendored

@@ -24,16 +24,16 @@ import (

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
+    restclient "k8s.io/client-go/rest"
+    utiltesting "k8s.io/client-go/util/testing"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/testapi"
     "k8s.io/kubernetes/pkg/api/v1"
     endptspkg "k8s.io/kubernetes/pkg/api/v1/endpoints"
     "k8s.io/kubernetes/pkg/client/cache"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-    "k8s.io/kubernetes/pkg/client/restclient"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/util/intstr"
-    utiltesting "k8s.io/kubernetes/pkg/util/testing"
 )

 var alwaysReady = func() bool { return true }

@@ -43,7 +43,7 @@ func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotRea
     for i := 0; i < nPods+nNotReady; i++ {
         p := &v1.Pod{
             TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 Namespace: namespace,
                 Name:      fmt.Sprintf("pod%d", i),
                 Labels:    map[string]string{"foo": "bar"},

@@ -93,10 +93,10 @@ func makeTestServer(t *testing.T, namespace string, endpointsResponse serverResp
 }

 func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
-    ns := v1.NamespaceDefault
+    ns := metav1.NamespaceDefault
     testServer, endpointsHandler := makeTestServer(t, ns,
         serverResponse{http.StatusOK, &v1.Endpoints{
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 Name:            "foo",
                 Namespace:       ns,
                 ResourceVersion: "1",

@@ -111,7 +111,7 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
     endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
     endpoints.podStoreSynced = alwaysReady
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
         Spec:       v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 80}}},
     })
     endpoints.syncService(ns + "/foo")

@@ -119,16 +119,16 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
 }

 func TestCheckLeftoverEndpoints(t *testing.T) {
-    ns := v1.NamespaceDefault
-    // Note that this requests *all* endpoints, therefore the NamespaceAll
+    ns := metav1.NamespaceDefault
+    // Note that this requests *all* endpoints, therefore metav1.NamespaceAll
     // below.
-    testServer, _ := makeTestServer(t, v1.NamespaceAll,
+    testServer, _ := makeTestServer(t, metav1.NamespaceAll,
         serverResponse{http.StatusOK, &v1.EndpointsList{
             ListMeta: metav1.ListMeta{
                 ResourceVersion: "1",
             },
             Items: []v1.Endpoints{{
-                ObjectMeta: v1.ObjectMeta{
+                ObjectMeta: metav1.ObjectMeta{
                     Name:            "foo",
                     Namespace:       ns,
                     ResourceVersion: "1",

@@ -158,7 +158,7 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {
     ns := "other"
     testServer, endpointsHandler := makeTestServer(t, ns,
         serverResponse{http.StatusOK, &v1.Endpoints{
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 Name:            "foo",
                 Namespace:       ns,
                 ResourceVersion: "1",

@@ -175,7 +175,7 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {

     addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
         Spec: v1.ServiceSpec{
             Selector: map[string]string{},
             Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},

@@ -184,7 +184,7 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {
     endpoints.syncService(ns + "/foo")
     endpointsHandler.ValidateRequestCount(t, 2)
     data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
            Name:            "foo",
            Namespace:       ns,
            ResourceVersion: "1",

@@ -201,7 +201,7 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
     ns := "other"
     testServer, endpointsHandler := makeTestServer(t, ns,
         serverResponse{http.StatusOK, &v1.Endpoints{
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 Name:            "foo",
                 Namespace:       ns,
                 ResourceVersion: "1",

@@ -217,7 +217,7 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
     endpoints.podStoreSynced = alwaysReady
     addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
         Spec: v1.ServiceSpec{
             Selector: map[string]string{},
             Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "UDP"}},

@@ -226,7 +226,7 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
     endpoints.syncService(ns + "/foo")
     endpointsHandler.ValidateRequestCount(t, 2)
     data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:            "foo",
             Namespace:       ns,
             ResourceVersion: "1",

@@ -243,7 +243,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
     ns := "other"
     testServer, endpointsHandler := makeTestServer(t, ns,
         serverResponse{http.StatusOK, &v1.Endpoints{
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 Name:            "foo",
                 Namespace:       ns,
                 ResourceVersion: "1",

@@ -256,7 +256,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
     endpoints.podStoreSynced = alwaysReady
     addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
         Spec: v1.ServiceSpec{
             Selector: map[string]string{},
             Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},

@@ -264,7 +264,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
     })
     endpoints.syncService(ns + "/foo")
     data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:            "foo",
             Namespace:       ns,
             ResourceVersion: "1",

@@ -281,7 +281,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
     ns := "other"
     testServer, endpointsHandler := makeTestServer(t, ns,
         serverResponse{http.StatusOK, &v1.Endpoints{
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 Name:            "foo",
                 Namespace:       ns,
                 ResourceVersion: "1",

@@ -294,7 +294,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
     endpoints.podStoreSynced = alwaysReady
     addPods(endpoints.podStore.Indexer, ns, 0, 1, 1)
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
         Spec: v1.ServiceSpec{
             Selector: map[string]string{},
             Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},

@@ -302,7 +302,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
     })
     endpoints.syncService(ns + "/foo")
     data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:            "foo",
             Namespace:       ns,
             ResourceVersion: "1",

@@ -319,7 +319,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
     ns := "other"
     testServer, endpointsHandler := makeTestServer(t, ns,
         serverResponse{http.StatusOK, &v1.Endpoints{
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 Name:            "foo",
                 Namespace:       ns,
                 ResourceVersion: "1",

@@ -332,7 +332,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
     endpoints.podStoreSynced = alwaysReady
     addPods(endpoints.podStore.Indexer, ns, 1, 1, 1)
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
         Spec: v1.ServiceSpec{
             Selector: map[string]string{},
             Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},

@@ -340,7 +340,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
     })
     endpoints.syncService(ns + "/foo")
     data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:            "foo",
             Namespace:       ns,
             ResourceVersion: "1",

@@ -358,7 +358,7 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) {
     ns := "bar"
     testServer, endpointsHandler := makeTestServer(t, ns,
         serverResponse{http.StatusOK, &v1.Endpoints{
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 Name:            "foo",
                 Namespace:       ns,
                 ResourceVersion: "1",

@@ -374,7 +374,7 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) {
     endpoints.podStoreSynced = alwaysReady
     addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
         Spec: v1.ServiceSpec{
             Selector: map[string]string{"foo": "bar"},
             Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},

@@ -382,7 +382,7 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) {
     })
     endpoints.syncService(ns + "/foo")
     data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:            "foo",
             Namespace:       ns,
             ResourceVersion: "1",

@@ -396,10 +396,10 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) {
 }

 func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
-    ns := v1.NamespaceDefault
-    testServer, endpointsHandler := makeTestServer(t, v1.NamespaceDefault,
+    ns := metav1.NamespaceDefault
+    testServer, endpointsHandler := makeTestServer(t, metav1.NamespaceDefault,
         serverResponse{http.StatusOK, &v1.Endpoints{
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 ResourceVersion: "1",
                 Name:            "foo",
                 Namespace:       ns,

@@ -413,16 +413,16 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
     client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
     endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
     endpoints.podStoreSynced = alwaysReady
-    addPods(endpoints.podStore.Indexer, v1.NamespaceDefault, 1, 1, 0)
+    addPods(endpoints.podStore.Indexer, metav1.NamespaceDefault, 1, 1, 0)
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: v1.NamespaceDefault},
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: metav1.NamespaceDefault},
         Spec: v1.ServiceSpec{
             Selector: map[string]string{"foo": "bar"},
             Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
         },
     })
     endpoints.syncService(ns + "/foo")
-    endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", v1.NamespaceDefault, "foo"), "GET", nil)
+    endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", metav1.NamespaceDefault, "foo"), "GET", nil)
 }

 func TestSyncEndpointsItems(t *testing.T) {

@@ -436,7 +436,7 @@ func TestSyncEndpointsItems(t *testing.T) {
     addPods(endpoints.podStore.Indexer, ns, 3, 2, 0)
     addPods(endpoints.podStore.Indexer, "blah", 5, 2, 0) // make sure these aren't found!
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
+        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
         Spec: v1.ServiceSpec{
             Selector: map[string]string{"foo": "bar"},
             Ports: []v1.ServicePort{

@@ -458,7 +458,7 @@ func TestSyncEndpointsItems(t *testing.T) {
         },
     }}
     data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             ResourceVersion: "",
         },
         Subsets: endptspkg.SortSubsets(expectedSubsets),

@@ -479,7 +479,7 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) {
     addPods(endpoints.podStore.Indexer, ns, 3, 2, 0)
     serviceLabels := map[string]string{"foo": "bar"}
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:      "foo",
             Namespace: ns,
             Labels:    serviceLabels,

@@ -505,7 +505,7 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) {
         },
     }}
     data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             ResourceVersion: "",
             Labels:          serviceLabels,
         },

@@ -520,7 +520,7 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
     ns := "bar"
     testServer, endpointsHandler := makeTestServer(t, ns,
         serverResponse{http.StatusOK, &v1.Endpoints{
-            ObjectMeta: v1.ObjectMeta{
+            ObjectMeta: metav1.ObjectMeta{
                 Name:            "foo",
                 Namespace:       ns,
                 ResourceVersion: "1",

@@ -540,7 +540,7 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
     addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
     serviceLabels := map[string]string{"baz": "blah"}
     endpoints.serviceStore.Indexer.Add(&v1.Service{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:      "foo",
             Namespace: ns,
             Labels:    serviceLabels,

@@ -552,7 +552,7 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
     })
     endpoints.syncService(ns + "/foo")
     data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
-        ObjectMeta: v1.ObjectMeta{
+        ObjectMeta: metav1.ObjectMeta{
             Name:            "foo",
             Namespace:       ns,
             ResourceVersion: "1",
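All of these tests lean on makeTestServer, which stands up an HTTP test server returning a canned Endpoints payload and lets the test validate the requests the controller made. A plain net/http/httptest sketch of the same idea; the vendored tests use utiltesting.FakeHandler from client-go rather than this hand-rolled counter:

// Sketch: serve a fixed Endpoints JSON body and count requests, the way the
// endpoints controller tests fake out the API server.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	const canned = `{"kind":"Endpoints","apiVersion":"v1","metadata":{"name":"foo"}}`
	requests := 0
	testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		requests++ // count requests, as ValidateRequestCount checks
		w.WriteHeader(http.StatusOK)
		io.WriteString(w, canned)
	}))
	defer testServer.Close()

	resp, err := http.Get(testServer.URL + "/api/v1/namespaces/default/endpoints/foo")
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(resp.StatusCode, requests, string(body))
}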
7 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD generated vendored

@@ -18,12 +18,10 @@ go_library(
     ],
     tags = ["automanaged"],
     deps = [
-        "//pkg/api:go_default_library",
         "//pkg/api/v1:go_default_library",
         "//pkg/client/cache:go_default_library",
         "//pkg/client/typed/dynamic:go_default_library",
         "//pkg/controller/garbagecollector/metaonly:go_default_library",
-        "//pkg/util/clock:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//pkg/util/workqueue:go_default_library",
         "//vendor:github.com/golang/glog",

@@ -41,6 +39,7 @@ go_library(
         "//vendor:k8s.io/apimachinery/pkg/util/sets",
         "//vendor:k8s.io/apimachinery/pkg/util/wait",
         "//vendor:k8s.io/apimachinery/pkg/watch",
+        "//vendor:k8s.io/client-go/util/clock",
     ],
 )

@@ -53,10 +52,8 @@ go_test(
         "//pkg/api:go_default_library",
         "//pkg/api/install:go_default_library",
         "//pkg/api/v1:go_default_library",
-        "//pkg/client/restclient:go_default_library",
         "//pkg/client/typed/dynamic:go_default_library",
         "//pkg/controller/garbagecollector/metaonly:go_default_library",
-        "//pkg/util/clock:go_default_library",
         "//pkg/util/workqueue:go_default_library",
         "//vendor:github.com/stretchr/testify/assert",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",

@@ -65,6 +62,8 @@ go_test(
         "//vendor:k8s.io/apimachinery/pkg/types",
         "//vendor:k8s.io/apimachinery/pkg/util/json",
         "//vendor:k8s.io/apimachinery/pkg/util/sets",
+        "//vendor:k8s.io/client-go/rest",
+        "//vendor:k8s.io/client-go/util/clock",
     ],
 )
21
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go
generated
vendored
21
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go
generated
vendored

@@ -35,12 +35,11 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/client-go/util/clock"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
-	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/workqueue"
 )

@@ -213,7 +212,7 @@ func referencesDiffs(old []metav1.OwnerReference, new []metav1.OwnerReference) (
 	return added, removed
 }

-func shouldOrphanDependents(e *event, accessor meta.Object) bool {
+func shouldOrphanDependents(e *event, accessor metav1.Object) bool {
 	// The delta_fifo may combine the creation and update of the object into one
 	// event, so we need to check AddEvent as well.
 	if e.oldObj == nil {

@@ -451,24 +450,24 @@ type GarbageCollector struct {

 func gcListWatcher(client *dynamic.Client, resource schema.GroupVersionResource) *cache.ListWatch {
 	return &cache.ListWatch{
-		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 			// APIResource.Kind is not used by the dynamic client, so
 			// leave it empty. We want to list this resource in all
 			// namespaces if it's namespace scoped, so leave
 			// APIResource.Namespaced as false is all right.
 			apiResource := metav1.APIResource{Name: resource.Resource}
 			return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
-				Resource(&apiResource, v1.NamespaceAll).
+				Resource(&apiResource, metav1.NamespaceAll).
 				List(&options)
 		},
-		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 			// APIResource.Kind is not used by the dynamic client, so
 			// leave it empty. We want to list this resource in all
 			// namespaces if it's namespace scoped, so leave
 			// APIResource.Namespaced as false is all right.
 			apiResource := metav1.APIResource{Name: resource.Resource}
 			return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
-				Resource(&apiResource, v1.NamespaceAll).
+				Resource(&apiResource, metav1.NamespaceAll).
 				Watch(&options)
 		},
 	}

@@ -625,8 +624,8 @@ func (gc *GarbageCollector) deleteObject(item objectReference) error {
 		return err
 	}
 	uid := item.UID
-	preconditions := v1.Preconditions{UID: &uid}
-	deleteOptions := v1.DeleteOptions{Preconditions: &preconditions}
+	preconditions := metav1.Preconditions{UID: &uid}
+	deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions}
 	return client.Resource(resource, item.Namespace).Delete(item.Name, &deleteOptions)
 }

@@ -660,7 +659,7 @@ func (gc *GarbageCollector) patchObject(item objectReference, patch []byte) (*un
 	if err != nil {
 		return nil, err
 	}
-	return client.Resource(resource, item.Namespace).Patch(item.Name, api.StrategicMergePatchType, patch)
+	return client.Resource(resource, item.Namespace).Patch(item.Name, types.StrategicMergePatchType, patch)
 }

 func objectReferenceToUnstructured(ref objectReference) *unstructured.Unstructured {

@@ -679,7 +678,7 @@ func objectReferenceToMetadataOnlyObject(ref objectReference) *metaonly.Metadata
 			APIVersion: ref.APIVersion,
 			Kind:       ref.Kind,
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Namespace: ref.Namespace,
 			UID:       ref.UID,
 			Name:      ref.Name,
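The deleteObject hunk above is the pattern for safe cascading deletion: the delete is guarded by the UID recorded when the dependent was observed, so a recreated object with the same name is not collected by mistake. A minimal sketch of constructing those options with only k8s.io/apimachinery (the UID literal is hypothetical; a real caller passes &deleteOptions to the dynamic client's Delete, as the hunk does):

```go
package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// Guard the delete with the UID recorded when the dependent was
	// observed, so a same-named replacement object survives.
	uid := types.UID("0a1b2c3d-aaaa-bbbb-cccc-000000000000") // hypothetical UID
	preconditions := metav1.Preconditions{UID: &uid}
	deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions}

	// Print the wire form; deleteObject above instead passes
	// &deleteOptions to the dynamic client's Delete.
	b, err := json.Marshal(deleteOptions)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```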
12
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector_test.go
generated
vendored

@@ -33,12 +33,12 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/json"
 	"k8s.io/apimachinery/pkg/util/sets"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/util/clock"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
-	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/workqueue"
 )

@@ -126,7 +126,7 @@ func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {
 			Kind:       "Pod",
 			APIVersion: "v1",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:            podName,
 			Namespace:       "ns1",
 			OwnerReferences: ownerReferences,

@@ -237,7 +237,7 @@ func createEvent(eventType eventType, selfUID string, owners []string) event {
 	return event{
 		eventType: eventType,
 		obj: &v1.Pod{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				UID:             types.UID(selfUID),
 				OwnerReferences: ownerReferences,
 			},

@@ -349,8 +349,8 @@ func TestGCListWatcher(t *testing.T) {
 		t.Fatal(err)
 	}
 	lw := gcListWatcher(client, podResource)
-	lw.Watch(v1.ListOptions{ResourceVersion: "1"})
-	lw.List(v1.ListOptions{ResourceVersion: "1"})
+	lw.Watch(metav1.ListOptions{ResourceVersion: "1"})
+	lw.List(metav1.ListOptions{ResourceVersion: "1"})
 	if e, a := 2, len(testHandler.actions); e != a {
 		t.Errorf("expect %d requests, got %d", e, a)
 	}
1
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD
generated
vendored

@@ -18,7 +18,6 @@ go_library(
     tags = ["automanaged"],
     deps = [
         "//pkg/api:go_default_library",
-        "//pkg/api/v1:go_default_library",
         "//vendor:github.com/ugorji/go/codec",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
2
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly.go
generated
vendored

@@ -34,7 +34,7 @@ type metaOnlyJSONScheme struct{}

 // This function can be extended to mapping different gvk to different MetadataOnlyObject,
 // which embedded with different version of ObjectMeta. Currently the system
-// only supports v1.ObjectMeta.
+// only supports metav1.ObjectMeta.
 func gvkToMetadataOnlyObject(gvk schema.GroupVersionKind) runtime.Object {
 	if strings.HasSuffix(gvk.Kind, "List") {
 		return &MetadataOnlyObjectList{}
2
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly_test.go
generated
vendored

@@ -36,7 +36,7 @@ func getPod() *v1.Pod {
 			Kind:       "Pod",
 			APIVersion: "v1",
 		},
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name: "pod",
 			OwnerReferences: []metav1.OwnerReference{
 				{UID: "1234"},
112
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/types.generated.go
generated
vendored

@@ -26,8 +26,7 @@ import (
 	"fmt"
 	codec1978 "github.com/ugorji/go/codec"
 	pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	pkg3_types "k8s.io/apimachinery/pkg/types"
-	pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
+	pkg2_types "k8s.io/apimachinery/pkg/types"
 	"reflect"
 	"runtime"
 	time "time"

@@ -64,10 +63,9 @@ func init() {
 	}
 	if false { // reference the types, but skip this branch at build/run time
 		var v0 pkg1_v1.TypeMeta
-		var v1 pkg3_types.UID
-		var v2 pkg2_v1.ObjectMeta
-		var v3 time.Time
-		_, _, _, _ = v0, v1, v2, v3
+		var v1 pkg2_types.UID
+		var v2 time.Time
+		_, _, _ = v0, v1, v2
 	}
 }

@@ -158,7 +156,13 @@ func (x *MetadataOnlyObject) CodecEncodeSelf(e *codec1978.Encoder) {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[2] {
 				yy10 := &x.ObjectMeta
-				yy10.CodecEncodeSelf(e)
+				yym11 := z.EncBinary()
+				_ = yym11
+				if false {
+				} else if z.HasExtensions() && z.EncExt(yy10) {
+				} else {
+					z.EncFallback(yy10)
+				}
 			} else {
 				r.EncodeNil()
 			}

@@ -168,7 +172,13 @@ func (x *MetadataOnlyObject) CodecEncodeSelf(e *codec1978.Encoder) {
 				r.EncodeString(codecSelferC_UTF81234, string("metadata"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
 				yy12 := &x.ObjectMeta
-				yy12.CodecEncodeSelf(e)
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else if z.HasExtensions() && z.EncExt(yy12) {
+				} else {
+					z.EncFallback(yy12)
+				}
 			}
 		}
 		if yyr2 || yy2arr2 {

@@ -258,10 +268,16 @@ func (x *MetadataOnlyObject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
 			}
 		case "metadata":
 			if r.TryDecodeAsNil() {
-				x.ObjectMeta = pkg2_v1.ObjectMeta{}
+				x.ObjectMeta = pkg1_v1.ObjectMeta{}
 			} else {
 				yyv8 := &x.ObjectMeta
-				yyv8.CodecDecodeSelf(d)
+				yym9 := z.DecBinary()
+				_ = yym9
+				if false {
+				} else if z.HasExtensions() && z.DecExt(yyv8) {
+				} else {
+					z.DecFallback(yyv8, false)
+				}
 			}
 		default:
 			z.DecStructFieldNotFound(-1, yys3)

@@ -274,16 +290,16 @@ func (x *MetadataOnlyObject) codecDecodeSelfFromArray(l int, d *codec1978.Decode
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
-	var yyj9 int
-	var yyb9 bool
-	var yyhl9 bool = l >= 0
-	yyj9++
-	if yyhl9 {
-		yyb9 = yyj9 > l
+	var yyj10 int
+	var yyb10 bool
+	var yyhl10 bool = l >= 0
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb9 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb9 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}

@@ -291,21 +307,21 @@ func (x *MetadataOnlyObject) codecDecodeSelfFromArray(l int, d *codec1978.Decode
 	if r.TryDecodeAsNil() {
 		x.Kind = ""
 	} else {
-		yyv10 := &x.Kind
-		yym11 := z.DecBinary()
-		_ = yym11
+		yyv11 := &x.Kind
+		yym12 := z.DecBinary()
+		_ = yym12
 		if false {
 		} else {
-			*((*string)(yyv10)) = r.DecodeString()
+			*((*string)(yyv11)) = r.DecodeString()
 		}
 	}
-	yyj9++
-	if yyhl9 {
-		yyb9 = yyj9 > l
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb9 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb9 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}

@@ -313,43 +329,49 @@ func (x *MetadataOnlyObject) codecDecodeSelfFromArray(l int, d *codec1978.Decode
 	if r.TryDecodeAsNil() {
 		x.APIVersion = ""
 	} else {
-		yyv12 := &x.APIVersion
-		yym13 := z.DecBinary()
-		_ = yym13
+		yyv13 := &x.APIVersion
+		yym14 := z.DecBinary()
+		_ = yym14
 		if false {
 		} else {
-			*((*string)(yyv12)) = r.DecodeString()
+			*((*string)(yyv13)) = r.DecodeString()
 		}
 	}
-	yyj9++
-	if yyhl9 {
-		yyb9 = yyj9 > l
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb9 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb9 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.ObjectMeta = pkg2_v1.ObjectMeta{}
+		x.ObjectMeta = pkg1_v1.ObjectMeta{}
 	} else {
-		yyv14 := &x.ObjectMeta
-		yyv14.CodecDecodeSelf(d)
+		yyv15 := &x.ObjectMeta
+		yym16 := z.DecBinary()
+		_ = yym16
+		if false {
+		} else if z.HasExtensions() && z.DecExt(yyv15) {
+		} else {
+			z.DecFallback(yyv15, false)
+		}
 	}
 	for {
-		yyj9++
-		if yyhl9 {
-			yyb9 = yyj9 > l
+		yyj10++
+		if yyhl10 {
+			yyb10 = yyj10 > l
 		} else {
-			yyb9 = r.CheckBreak()
+			yyb10 = r.CheckBreak()
 		}
-		if yyb9 {
+		if yyb10 {
 			break
 		}
 		z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-		z.DecStructFieldNotFound(yyj9-1, "")
+		z.DecStructFieldNotFound(yyj10-1, "")
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
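The regenerated codec above stops calling ObjectMeta.CodecEncodeSelf and routes through the EncExt/EncFallback and DecExt/DecFallback chains instead, consistent with metav1.ObjectMeta now living in an external package without generated codec methods. A minimal round-trip sketch of the same ugorji codec machinery (the metadata struct is illustrative, not a type from this tree):

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// metadata is an illustrative struct, not a type from this tree.
type metadata struct {
	Name      string `json:"name"`
	Namespace string `json:"namespace"`
}

func main() {
	var h codec.JsonHandle

	in := metadata{Name: "pod", Namespace: "ns1"}

	// Encode: types with generated CodecEncodeSelf methods take the
	// generated fast path; everything else falls back to reflection,
	// which is what EncFallback/DecFallback do in the file above.
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		panic(err)
	}

	var out metadata
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %+v\n", buf, out)
}
```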
3
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/types.go
generated
vendored

@@ -18,7 +18,6 @@ package metaonly

 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/kubernetes/pkg/api/v1"
 )

 // MetadataOnlyObject allows decoding only the apiVersion, kind, and metadata fields of

@@ -27,7 +26,7 @@ import (
 type MetadataOnlyObject struct {
 	metav1.TypeMeta `json:",inline"`
 	// +optional
-	v1.ObjectMeta `json:"metadata,omitempty"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
 }

 // MetadataOnlyObjectList allows decoding from JSON data only the typemeta and metadata of
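MetadataOnlyObject embeds only TypeMeta and ObjectMeta so the garbage collector can decode an object's identity without materializing its spec. The same idea with just the standard library, assuming a trimmed field set for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// metadataOnly is a trimmed stand-in for MetadataOnlyObject:
// unmarshalling into it keeps apiVersion, kind, and metadata and
// silently drops every other field, including the whole spec.
type metadataOnly struct {
	APIVersion string `json:"apiVersion"`
	Kind       string `json:"kind"`
	Metadata   struct {
		Name      string `json:"name"`
		Namespace string `json:"namespace"`
		UID       string `json:"uid"`
	} `json:"metadata"`
}

func main() {
	raw := []byte(`{"apiVersion":"v1","kind":"Pod",
		"metadata":{"name":"pod","namespace":"ns1","uid":"1234"},
		"spec":{"containers":[{"name":"c","image":"busybox"}]}}`)

	var obj metadataOnly
	if err := json.Unmarshal(raw, &obj); err != nil {
		panic(err)
	}
	// The container spec was never materialized; only identity survives.
	fmt.Printf("%s %s/%s uid=%s\n", obj.Kind, obj.Metadata.Namespace, obj.Metadata.Name, obj.Metadata.UID)
}
```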
2
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metrics.go
generated
vendored

@@ -20,7 +20,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/kubernetes/pkg/util/clock"
+	"k8s.io/client-go/util/clock"

 	"github.com/prometheus/client_golang/prometheus"
 )
2
vendor/k8s.io/kubernetes/pkg/controller/informers/BUILD
generated
vendored

@@ -31,9 +31,11 @@ go_library(
         "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
+        "//pkg/client/legacylisters:go_default_library",
         "//pkg/client/listers/batch/v1:go_default_library",
         "//pkg/client/listers/core/internalversion:go_default_library",
         "//vendor:github.com/golang/glog",
+        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
         "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
         "//vendor:k8s.io/apimachinery/pkg/watch",
10
vendor/k8s.io/kubernetes/pkg/controller/informers/batch.go
generated
vendored

@@ -20,9 +20,9 @@ import (
 	"reflect"
 	"time"

+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/kubernetes/pkg/api/v1"
 	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"

@@ -61,11 +61,11 @@ func (f *jobInformer) Informer() cache.SharedIndexInformer {
 func NewJobInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return client.Batch().Jobs(v1.NamespaceAll).List(options)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return client.Batch().Jobs(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return client.Batch().Jobs(v1.NamespaceAll).Watch(options)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return client.Batch().Jobs(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&batch.Job{},
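A sketch of driving the NewJobInformer constructor above, assuming the cache helpers of this vendored tree (Run, HasSynced, WaitForCacheSync): start the informer, wait for the initial list to sync, then read its store:

```go
package example

import (
	"fmt"
	"time"

	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/controller/informers"
)

// runJobInformer drives the NewJobInformer constructor above: start
// the informer, wait for the initial list to sync, then read the store.
func runJobInformer(client clientset.Interface, stopCh <-chan struct{}) {
	informer := informers.NewJobInformer(client, 30*time.Second)
	go informer.Run(stopCh)

	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		fmt.Println("timed out waiting for job informer to sync")
		return
	}
	for _, obj := range informer.GetStore().List() {
		job := obj.(*batch.Job)
		fmt.Println(job.Namespace, job.Name)
	}
}
```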
126
vendor/k8s.io/kubernetes/pkg/controller/informers/core.go
generated
vendored

@@ -20,6 +20,7 @@ import (
 	"reflect"
 	"time"

+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/kubernetes/pkg/api"

@@ -27,6 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/client/legacylisters"
 	coreinternallisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
 )

@@ -34,7 +36,7 @@
 // Interface provides constructor for informer and lister for pods
 type PodInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToPodLister
+	Lister() *listers.StoreToPodLister
 }

 type podInformer struct {

@@ -59,9 +61,9 @@ func (f *podInformer) Informer() cache.SharedIndexInformer {
 }

 // Lister returns lister for podInformer
-func (f *podInformer) Lister() *cache.StoreToPodLister {
+func (f *podInformer) Lister() *listers.StoreToPodLister {
 	informer := f.Informer()
-	return &cache.StoreToPodLister{Indexer: informer.GetIndexer()}
+	return &listers.StoreToPodLister{Indexer: informer.GetIndexer()}
 }

 //*****************************************************************************

@@ -70,7 +72,7 @@ func (f *podInformer) Lister() *cache.StoreToPodLister {
 // Interface provides constructor for informer and lister for namsespaces
 type NamespaceInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.IndexerToNamespaceLister
+	Lister() *listers.IndexerToNamespaceLister
 }

 type namespaceInformer struct {

@@ -95,9 +97,9 @@ func (f *namespaceInformer) Informer() cache.SharedIndexInformer {
 }

 // Lister returns lister for namespaceInformer
-func (f *namespaceInformer) Lister() *cache.IndexerToNamespaceLister {
+func (f *namespaceInformer) Lister() *listers.IndexerToNamespaceLister {
 	informer := f.Informer()
-	return &cache.IndexerToNamespaceLister{Indexer: informer.GetIndexer()}
+	return &listers.IndexerToNamespaceLister{Indexer: informer.GetIndexer()}
 }

 //*****************************************************************************

@@ -142,7 +144,7 @@ func (f *internalNamespaceInformer) Lister() coreinternallisters.NamespaceLister
 // Interface provides constructor for informer and lister for nodes
 type NodeInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToNodeLister
+	Lister() *listers.StoreToNodeLister
 }

 type nodeInformer struct {

@@ -167,9 +169,9 @@ func (f *nodeInformer) Informer() cache.SharedIndexInformer {
 }

 // Lister returns lister for nodeInformer
-func (f *nodeInformer) Lister() *cache.StoreToNodeLister {
+func (f *nodeInformer) Lister() *listers.StoreToNodeLister {
 	informer := f.Informer()
-	return &cache.StoreToNodeLister{Store: informer.GetStore()}
+	return &listers.StoreToNodeLister{Store: informer.GetStore()}
 }

 //*****************************************************************************

@@ -178,7 +180,7 @@ func (f *nodeInformer) Lister() *cache.StoreToNodeLister {
 // Interface provides constructor for informer and lister for persistent volume claims
 type PVCInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToPersistentVolumeClaimLister
+	Lister() *listers.StoreToPersistentVolumeClaimLister
 }

 type pvcInformer struct {

@@ -203,9 +205,9 @@ func (f *pvcInformer) Informer() cache.SharedIndexInformer {
 }

 // Lister returns lister for pvcInformer
-func (f *pvcInformer) Lister() *cache.StoreToPersistentVolumeClaimLister {
+func (f *pvcInformer) Lister() *listers.StoreToPersistentVolumeClaimLister {
 	informer := f.Informer()
-	return &cache.StoreToPersistentVolumeClaimLister{Indexer: informer.GetIndexer()}
+	return &listers.StoreToPersistentVolumeClaimLister{Indexer: informer.GetIndexer()}
 }

 //*****************************************************************************

@@ -214,7 +216,7 @@ func (f *pvcInformer) Lister() *cache.StoreToPersistentVolumeClaimLister {
 // Interface provides constructor for informer and lister for persistent volumes
 type PVInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToPVFetcher
+	Lister() *listers.StoreToPVFetcher
 }

 type pvInformer struct {

@@ -239,9 +241,9 @@ func (f *pvInformer) Informer() cache.SharedIndexInformer {
 }

 // Lister returns lister for pvInformer
-func (f *pvInformer) Lister() *cache.StoreToPVFetcher {
+func (f *pvInformer) Lister() *listers.StoreToPVFetcher {
 	informer := f.Informer()
-	return &cache.StoreToPVFetcher{Store: informer.GetStore()}
+	return &listers.StoreToPVFetcher{Store: informer.GetStore()}
 }

 //*****************************************************************************

@@ -250,7 +252,7 @@ func (f *pvInformer) Lister() *cache.StoreToPVFetcher {
 // Interface provides constructor for informer and lister for limit ranges.
 type LimitRangeInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToLimitRangeLister
+	Lister() *listers.StoreToLimitRangeLister
 }

 type limitRangeInformer struct {

@@ -275,9 +277,9 @@ func (f *limitRangeInformer) Informer() cache.SharedIndexInformer {
 }

 // Lister returns lister for limitRangeInformer
-func (f *limitRangeInformer) Lister() *cache.StoreToLimitRangeLister {
+func (f *limitRangeInformer) Lister() *listers.StoreToLimitRangeLister {
 	informer := f.Informer()
-	return &cache.StoreToLimitRangeLister{Indexer: informer.GetIndexer()}
+	return &listers.StoreToLimitRangeLister{Indexer: informer.GetIndexer()}
 }

 //*****************************************************************************

@@ -322,7 +324,7 @@ func (f *internalLimitRangeInformer) Lister() coreinternallisters.LimitRangeList
 // Interface provides constructor for informer and lister for replication controllers.
 type ReplicationControllerInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToReplicationControllerLister
+	Lister() *listers.StoreToReplicationControllerLister
 }

 type replicationControllerInformer struct {

@@ -347,9 +349,9 @@ func (f *replicationControllerInformer) Informer() cache.SharedIndexInformer {
 }

 // Lister returns lister for replicationControllerInformer
-func (f *replicationControllerInformer) Lister() *cache.StoreToReplicationControllerLister {
+func (f *replicationControllerInformer) Lister() *listers.StoreToReplicationControllerLister {
 	informer := f.Informer()
-	return &cache.StoreToReplicationControllerLister{Indexer: informer.GetIndexer()}
+	return &listers.StoreToReplicationControllerLister{Indexer: informer.GetIndexer()}
 }

 //*****************************************************************************

@@ -358,11 +360,11 @@ func (f *replicationControllerInformer) Lister() *cache.StoreToReplicationContro
 func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return client.Core().Pods(v1.NamespaceAll).List(options)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return client.Core().Pods(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return client.Core().Pods(v1.NamespaceAll).Watch(options)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return client.Core().Pods(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&v1.Pod{},

@@ -377,10 +379,10 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cach
 func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				return client.Core().Nodes().List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				return client.Core().Nodes().Watch(options)
 			},
 		},

@@ -395,11 +397,11 @@ func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cac
 func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return client.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return client.Core().PersistentVolumeClaims(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return client.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return client.Core().PersistentVolumeClaims(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&v1.PersistentVolumeClaim{},

@@ -414,10 +416,10 @@ func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cach
 func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				return client.Core().PersistentVolumes().List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				return client.Core().PersistentVolumes().Watch(options)
 			},
 		},

@@ -432,10 +434,10 @@ func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache
 func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				return client.Core().Namespaces().List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				return client.Core().Namespaces().Watch(options)
 			},
 		},

@@ -450,15 +452,11 @@ func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration
 func NewInternalNamespaceInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				internalOptions := api.ListOptions{}
-				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
-				return client.Core().Namespaces().List(internalOptions)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return client.Core().Namespaces().List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				internalOptions := api.ListOptions{}
-				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
-				return client.Core().Namespaces().Watch(internalOptions)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return client.Core().Namespaces().Watch(options)
 			},
 		},
 		&api.Namespace{},

@@ -472,11 +470,11 @@ func NewInternalNamespaceInformer(client internalclientset.Interface, resyncPeri
 func NewLimitRangeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return client.Core().LimitRanges(v1.NamespaceAll).List(options)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return client.Core().LimitRanges(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return client.Core().LimitRanges(v1.NamespaceAll).Watch(options)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return client.Core().LimitRanges(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&v1.LimitRange{},

@@ -490,15 +488,11 @@ func NewLimitRangeInformer(client clientset.Interface, resyncPeriod time.Duratio
 func NewInternalLimitRangeInformer(internalclient internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				internalOptions := api.ListOptions{}
-				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
-				return internalclient.Core().LimitRanges(v1.NamespaceAll).List(internalOptions)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return internalclient.Core().LimitRanges(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				internalOptions := api.ListOptions{}
-				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
-				return internalclient.Core().LimitRanges(v1.NamespaceAll).Watch(internalOptions)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return internalclient.Core().LimitRanges(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&api.LimitRange{},

@@ -512,11 +506,11 @@ func NewInternalLimitRangeInformer(internalclient internalclientset.Interface, r
 func NewReplicationControllerInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return client.Core().ReplicationControllers(v1.NamespaceAll).List(options)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return client.Core().ReplicationControllers(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return client.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return client.Core().ReplicationControllers(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&v1.ReplicationController{},

@@ -533,7 +527,7 @@ func NewReplicationControllerInformer(client clientset.Interface, resyncPeriod t
 // Interface provides constructor for informer and lister for ServiceAccounts
 type ServiceAccountInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToServiceAccountLister
+	Lister() *listers.StoreToServiceAccountLister
 }

 type serviceAccountInformer struct {

@@ -558,20 +552,20 @@ func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer {
 }

 // Lister returns lister for ServiceAccountInformer
-func (f *serviceAccountInformer) Lister() *cache.StoreToServiceAccountLister {
+func (f *serviceAccountInformer) Lister() *listers.StoreToServiceAccountLister {
 	informer := f.Informer()
-	return &cache.StoreToServiceAccountLister{Indexer: informer.GetIndexer()}
+	return &listers.StoreToServiceAccountLister{Indexer: informer.GetIndexer()}
 }

 // NewServiceAccountInformer returns a SharedIndexInformer that lists and watches all ServiceAccounts
 func NewServiceAccountInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
 	sharedIndexInformer := cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return client.Core().ServiceAccounts(v1.NamespaceAll).List(options)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return client.Core().ServiceAccounts(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return client.Core().ServiceAccounts(v1.NamespaceAll).Watch(options)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return client.Core().ServiceAccounts(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&v1.ServiceAccount{},
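All of the constructors above share one shape: a cache.ListWatch whose List/Watch closures now take metav1.ListOptions, wrapped in a shared index informer. A sketch of consuming one of them with an event handler (the handler bodies are illustrative):

```go
package example

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/controller/informers"
)

// watchPods attaches a handler to the pod informer built above. The
// AddFunc fires once per pod on the initial list and then on creations.
func watchPods(client clientset.Interface, stopCh <-chan struct{}) {
	podInformer := informers.NewPodInformer(client, 5*time.Minute)
	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			pod := obj.(*v1.Pod)
			fmt.Println("pod added:", pod.Namespace, pod.Name)
		},
		DeleteFunc: func(obj interface{}) {
			// On deletion obj can be a cache.DeletedFinalStateUnknown
			// tombstone, so the type assertion is checked.
			if pod, ok := obj.(*v1.Pod); ok {
				fmt.Println("pod deleted:", pod.Namespace, pod.Name)
			}
		},
	})
	podInformer.Run(stopCh)
}
```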
45
vendor/k8s.io/kubernetes/pkg/controller/informers/extensions.go
generated
vendored

@@ -19,18 +19,19 @@ package informers

 import (
 	"reflect"

+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/legacylisters"
 )

 // DaemonSetInformer is type of SharedIndexInformer which watches and lists all pods.
 // Interface provides constructor for informer and lister for pods
 type DaemonSetInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToDaemonSetLister
+	Lister() *listers.StoreToDaemonSetLister
 }

 type daemonSetInformer struct {

@@ -48,11 +49,11 @@ func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
 	}
 	informer = cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return f.client.Extensions().DaemonSets(v1.NamespaceAll).List(options)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return f.client.Extensions().DaemonSets(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return f.client.Extensions().DaemonSets(v1.NamespaceAll).Watch(options)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return f.client.Extensions().DaemonSets(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&extensions.DaemonSet{},

@@ -64,15 +65,15 @@ func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
 	return informer
 }

-func (f *daemonSetInformer) Lister() *cache.StoreToDaemonSetLister {
+func (f *daemonSetInformer) Lister() *listers.StoreToDaemonSetLister {
 	informer := f.Informer()
-	return &cache.StoreToDaemonSetLister{Store: informer.GetIndexer()}
+	return &listers.StoreToDaemonSetLister{Store: informer.GetIndexer()}
 }

 // DeploymentInformer is a type of SharedIndexInformer which watches and lists all deployments.
 type DeploymentInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToDeploymentLister
+	Lister() *listers.StoreToDeploymentLister
 }

 type deploymentInformer struct {

@@ -90,11 +91,11 @@ func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
 	}
 	informer = cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return f.client.Extensions().Deployments(v1.NamespaceAll).List(options)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return f.client.Extensions().Deployments(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return f.client.Extensions().Deployments(v1.NamespaceAll).Watch(options)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return f.client.Extensions().Deployments(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&extensions.Deployment{},

@@ -106,15 +107,15 @@ func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
 	return informer
 }

-func (f *deploymentInformer) Lister() *cache.StoreToDeploymentLister {
+func (f *deploymentInformer) Lister() *listers.StoreToDeploymentLister {
 	informer := f.Informer()
-	return &cache.StoreToDeploymentLister{Indexer: informer.GetIndexer()}
+	return &listers.StoreToDeploymentLister{Indexer: informer.GetIndexer()}
 }

 // ReplicaSetInformer is a type of SharedIndexInformer which watches and lists all replicasets.
 type ReplicaSetInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() *cache.StoreToReplicaSetLister
+	Lister() *listers.StoreToReplicaSetLister
 }

 type replicaSetInformer struct {

@@ -132,11 +133,11 @@ func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
 	}
 	informer = cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return f.client.Extensions().ReplicaSets(v1.NamespaceAll).List(options)
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return f.client.Extensions().ReplicaSets(metav1.NamespaceAll).List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return f.client.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options)
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return f.client.Extensions().ReplicaSets(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&extensions.ReplicaSet{},

@@ -148,7 +149,7 @@ func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
 	return informer
 }

-func (f *replicaSetInformer) Lister() *cache.StoreToReplicaSetLister {
+func (f *replicaSetInformer) Lister() *listers.StoreToReplicaSetLister {
 	informer := f.Informer()
-	return &cache.StoreToReplicaSetLister{Indexer: informer.GetIndexer()}
+	return &listers.StoreToReplicaSetLister{Indexer: informer.GetIndexer()}
 }
55
vendor/k8s.io/kubernetes/pkg/controller/informers/rbac.go
generated
vendored

@@ -19,17 +19,18 @@ package informers

 import (
 	"reflect"

+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/rbac"
 	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/legacylisters"
 )

 type ClusterRoleInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() cache.ClusterRoleLister
+	Lister() listers.ClusterRoleLister
 }

 type clusterRoleInformer struct {

@@ -47,10 +48,10 @@ func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer {
 	}
 	informer = cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				return f.internalclient.Rbac().ClusterRoles().List(convertListOptionsOrDie(options))
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				return f.internalclient.Rbac().ClusterRoles().Watch(convertListOptionsOrDie(options))
 			},
 		},

@@ -63,13 +64,13 @@ func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer {
 	return informer
 }

-func (f *clusterRoleInformer) Lister() cache.ClusterRoleLister {
-	return cache.NewClusterRoleLister(f.Informer().GetIndexer())
+func (f *clusterRoleInformer) Lister() listers.ClusterRoleLister {
+	return listers.NewClusterRoleLister(f.Informer().GetIndexer())
 }

 type ClusterRoleBindingInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() cache.ClusterRoleBindingLister
+	Lister() listers.ClusterRoleBindingLister
 }

 type clusterRoleBindingInformer struct {

@@ -87,10 +88,10 @@ func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer {
 	}
 	informer = cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				return f.internalclient.Rbac().ClusterRoleBindings().List(convertListOptionsOrDie(options))
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				return f.internalclient.Rbac().ClusterRoleBindings().Watch(convertListOptionsOrDie(options))
 			},
 		},

@@ -103,13 +104,13 @@ func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer {
 	return informer
 }

-func (f *clusterRoleBindingInformer) Lister() cache.ClusterRoleBindingLister {
-	return cache.NewClusterRoleBindingLister(f.Informer().GetIndexer())
+func (f *clusterRoleBindingInformer) Lister() listers.ClusterRoleBindingLister {
+	return listers.NewClusterRoleBindingLister(f.Informer().GetIndexer())
 }

 type RoleInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() cache.RoleLister
+	Lister() listers.RoleLister
 }

 type roleInformer struct {

@@ -127,11 +128,11 @@ func (f *roleInformer) Informer() cache.SharedIndexInformer {
 	}
 	informer = cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return f.internalclient.Rbac().Roles(v1.NamespaceAll).List(convertListOptionsOrDie(options))
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return f.internalclient.Rbac().Roles(metav1.NamespaceAll).List(convertListOptionsOrDie(options))
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return f.internalclient.Rbac().Roles(v1.NamespaceAll).Watch(convertListOptionsOrDie(options))
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return f.internalclient.Rbac().Roles(metav1.NamespaceAll).Watch(convertListOptionsOrDie(options))
 			},
 		},
 		&rbac.Role{},

@@ -143,13 +144,13 @@ func (f *roleInformer) Informer() cache.SharedIndexInformer {
 	return informer
 }

-func (f *roleInformer) Lister() cache.RoleLister {
-	return cache.NewRoleLister(f.Informer().GetIndexer())
+func (f *roleInformer) Lister() listers.RoleLister {
+	return listers.NewRoleLister(f.Informer().GetIndexer())
 }

 type RoleBindingInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() cache.RoleBindingLister
+	Lister() listers.RoleBindingLister
 }

 type roleBindingInformer struct {

@@ -167,11 +168,11 @@ func (f *roleBindingInformer) Informer() cache.SharedIndexInformer {
 	}
 	informer = cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-				return f.internalclient.Rbac().RoleBindings(v1.NamespaceAll).List(convertListOptionsOrDie(options))
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				return f.internalclient.Rbac().RoleBindings(metav1.NamespaceAll).List(convertListOptionsOrDie(options))
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-				return f.internalclient.Rbac().RoleBindings(v1.NamespaceAll).Watch(convertListOptionsOrDie(options))
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				return f.internalclient.Rbac().RoleBindings(metav1.NamespaceAll).Watch(convertListOptionsOrDie(options))
 			},
 		},
 		&rbac.RoleBinding{},

@@ -183,12 +184,12 @@ func (f *roleBindingInformer) Informer() cache.SharedIndexInformer {
 	return informer
 }

-func (f *roleBindingInformer) Lister() cache.RoleBindingLister {
-	return cache.NewRoleBindingLister(f.Informer().GetIndexer())
+func (f *roleBindingInformer) Lister() listers.RoleBindingLister {
+	return listers.NewRoleBindingLister(f.Informer().GetIndexer())
 }

-func convertListOptionsOrDie(in v1.ListOptions) api.ListOptions {
-	out := api.ListOptions{}
+func convertListOptionsOrDie(in metav1.ListOptions) metav1.ListOptions {
+	out := metav1.ListOptions{}
 	if err := api.Scheme.Convert(&in, &out, nil); err != nil {
 		panic(err)
 	}
13
vendor/k8s.io/kubernetes/pkg/controller/informers/storage.go
generated
vendored

@@ -19,18 +19,19 @@ package informers

 import (
 	"reflect"

+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/kubernetes/pkg/api/v1"
 	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/legacylisters"
 )

 // StorageClassInformer is type of SharedIndexInformer which watches and lists all storage classes.
 // Interface provides constructor for informer and lister for storage classes
 type StorageClassInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() cache.StorageClassLister
+	Lister() listers.StorageClassLister
 }

 type storageClassInformer struct {

@@ -48,10 +49,10 @@ func (f *storageClassInformer) Informer() cache.SharedIndexInformer {
 	}
 	informer = cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				return f.client.Storage().StorageClasses().List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				return f.client.Storage().StorageClasses().Watch(options)
 			},
 		},

@@ -64,7 +65,7 @@ func (f *storageClassInformer) Informer() cache.SharedIndexInformer {
 	return informer
 }

-func (f *storageClassInformer) Lister() cache.StorageClassLister {
+func (f *storageClassInformer) Lister() listers.StorageClassLister {
 	informer := f.Informer()
-	return cache.NewStorageClassLister(informer.GetIndexer())
+	return listers.NewStorageClassLister(informer.GetIndexer())
 }
3
vendor/k8s.io/kubernetes/pkg/controller/job/BUILD
generated
vendored

@@ -22,6 +22,7 @@ go_library(
         "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
+        "//pkg/client/legacylisters:go_default_library",
         "//pkg/client/listers/batch/v1:go_default_library",
         "//pkg/client/record:go_default_library",
         "//pkg/controller:go_default_library",

@@ -51,7 +52,6 @@ go_test(
         "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/fake:go_default_library",
-        "//pkg/client/restclient:go_default_library",
         "//pkg/client/testing/core:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/informers:go_default_library",

@@ -59,6 +59,7 @@
         "//vendor:k8s.io/apimachinery/pkg/util/rand",
         "//vendor:k8s.io/apimachinery/pkg/util/wait",
         "//vendor:k8s.io/apimachinery/pkg/watch",
+        "//vendor:k8s.io/client-go/rest",
     ],
 )
3
vendor/k8s.io/kubernetes/pkg/controller/job/jobcontroller.go
generated
vendored

@@ -32,6 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+	"k8s.io/kubernetes/pkg/client/legacylisters"
 	batchv1listers "k8s.io/kubernetes/pkg/client/listers/batch/v1"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"

@@ -63,7 +64,7 @@ type JobController struct {
 	jobLister batchv1listers.JobLister

 	// A store of pods, populated by the podController
-	podStore cache.StoreToPodLister
+	podStore listers.StoreToPodLister

 	// Jobs that need to be updated
 	queue workqueue.RateLimitingInterface
22
vendor/k8s.io/kubernetes/pkg/controller/job/jobcontroller_test.go
generated
vendored

@@ -25,13 +25,13 @@ import (
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/testing/core"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/informers"

@@ -41,16 +41,16 @@ var alwaysReady = func() bool { return true }

 func newJob(parallelism, completions int32) *batch.Job {
 	j := &batch.Job{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "foobar",
-			Namespace: v1.NamespaceDefault,
+			Namespace: metav1.NamespaceDefault,
 		},
 		Spec: batch.JobSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"foo": "bar"},
 			},
 			Template: v1.PodTemplateSpec{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
 						"foo": "bar",
 					},

@@ -99,7 +99,7 @@ func newPodList(count int32, status v1.PodPhase, job *batch.Job) []v1.Pod {
 	pods := []v1.Pod{}
 	for i := int32(0); i < count; i++ {
 		newPod := v1.Pod{
-			ObjectMeta: v1.ObjectMeta{
+			ObjectMeta: metav1.ObjectMeta{
 				Name:      fmt.Sprintf("pod-%v", rand.String(10)),
 				Labels:    job.Spec.Selector.MatchLabels,
 				Namespace: job.Namespace,

@@ -523,17 +523,17 @@ func TestJobPodLookup(t *testing.T) {
 		// pods without labels don't match any job
 		{
 			job: &batch.Job{
-				ObjectMeta: v1.ObjectMeta{Name: "basic"},
+				ObjectMeta: metav1.ObjectMeta{Name: "basic"},
 			},
 			pod: &v1.Pod{
-				ObjectMeta: v1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll},
+				ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll},
 			},
 			expectedName: "",
 		},
 		// matching labels, different namespace
 		{
 			job: &batch.Job{
-				ObjectMeta: v1.ObjectMeta{Name: "foo"},
+				ObjectMeta: metav1.ObjectMeta{Name: "foo"},
 				Spec: batch.JobSpec{
 					Selector: &metav1.LabelSelector{
 						MatchLabels: map[string]string{"foo": "bar"},

@@ -541,7 +541,7 @@ func TestJobPodLookup(t *testing.T) {
 				},
 			},
 			pod: &v1.Pod{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Name:      "foo2",
 					Namespace: "ns",
 					Labels:    map[string]string{"foo": "bar"},

@@ -552,7 +552,7 @@ func TestJobPodLookup(t *testing.T) {
 		// matching ns and labels returns
 		{
 			job: &batch.Job{
-				ObjectMeta: v1.ObjectMeta{Name: "bar", Namespace: "ns"},
+				ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns"},
 				Spec: batch.JobSpec{
 					Selector: &metav1.LabelSelector{
 						MatchExpressions: []metav1.LabelSelectorRequirement{

@@ -566,7 +566,7 @@ func TestJobPodLookup(t *testing.T) {
 				},
 			},
 			pod: &v1.Pod{
-				ObjectMeta: v1.ObjectMeta{
+				ObjectMeta: metav1.ObjectMeta{
 					Name:      "foo3",
 					Namespace: "ns",
 					Labels:    map[string]string{"foo": "bar"},
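The fixtures above move wholesale from v1.ObjectMeta to metav1.ObjectMeta, and the namespace constants move with them. A minimal Job fixture in the migrated style, mirroring newJob:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
)

// newTestJob mirrors the migrated newJob fixture above: ObjectMeta,
// NamespaceDefault, and LabelSelector all come from metav1 now.
func newTestJob() *batch.Job {
	return &batch.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foobar",
			Namespace: metav1.NamespaceDefault,
		},
		Spec: batch.JobSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"foo": "bar"},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"foo": "bar"},
				},
			},
		},
	}
}
```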
4
vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go
generated
vendored

@@ -21,12 +21,12 @@ import (
 	"sync"

 	"github.com/golang/groupcache/lru"
-	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	hashutil "k8s.io/kubernetes/pkg/util/hash"
 )

 type objectWithMeta interface {
-	meta.Object
+	metav1.Object
 }

 // keyFunc returns the key of an object, which is used to look up in the cache for it's matching object.
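lookup_cache.go keys a bounded LRU by a hash over an object's meta, and the hunk above only swaps the meta.Object interface for metav1.Object. A sketch of that cache shape with hash/fnv and groupcache/lru (hashMeta is an illustrative stand-in for the tree's hashutil helper):

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sort"
	"sync"

	"github.com/golang/groupcache/lru"
)

// hashMeta folds namespace, name, and (sorted) labels into one uint64
// key; it stands in for the tree's hashutil helper for illustration.
func hashMeta(namespace, name string, labels map[string]string) uint64 {
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order regardless of map iteration
	h := fnv.New64a()
	fmt.Fprintf(h, "%s/%s", namespace, name)
	for _, k := range keys {
		fmt.Fprintf(h, ";%s=%s", k, labels[k])
	}
	return h.Sum64()
}

func main() {
	var mu sync.Mutex
	cache := lru.New(64) // bounded: the oldest entries are evicted

	key := hashMeta("ns1", "pod", map[string]string{"foo": "bar"})

	mu.Lock()
	cache.Add(key, "cached controller ref")
	v, ok := cache.Get(key)
	mu.Unlock()

	fmt.Println(v, ok)
}
```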
2
vendor/k8s.io/kubernetes/pkg/controller/namespace/BUILD
generated
vendored

@@ -48,7 +48,6 @@ go_test(
         "//pkg/api/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/fake:go_default_library",
-        "//pkg/client/restclient:go_default_library",
         "//pkg/client/testing/core:go_default_library",
         "//pkg/client/typed/discovery:go_default_library",
         "//pkg/client/typed/dynamic:go_default_library",

@@ -57,6 +56,7 @@
         "//vendor:k8s.io/apimachinery/pkg/runtime",
         "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
         "//vendor:k8s.io/apimachinery/pkg/util/sets",
+        "//vendor:k8s.io/client-go/rest",
     ],
 )
4
vendor/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go
generated
vendored

@@ -129,10 +129,10 @@ func NewNamespaceController(
 	// configure the backing store/controller
 	store, controller := cache.NewInformer(
 		&cache.ListWatch{
-			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				return kubeClient.Core().Namespaces().List(options)
 			},
-			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 				return kubeClient.Core().Namespaces().Watch(options)
 			},
 		},
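The hunk above rebuilds the namespace controller's store/controller pair from cache.NewInformer with the metav1-typed ListWatch. A sketch of the same wiring with an event handler attached (the handler body is illustrative):

```go
package example

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// watchNamespaces mirrors the store/controller wiring above; the store
// becomes a read-through cache of namespaces once the controller runs.
func watchNamespaces(kubeClient clientset.Interface, resyncPeriod time.Duration, stopCh <-chan struct{}) cache.Store {
	store, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().Namespaces().List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().Namespaces().Watch(options)
			},
		},
		&v1.Namespace{},
		resyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				fmt.Println("namespace added:", obj.(*v1.Namespace).Name)
			},
		},
	)
	go controller.Run(stopCh)
	return store
}
```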
10
vendor/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller_test.go
generated
vendored

@@ -30,11 +30,11 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
-	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/testing/core"
 	"k8s.io/kubernetes/pkg/client/typed/discovery"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"

@@ -58,7 +58,7 @@ func TestFinalized(t *testing.T) {
 func TestFinalizeNamespaceFunc(t *testing.T) {
 	mockClient := &fake.Clientset{}
 	testNamespace := &v1.Namespace{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:            "test",
 			ResourceVersion: "1",
 		},

@@ -87,7 +87,7 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *metav1.APIVersio
 	now := metav1.Now()
 	namespaceName := "test"
 	testNamespacePendingFinalize := &v1.Namespace{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:              namespaceName,
 			ResourceVersion:   "1",
 			DeletionTimestamp: &now,

@@ -100,7 +100,7 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *metav1.APIVersio
 		},
 	}
 	testNamespaceFinalizeComplete := &v1.Namespace{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:              namespaceName,
 			ResourceVersion:   "1",
 			DeletionTimestamp: &now,

@@ -233,7 +233,7 @@ func TestSyncNamespaceThatIsTerminatingV1Beta1(t *testing.T) {
 func TestSyncNamespaceThatIsActive(t *testing.T) {
 	mockClient := &fake.Clientset{}
 	testNamespace := &v1.Namespace{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:            "test",
 			ResourceVersion: "1",
 		},
10 vendor/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller_utils.go generated vendored

@@ -178,7 +178,7 @@ func deleteCollection(
    // resource deletions generically. it will ensure all resources in the namespace are purged prior to releasing
    // namespace itself.
    orphanDependents := false
-   err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, &v1.ListOptions{})
+   err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(&metav1.DeleteOptions{OrphanDependents: &orphanDependents}, &metav1.ListOptions{})

    if err == nil {
        return true, nil

@@ -220,7 +220,7 @@ func listCollection(
    }

    apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
-   obj, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
+   obj, err := dynamicClient.Resource(&apiResource, namespace).List(&metav1.ListOptions{})
    if err == nil {
        unstructuredList, ok := obj.(*unstructured.UnstructuredList)
        if !ok {

@@ -406,10 +406,10 @@ func syncNamespace(
    // if the namespace is already finalized, delete it
    if finalized(namespace) {
-       var opts *v1.DeleteOptions
+       var opts *metav1.DeleteOptions
        uid := namespace.UID
        if len(uid) > 0 {
-           opts = &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &uid}}
+           opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}
        }
        err = kubeClient.Core().Namespaces().Delete(namespace.Name, opts)
        if err != nil && !errors.IsNotFound(err) {

@@ -486,7 +486,7 @@ func estimateGracefulTermination(kubeClient clientset.Interface, groupVersionRes
func estimateGracefulTerminationForPods(kubeClient clientset.Interface, ns string) (int64, error) {
    glog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns)
    estimate := int64(0)
-   items, err := kubeClient.Core().Pods(ns).List(v1.ListOptions{})
+   items, err := kubeClient.Core().Pods(ns).List(metav1.ListOptions{})
    if err != nil {
        return estimate, err
    }
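Namespace deletion in the syncNamespace hunk carries a UID precondition, so a namespace that was deleted and recreated under the same name is not removed by a stale worker. A sketch of that call shape under the new metav1 types, lifted directly from the hunk (assumes the clientset and v1 imports shown earlier; deleteWithUIDPrecondition is a hypothetical wrapper name):

// deleteWithUIDPrecondition deletes the namespace only if its UID still
// matches the one we observed, guarding against name reuse.
func deleteWithUIDPrecondition(kubeClient clientset.Interface, namespace *v1.Namespace) error {
	var opts *metav1.DeleteOptions
	uid := namespace.UID
	if len(uid) > 0 {
		opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}
	}
	return kubeClient.Core().Namespaces().Delete(namespace.Name, opts)
}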
7 vendor/k8s.io/kubernetes/pkg/controller/node/BUILD generated vendored

@@ -26,12 +26,11 @@ go_library(
        "//pkg/client/cache:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
+       "//pkg/client/legacylisters:go_default_library",
        "//pkg/client/record:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/controller/informers:go_default_library",
-       "//pkg/fields:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
-       "//pkg/util/flowcontrol:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//pkg/util/node:go_default_library",
        "//pkg/util/system:go_default_library",

@@ -40,12 +39,14 @@ go_library(
        "//vendor:github.com/prometheus/client_golang/prometheus",
        "//vendor:k8s.io/apimachinery/pkg/api/errors",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
+       "//vendor:k8s.io/apimachinery/pkg/fields",
        "//vendor:k8s.io/apimachinery/pkg/labels",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/errors",
        "//vendor:k8s.io/apimachinery/pkg/util/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
+       "//vendor:k8s.io/client-go/util/flowcontrol",
    ],
)

@@ -73,7 +74,6 @@ go_test(
        "//pkg/controller:go_default_library",
        "//pkg/controller/informers:go_default_library",
        "//pkg/controller/node/testutil:go_default_library",
-       "//pkg/util/flowcontrol:go_default_library",
        "//pkg/util/node:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",

@@ -81,6 +81,7 @@ go_test(
        "//vendor:k8s.io/apimachinery/pkg/util/diff",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
+       "//vendor:k8s.io/client-go/util/flowcontrol",
    ],
)
15 vendor/k8s.io/kubernetes/pkg/controller/node/cidr_allocator_test.go generated vendored

@@ -21,6 +21,7 @@ import (
    "testing"
    "time"

+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"

@@ -55,7 +56,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name: "node0",
                        },
                    },

@@ -75,7 +76,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name: "node0",
                        },
                    },

@@ -99,7 +100,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name: "node0",
                        },
                    },

@@ -185,7 +186,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name: "node0",
                        },
                    },

@@ -268,7 +269,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name: "node0",
                        },
                    },

@@ -291,7 +292,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name: "node0",
                        },
                    },

@@ -359,7 +360,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
        for _, cidrToRelease := range tc.cidrsToRelease {
            nodeToRelease := v1.Node{
-               ObjectMeta: v1.ObjectMeta{
+               ObjectMeta: metav1.ObjectMeta{
                    Name: "node0",
                },
            }
16 vendor/k8s.io/kubernetes/pkg/controller/node/controller_utils.go generated vendored

@@ -21,6 +21,8 @@ import (
    "strings"

    "k8s.io/apimachinery/pkg/api/errors"
+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+   "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/types"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"

@@ -28,9 +30,9 @@ import (
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/cache"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+   "k8s.io/kubernetes/pkg/client/legacylisters"
    "k8s.io/kubernetes/pkg/client/record"
    "k8s.io/kubernetes/pkg/cloudprovider"
-   "k8s.io/kubernetes/pkg/fields"
    "k8s.io/kubernetes/pkg/kubelet/util/format"
    "k8s.io/kubernetes/pkg/util/node"
    utilversion "k8s.io/kubernetes/pkg/util/version"

@@ -45,11 +47,11 @@ const (
// deletePods will delete all pods from master running on given node, and return true
// if any pods were deleted, or were found pending deletion.
-func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore cache.StoreToDaemonSetLister) (bool, error) {
+func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore listers.StoreToDaemonSetLister) (bool, error) {
    remaining := false
    selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
-   options := v1.ListOptions{FieldSelector: selector}
-   pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(options)
+   options := metav1.ListOptions{FieldSelector: selector}
+   pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(options)
    var updateErrList []error

    if err != nil {

@@ -120,7 +122,7 @@ func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
func forcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
    var zero int64
    glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
-   err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &zero})
+   err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &zero})
    if err == nil {
        glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
    }

@@ -202,8 +204,8 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
    }
    nodeName := node.Name
    glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
-   opts := v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
-   pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(opts)
+   opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
+   pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(opts)
    if err != nil {
        return err
    }
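Both deletePods and markAllPodsNotReady enumerate only the pods bound to one node by pushing the constraint server-side as a field selector, rather than listing everything and filtering locally. A sketch of that query under the new types, using exactly the calls the hunks above contain (podsOnNode is a hypothetical wrapper name):

// podsOnNode lists only the pods whose spec.nodeName matches, by
// encoding the constraint as a field selector on the list request.
func podsOnNode(kubeClient clientset.Interface, nodeName string) (*v1.PodList, error) {
	selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
	return kubeClient.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{FieldSelector: selector})
}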
13 vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go generated vendored

@@ -25,20 +25,21 @@ import (
    "github.com/golang/glog"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+   "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/types"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
+   "k8s.io/client-go/util/flowcontrol"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/cache"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+   "k8s.io/kubernetes/pkg/client/legacylisters"
    "k8s.io/kubernetes/pkg/client/record"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/controller/informers"
-   "k8s.io/kubernetes/pkg/fields"
-   "k8s.io/kubernetes/pkg/util/flowcontrol"
    "k8s.io/kubernetes/pkg/util/metrics"
    utilnode "k8s.io/kubernetes/pkg/util/node"
    "k8s.io/kubernetes/pkg/util/system"

@@ -135,9 +136,9 @@ type NodeController struct {
    nodeInformer      informers.NodeInformer
    daemonSetInformer informers.DaemonSetInformer

-   podStore       cache.StoreToPodLister
-   nodeStore      cache.StoreToNodeLister
-   daemonSetStore cache.StoreToDaemonSetLister
+   podStore       listers.StoreToPodLister
+   nodeStore      listers.StoreToNodeLister
+   daemonSetStore listers.StoreToDaemonSetLister
    // allocate/recycle CIDRs for node if allocateNodeCIDRs == true
    cidrAllocator CIDRAllocator

@@ -252,7 +253,7 @@ func NewNodeController(
    // We must poll because apiserver might not be up. This error causes
    // controller manager to restart.
    if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
-       nodeList, err = kubeClient.Core().Nodes().List(v1.ListOptions{
+       nodeList, err = kubeClient.Core().Nodes().List(metav1.ListOptions{
            FieldSelector: fields.Everything().String(),
            LabelSelector: labels.Everything().String(),
        })
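As the comment in the last hunk notes, NewNodeController tolerates an apiserver that is not up yet by polling the node list until it succeeds or the startup grace period lapses. A compact sketch of that wait.Poll idiom, with the 10-second interval from the hunk and abbreviated error handling (waitForNodeList is a hypothetical wrapper name):

// waitForNodeList polls every 10s until the initial node list succeeds,
// returning an error once apiserverStartupGracePeriod is exhausted.
func waitForNodeList(kubeClient clientset.Interface, apiserverStartupGracePeriod time.Duration) (*v1.NodeList, error) {
	var nodeList *v1.NodeList
	var err error
	if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
		nodeList, err = kubeClient.Core().Nodes().List(metav1.ListOptions{
			FieldSelector: fields.Everything().String(),
			LabelSelector: labels.Everything().String(),
		})
		if err != nil {
			return false, nil // swallow the error and retry until the timeout
		}
		return true, nil
	}); pollErr != nil {
		return nil, pollErr
	}
	return nodeList, nil
}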
116 vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go generated vendored

@@ -80,7 +80,7 @@ func NewNodeControllerFromClient(
}

func syncNodeStore(nc *NodeController, fakeNodeHandler *testutil.FakeNodeHandler) error {
-   nodes, err := fakeNodeHandler.List(v1.ListOptions{})
+   nodes, err := fakeNodeHandler.List(metav1.ListOptions{})
    if err != nil {
        return err
    }

@@ -124,7 +124,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: fakeNow,
                            Labels: map[string]string{

@@ -134,7 +134,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
                        },
                    },
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node1",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -168,7 +168,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -188,7 +188,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
                        },
                    },
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node1",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -232,7 +232,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -252,7 +252,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
                        },
                    },
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node1",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -276,7 +276,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
            &v1.PodList{
                Items: []v1.Pod{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:      "pod0",
                            Namespace: "default",
                            Labels:    map[string]string{"daemon": "yes"},

@@ -291,7 +291,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
            },
            daemonSets: []extensions.DaemonSet{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:      "ds0",
                        Namespace: "default",
                    },

@@ -323,7 +323,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -343,7 +343,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
                        },
                    },
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node1",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -387,7 +387,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -407,7 +407,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
                        },
                    },
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node1",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -451,7 +451,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -471,7 +471,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
                        },
                    },
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node1",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -594,7 +594,7 @@ func TestPodStatusChange(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -614,7 +614,7 @@ func TestPodStatusChange(t *testing.T) {
                        },
                    },
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node1",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                            Labels: map[string]string{

@@ -749,7 +749,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
        {
            nodeList: []*v1.Node{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -769,7 +769,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
                    },
                },
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node1",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -804,7 +804,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
        {
            nodeList: []*v1.Node{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -824,7 +824,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
                    },
                },
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node1",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -866,7 +866,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
        {
            nodeList: []*v1.Node{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -886,7 +886,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
                    },
                },
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node1",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -927,7 +927,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
        {
            nodeList: []*v1.Node{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -947,7 +947,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
                    },
                },
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node-master",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -986,7 +986,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
        {
            nodeList: []*v1.Node{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -1006,7 +1006,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
                    },
                },
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node1",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -1048,7 +1048,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
        {
            nodeList: []*v1.Node{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -1068,7 +1068,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
                    },
                },
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node1",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -1088,7 +1088,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
                    },
                },
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node2",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -1108,7 +1108,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
                    },
                },
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node3",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -1128,7 +1128,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
                    },
                },
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node4",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        Labels: map[string]string{

@@ -1245,7 +1245,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
    fnh := &testutil.FakeNodeHandler{
        Existing: []*v1.Node{
            {
-               ObjectMeta: v1.ObjectMeta{
+               ObjectMeta: metav1.ObjectMeta{
                    Name:              "node0",
                    CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                },

@@ -1309,7 +1309,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },

@@ -1320,7 +1320,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
            expectedRequestCount: 2, // List+Update
            expectedNodes: []*v1.Node{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                    },

@@ -1353,7 +1353,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: fakeNow,
                        },

@@ -1370,7 +1370,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },

@@ -1429,7 +1429,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
            },
            expectedNodes: []*v1.Node{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:              "node0",
                        CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                    },

@@ -1469,7 +1469,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },

@@ -1547,7 +1547,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: fakeNow,
                        },

@@ -1563,7 +1563,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },

@@ -1597,7 +1597,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },

@@ -1667,7 +1667,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
            fakeNodeHandler: &testutil.FakeNodeHandler{
                Existing: []*v1.Node{
                    {
-                       ObjectMeta: v1.ObjectMeta{
+                       ObjectMeta: metav1.ObjectMeta{
                            Name:              "node0",
                            CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                        },

@@ -1772,7 +1772,7 @@ func TestNodeEventGeneration(t *testing.T) {
    fakeNodeHandler := &testutil.FakeNodeHandler{
        Existing: []*v1.Node{
            {
-               ObjectMeta: v1.ObjectMeta{
+               ObjectMeta: metav1.ObjectMeta{
                    Name:              "node0",
                    UID:               "1234567890",
                    CreationTimestamp: metav1.Date(2015, 8, 10, 0, 0, 0, 0, time.UTC),

@@ -1839,70 +1839,70 @@ func TestCheckPod(t *testing.T) {
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: nil},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
                Spec:       v1.PodSpec{NodeName: "new"},
            },
            prune: false,
        },
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: nil},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
                Spec:       v1.PodSpec{NodeName: "old"},
            },
            prune: false,
        },
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: nil},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
                Spec:       v1.PodSpec{NodeName: ""},
            },
            prune: false,
        },
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: nil},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: nil},
                Spec:       v1.PodSpec{NodeName: "nonexistant"},
            },
            prune: false,
        },
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
                Spec:       v1.PodSpec{NodeName: "new"},
            },
            prune: false,
        },
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
                Spec:       v1.PodSpec{NodeName: "old"},
            },
            prune: true,
        },
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
                Spec:       v1.PodSpec{NodeName: "older"},
            },
            prune: true,
        },
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
                Spec:       v1.PodSpec{NodeName: "oldest"},
            },
            prune: true,
        },
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
                Spec:       v1.PodSpec{NodeName: ""},
            },
            prune: false,
        },
        {
            pod: v1.Pod{
-               ObjectMeta: v1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
+               ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{}},
                Spec:       v1.PodSpec{NodeName: "nonexistant"},
            },
            prune: false,

@@ -1912,7 +1912,7 @@ func TestCheckPod(t *testing.T) {
    nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false)
    nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc)
    nc.nodeStore.Store.Add(&v1.Node{
-       ObjectMeta: v1.ObjectMeta{
+       ObjectMeta: metav1.ObjectMeta{
            Name: "new",
        },
        Status: v1.NodeStatus{

@@ -1922,7 +1922,7 @@ func TestCheckPod(t *testing.T) {
        },
    })
    nc.nodeStore.Store.Add(&v1.Node{
-       ObjectMeta: v1.ObjectMeta{
+       ObjectMeta: metav1.ObjectMeta{
            Name: "old",
        },
        Status: v1.NodeStatus{

@@ -1932,7 +1932,7 @@ func TestCheckPod(t *testing.T) {
        },
    })
    nc.nodeStore.Store.Add(&v1.Node{
-       ObjectMeta: v1.ObjectMeta{
+       ObjectMeta: metav1.ObjectMeta{
            Name: "older",
        },
        Status: v1.NodeStatus{

@@ -1942,7 +1942,7 @@ func TestCheckPod(t *testing.T) {
        },
    })
    nc.nodeStore.Store.Add(&v1.Node{
-       ObjectMeta: v1.ObjectMeta{
+       ObjectMeta: metav1.ObjectMeta{
            Name: "oldest",
        },
        Status: v1.NodeStatus{
2 vendor/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue.go generated vendored

@@ -22,7 +22,7 @@ import (
    "time"

    "k8s.io/apimachinery/pkg/util/sets"
-   "k8s.io/kubernetes/pkg/util/flowcontrol"
+   "k8s.io/client-go/util/flowcontrol"

    "github.com/golang/glog"
)
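The only change here is the import path: the token-bucket rate limiter the eviction queue uses now lives in client-go. A sketch of how such a limiter gates work (NewTokenBucketRateLimiter exists under both the old and the new path; the qps/burst values, workItems slice, and process function are made up for illustration):

// Gate a batch of work on a token-bucket limiter, as the rate-limited
// queue in this file does for node evictions.
limiter := flowcontrol.NewTokenBucketRateLimiter(0.1 /* qps */, 10 /* burst */)
for _, item := range workItems {
	if !limiter.TryAccept() {
		break // out of tokens; pick the item up again on the next sync
	}
	process(item)
}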
2 vendor/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue_test.go generated vendored

@@ -22,7 +22,7 @@ import (
    "time"

    "k8s.io/apimachinery/pkg/util/sets"
-   "k8s.io/kubernetes/pkg/util/flowcontrol"
+   "k8s.io/client-go/util/flowcontrol"
)

func CheckQueueEq(lhs []string, rhs TimedQueue) bool {
3 vendor/k8s.io/kubernetes/pkg/controller/node/testutil/BUILD generated vendored

@@ -17,13 +17,14 @@ go_library(
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/clientset/fake:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
-       "//pkg/util/clock:go_default_library",
        "//pkg/util/node:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/api/errors",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/watch",
+       "//vendor:k8s.io/client-go/util/clock",
    ],
)
23 vendor/k8s.io/kubernetes/pkg/controller/node/testutil/test_utils.go generated vendored

@@ -25,14 +25,15 @@ import (
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/watch"
+   "k8s.io/client-go/util/clock"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
    v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
-   "k8s.io/kubernetes/pkg/util/clock"
    utilnode "k8s.io/kubernetes/pkg/util/node"
)

@@ -129,7 +130,7 @@ func (m *FakeNodeHandler) Get(name string, opts metav1.GetOptions) (*v1.Node, er
}

// List returns a list of Nodes from the fake store.
-func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
+func (m *FakeNodeHandler) List(opts metav1.ListOptions) (*v1.NodeList, error) {
    m.lock.Lock()
    defer func() {
        m.RequestCount++

@@ -159,7 +160,7 @@ func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
}

// Delete delets a Node from the fake store.
-func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
+func (m *FakeNodeHandler) Delete(id string, opt *metav1.DeleteOptions) error {
    m.lock.Lock()
    defer func() {
        m.RequestCount++

@@ -173,7 +174,7 @@ func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
}

// DeleteCollection deletes a collection of Nodes from the fake store.
-func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts v1.ListOptions) error {
+func (m *FakeNodeHandler) DeleteCollection(opt *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
    return nil
}

@@ -214,12 +215,12 @@ func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*v1.Node, e
}

// Watch watches Nodes in a fake store.
-func (m *FakeNodeHandler) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (m *FakeNodeHandler) Watch(opts metav1.ListOptions) (watch.Interface, error) {
    return watch.NewFake(), nil
}

// Patch patches a Node in the fake store.
-func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
+func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
    return nil, nil
}

@@ -262,10 +263,10 @@ func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, mes
    t := metav1.Time{Time: f.clock.Now()}
    namespace := ref.Namespace
    if namespace == "" {
-       namespace = v1.NamespaceDefault
+       namespace = metav1.NamespaceDefault
    }
    return &v1.Event{
-       ObjectMeta: v1.ObjectMeta{
+       ObjectMeta: metav1.ObjectMeta{
            Name:      fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
            Namespace: namespace,
        },

@@ -291,7 +292,7 @@ func NewFakeRecorder() *FakeRecorder {
// NewNode is a helper function for creating Nodes for testing.
func NewNode(name string) *v1.Node {
    return &v1.Node{
-       ObjectMeta: v1.ObjectMeta{Name: name},
+       ObjectMeta: metav1.ObjectMeta{Name: name},
        Spec: v1.NodeSpec{
            ExternalID: name,
        },

@@ -307,7 +308,7 @@ func NewNode(name string) *v1.Node {
// NewPod is a helper function for creating Pods for testing.
func NewPod(name, host string) *v1.Pod {
    pod := &v1.Pod{
-       ObjectMeta: v1.ObjectMeta{
+       ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      name,
        },

@@ -338,7 +339,7 @@ func contains(node *v1.Node, nodes []*v1.Node) bool {
// GetZones returns list of zones for all Nodes stored in FakeNodeHandler
func GetZones(nodeHandler *FakeNodeHandler) []string {
-   nodes, _ := nodeHandler.List(v1.ListOptions{})
+   nodes, _ := nodeHandler.List(metav1.ListOptions{})
    zones := sets.NewString()
    for _, node := range nodes.Items {
        zones.Insert(utilnode.GetZoneKey(&node))
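With the signature changes above, the test fake now presents the same metav1-based surface as the real client. A sketch of how the node controller tests drive it, using only the fields and helpers this file defines (FakeNodeHandler.Existing, RequestCount, NewNode):

fakeNodeHandler := &testutil.FakeNodeHandler{
	Existing: []*v1.Node{testutil.NewNode("node0"), testutil.NewNode("node1")},
}
// Each call is counted via RequestCount, which tests assert on.
nodes, err := fakeNodeHandler.List(metav1.ListOptions{})
if err != nil {
	// handle the error (the fake only fails when configured to)
}
_ = nodes.Items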
2 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/BUILD generated vendored

@@ -54,7 +54,6 @@ go_test(
        "//pkg/client/clientset_generated/clientset/fake:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
        "//pkg/client/record:go_default_library",
-       "//pkg/client/restclient:go_default_library",
        "//pkg/client/testing/core:go_default_library",
        "//pkg/controller/podautoscaler/metrics:go_default_library",
        "//vendor:github.com/stretchr/testify/assert",

@@ -62,6 +61,7 @@ go_test(
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/watch",
+       "//vendor:k8s.io/client-go/rest",
        "//vendor:k8s.io/heapster/metrics/api/v1/types",
        "//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
    ],
8 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go generated vendored

@@ -75,11 +75,11 @@ var upscaleForbiddenWindow = 3 * time.Minute
func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, cache.Controller) {
    return cache.NewInformer(
        &cache.ListWatch{
-           ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
-               return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).List(options)
+           ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+               return controller.hpaNamespacer.HorizontalPodAutoscalers(metav1.NamespaceAll).List(options)
            },
-           WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
-               return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).Watch(options)
+           WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+               return controller.hpaNamespacer.HorizontalPodAutoscalers(metav1.NamespaceAll).Watch(options)
            },
        },
        &autoscaling.HorizontalPodAutoscaler{},
12 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go generated vendored

@@ -30,6 +30,7 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
+   restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/api/v1"

@@ -38,7 +39,6 @@ import (
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
    v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
    "k8s.io/kubernetes/pkg/client/record"
-   "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/client/testing/core"
    "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"

@@ -153,7 +153,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
        obj := &autoscaling.HorizontalPodAutoscalerList{
            Items: []autoscaling.HorizontalPodAutoscaler{
                {
-                   ObjectMeta: v1.ObjectMeta{
+                   ObjectMeta: metav1.ObjectMeta{
                        Name:      hpaName,
                        Namespace: namespace,
                        SelfLink:  "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,

@@ -194,7 +194,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
        defer tc.Unlock()

        obj := &extensions.Scale{
-           ObjectMeta: v1.ObjectMeta{
+           ObjectMeta: metav1.ObjectMeta{
                Name:      tc.resource.name,
                Namespace: namespace,
            },

@@ -214,7 +214,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
        defer tc.Unlock()

        obj := &extensions.Scale{
-           ObjectMeta: v1.ObjectMeta{
+           ObjectMeta: metav1.ObjectMeta{
                Name:      tc.resource.name,
                Namespace: namespace,
            },

@@ -234,7 +234,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
        defer tc.Unlock()

        obj := &extensions.Scale{
-           ObjectMeta: v1.ObjectMeta{
+           ObjectMeta: metav1.ObjectMeta{
                Name:      tc.resource.name,
                Namespace: namespace,
            },

@@ -270,7 +270,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
                    },
                },
            },
-           ObjectMeta: v1.ObjectMeta{
+           ObjectMeta: metav1.ObjectMeta{
                Name:      podName,
                Namespace: namespace,
                Labels: map[string]string{
4 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/BUILD generated vendored

@@ -20,6 +20,7 @@ go_library(
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
        "//vendor:github.com/golang/glog",
+       "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/labels",
        "//vendor:k8s.io/heapster/metrics/api/v1/types",
        "//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",

@@ -36,11 +37,12 @@ go_test(
        "//pkg/api/unversioned:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/clientset/fake:go_default_library",
-       "//pkg/client/restclient:go_default_library",
        "//pkg/client/testing/core:go_default_library",
        "//vendor:github.com/stretchr/testify/assert",
+       "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/labels",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
+       "//vendor:k8s.io/client-go/rest",
        "//vendor:k8s.io/heapster/metrics/api/v1/types",
        "//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
    ],
9 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go generated vendored

@@ -23,13 +23,14 @@ import (
    "time"

    "github.com/golang/glog"
-   heapster "k8s.io/heapster/metrics/api/v1/types"
-   metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"

+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+
+   heapster "k8s.io/heapster/metrics/api/v1/types"
+   metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
)

// PodResourceInfo contains pod resourcemetric values as a map from pod names to

@@ -128,7 +129,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
}

func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
-   podList, err := h.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
+   podList, err := h.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
    }
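GetRawMetric resolves its target pods by label selector before asking Heapster for their metrics; only the listing half changes in this commit. A sketch of that step (podsForSelector is a hypothetical wrapper name; v1core.PodsGetter is the typed-client interface the file already imports):

// podsForSelector mirrors the first step of GetRawMetric: render a
// labels.Selector into the string form the list API expects.
func podsForSelector(podsGetter v1core.PodsGetter, namespace string, selector labels.Selector) (*v1.PodList, error) {
	return podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
}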
5 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client_test.go generated vendored

@@ -23,13 +23,14 @@ import (
    "testing"
    "time"

+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
+   restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
-   "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/client/testing/core"

    heapster "k8s.io/heapster/metrics/api/v1/types"

@@ -161,7 +162,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
    return v1.Pod{
-       ObjectMeta: v1.ObjectMeta{
+       ObjectMeta: metav1.ObjectMeta{
            Name:      podName,
            Namespace: namespace,
            Labels:    podLabels,
5 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go generated vendored

@@ -21,6 +21,7 @@ import (
    "math"
    "time"

+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/kubernetes/pkg/api/v1"

@@ -48,7 +49,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
        return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
    }

-   podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
+   podList, err := c.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
    }

@@ -156,7 +157,7 @@ func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtili
        return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v", metricName, err)
    }

-   podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
+   podList, err := c.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
    }
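Once the pods and their metrics are in hand, GetResourceReplicas and GetMetricReplicas scale the replica count by the ratio of observed to target utilization. The core arithmetic, stripped of the tolerance band and missing-metric handling the real calculator adds (a simplification, not the vendored implementation):

// desiredReplicas computes ceil(currentUtilization/targetUtilization * currentReplicas),
// the proportional-scaling rule at the heart of the replica calculator.
func desiredReplicas(currentReplicas int32, currentUtilization, targetUtilization float64) int32 {
	usageRatio := currentUtilization / targetUtilization
	return int32(math.Ceil(usageRatio * float64(currentReplicas)))
}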
4 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator_test.go generated vendored

@@ -27,11 +27,11 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
+   restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
-   "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/client/testing/core"
    "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"

@@ -100,7 +100,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
                },
            },
        },
-       ObjectMeta: v1.ObjectMeta{
+       ObjectMeta: metav1.ObjectMeta{
            Name:      podName,
            Namespace: testNamespace,
            Labels: map[string]string{
2 vendor/k8s.io/kubernetes/pkg/controller/podgc/BUILD generated vendored

@@ -19,10 +19,12 @@ go_library(
        "//pkg/api/v1:go_default_library",
        "//pkg/client/cache:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
+       "//pkg/client/legacylisters:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/informers:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//vendor:github.com/golang/glog",
+       "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/labels",
        "//vendor:k8s.io/apimachinery/pkg/util/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
8 vendor/k8s.io/kubernetes/pkg/controller/podgc/gc_controller.go generated vendored

@@ -21,6 +21,7 @@ import (
    "sync"
    "time"

+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/sets"

@@ -28,6 +29,7 @@ import (
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/cache"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+   "k8s.io/kubernetes/pkg/client/legacylisters"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/controller/informers"
    "k8s.io/kubernetes/pkg/util/metrics"

@@ -49,7 +51,7 @@ type PodGCController struct {
    // will be null
    internalPodInformer cache.SharedIndexInformer

-   podStore      cache.StoreToPodLister
+   podStore      listers.StoreToPodLister
    podController cache.Controller

    deletePod func(namespace, name string) error

@@ -65,7 +67,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInfor
        terminatedPodThreshold: terminatedPodThreshold,
        deletePod: func(namespace, name string) error {
            glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name)
-           return kubeClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0))
+           return kubeClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
        },
    }

@@ -155,7 +157,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) {
    glog.V(4).Infof("GC'ing orphaned")
    // We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible.
-   nodes, err := gcc.kubeClient.Core().Nodes().List(v1.ListOptions{})
+   nodes, err := gcc.kubeClient.Core().Nodes().List(metav1.ListOptions{})
    if err != nil {
        return
    }
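The pod GC deletes with a zero grace period, i.e. a force delete; only the options constructor's package changes here. As a sketch (metav1.NewDeleteOptions(0) is the helper the new code calls; forceDeletePod is a hypothetical wrapper name):

// forceDeletePod issues an immediate deletion, bypassing the graceful
// termination window, exactly as the injected deletePod hook above does.
func forceDeletePod(kubeClient clientset.Interface, namespace, name string) error {
	return kubeClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
}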
6 vendor/k8s.io/kubernetes/pkg/controller/podgc/gc_controller_test.go generated vendored

@@ -113,7 +113,7 @@ func TestGCTerminated(t *testing.T) {
    for _, pod := range test.pods {
        creationTime = creationTime.Add(1 * time.Hour)
        gcc.podStore.Indexer.Add(&v1.Pod{
-           ObjectMeta: v1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
+           ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
            Status:     v1.PodStatus{Phase: pod.phase},
            Spec:       v1.PodSpec{NodeName: "node"},
        })

@@ -182,7 +182,7 @@ func TestGCOrphaned(t *testing.T) {
    for _, pod := range test.pods {
        creationTime = creationTime.Add(1 * time.Hour)
        gcc.podStore.Indexer.Add(&v1.Pod{
-           ObjectMeta: v1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
+           ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
            Status:     v1.PodStatus{Phase: pod.phase},
            Spec:       v1.PodSpec{NodeName: "node"},
        })

@@ -261,7 +261,7 @@ func TestGCUnscheduledTerminating(t *testing.T) {
    for _, pod := range test.pods {
        creationTime = creationTime.Add(1 * time.Hour)
        gcc.podStore.Indexer.Add(&v1.Pod{
-           ObjectMeta: v1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime},
+           ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime},
                DeletionTimestamp: pod.deletionTimeStamp},
            Status: v1.PodStatus{Phase: pod.phase},
            Spec:   v1.PodSpec{NodeName: pod.nodeName},
6 vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD generated vendored

@@ -24,6 +24,7 @@ go_library(
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
+       "//pkg/client/legacylisters:go_default_library",
        "//pkg/client/record:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/informers:go_default_library",

@@ -53,18 +54,19 @@ go_test(
        "//pkg/client/cache:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/clientset_generated/clientset/fake:go_default_library",
-       "//pkg/client/restclient:go_default_library",
+       "//pkg/client/legacylisters:go_default_library",
        "//pkg/client/testing/core:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/informers:go_default_library",
        "//pkg/securitycontext:go_default_library",
-       "//pkg/util/testing:go_default_library",
        "//pkg/util/uuid:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
        "//vendor:k8s.io/apimachinery/pkg/watch",
+       "//vendor:k8s.io/client-go/rest",
+       "//vendor:k8s.io/client-go/util/testing",
    ],
)
5
vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go
generated
vendored

@@ -39,6 +39,7 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+ "k8s.io/kubernetes/pkg/client/legacylisters"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"

@@ -75,9 +76,9 @@ type ReplicaSetController struct {
expectations *controller.UIDTrackingControllerExpectations

// A store of ReplicaSets, populated by the rsController
- rsLister *cache.StoreToReplicaSetLister
+ rsLister *listers.StoreToReplicaSetLister
// A store of pods, populated by the podController
- podLister *cache.StoreToPodLister
+ podLister *listers.StoreToPodLister
// podListerSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podListerSynced cache.InformerSynced
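The StoreTo*Lister wrappers now live in pkg/client/legacylisters (whose package name is listers), while the Indexer machinery they wrap stays in pkg/client/cache. A sketch of the resulting pairing, using the same constructor arguments as the test helper in the next file:

package replicaset

import (
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/legacylisters"
)

// The lister type moves packages, but it still wraps a cache.Indexer,
// so construction spans both imports.
var podLister = &listers.StoreToPodLister{
	Indexer: cache.NewIndexer(
		cache.DeletionHandlingMetaNamespaceKeyFunc,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	),
}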
35
vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go
generated
vendored

@@ -33,6 +33,8 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
+ restclient "k8s.io/client-go/rest"
+ utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"

@@ -41,20 +43,19 @@ import (
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
- "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+ fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
- "k8s.io/kubernetes/pkg/client/restclient"
+ "k8s.io/kubernetes/pkg/client/legacylisters"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/securitycontext"
- utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/util/uuid"
)

func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
ret := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, burstReplicas, lookupCacheSize, false)
- ret.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
- ret.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+ ret.podLister = &listers.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
+ ret.rsLister = &listers.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
informers.Start(stopCh)
return ret
}

@@ -99,17 +100,17 @@ func getKey(rs *extensions.ReplicaSet, t *testing.T) string {
func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.ReplicaSet {
rs := &extensions.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
- Namespace: v1.NamespaceDefault,
+ Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: extensions.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selectorMap},
Template: v1.PodTemplateSpec{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": "foo",
"type": "production",

@@ -147,7 +148,7 @@ func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTran
conditions = append(conditions, condition)
}
return &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: rs.Namespace,
Labels: rs.Spec.Selector.MatchLabels,

@@ -463,22 +464,22 @@ func TestPodControllerLookup(t *testing.T) {
// pods without labels don't match any ReplicaSets
{
inRSs: []*extensions.ReplicaSet{
- {ObjectMeta: v1.ObjectMeta{Name: "basic"}}},
- pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "basic"}}},
+ pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll}},
outRSName: "",
},
// Matching labels, not namespace
{
inRSs: []*extensions.ReplicaSet{
{
- ObjectMeta: v1.ObjectMeta{Name: "foo"},
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: extensions.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRSName: "",
},

@@ -486,14 +487,14 @@ func TestPodControllerLookup(t *testing.T) {
{
inRSs: []*extensions.ReplicaSet{
{
- ObjectMeta: v1.ObjectMeta{Name: "bar", Namespace: "ns"},
+ ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: extensions.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRSName: "bar",
},

@@ -827,7 +828,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
for _, key := range expectedDels.List() {
nsName := strings.Split(key, "/")
podsToDelete = append(podsToDelete, &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: nsName[1],
Namespace: nsName[0],
Labels: rsSpec.Spec.Selector.MatchLabels,

@@ -868,7 +869,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
}
nsName := strings.Split(expectedDel.List()[0], "/")
lastPod := &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: nsName[1],
Namespace: nsName[0],
Labels: rsSpec.Spec.Selector.MatchLabels,

@@ -1082,7 +1083,7 @@ func TestDeletionTimestamp(t *testing.T) {
// An update to the pod (including an update to the deletion timestamp)
// should not be counted as a second delete.
secondPod := &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: pod.Namespace,
Name: "secondPod",
Labels: pod.Labels,
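Besides the metav1 fixture changes, note the renamed fake import: aliasing it fakeclientset keeps the identifier distinct from the other fakes these tests pull in. A compressed sketch of the helper's wiring under that alias; the burst and lookup-cache sizes are illustrative, and newTestController is not a function from the diff:

package replicaset

import (
	fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/informers"
)

// newTestController mirrors testNewReplicaSetControllerFromClient above:
// a fake clientset feeds the shared informer factory, which supplies the
// controller's ReplicaSet and Pod informers.
func newTestController(stopCh chan struct{}) *ReplicaSetController {
	client := fakeclientset.NewSimpleClientset()
	f := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
	rsc := NewReplicaSetController(f.ReplicaSets(), f.Pods(), client, 500, 0, false)
	f.Start(stopCh)
	return rsc
}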
5
vendor/k8s.io/kubernetes/pkg/controller/replication/BUILD
generated
vendored

@@ -21,6 +21,7 @@ go_library(
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
+ "//pkg/client/legacylisters:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",

@@ -50,18 +51,18 @@ go_test(
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
- "//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/securitycontext:go_default_library",
- "//pkg/util/testing:go_default_library",
"//pkg/util/uuid:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/watch",
+ "//vendor:k8s.io/client-go/rest",
+ "//vendor:k8s.io/client-go/util/testing",
],
)
5
vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go
generated
vendored

@@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+ "k8s.io/kubernetes/pkg/client/legacylisters"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"

@@ -82,9 +83,9 @@ type ReplicationManager struct {
expectations *controller.UIDTrackingControllerExpectations

// A store of replication controllers, populated by the rcController
- rcLister cache.StoreToReplicationControllerLister
+ rcLister listers.StoreToReplicationControllerLister
// A store of pods, populated by the podController
- podLister cache.StoreToPodLister
+ podLister listers.StoreToPodLister
// Watches changes to all pods
podController cache.Controller
// podListerSynced returns true if the pod store has been synced at least once.
38
vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_test.go
generated
vendored

@@ -32,6 +32,8 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
+ restclient "k8s.io/client-go/rest"
+ utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"

@@ -39,12 +41,10 @@ import (
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
- "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+ fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
- "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/securitycontext"
- utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/util/uuid"
)

@@ -62,17 +62,17 @@ func getKey(rc *v1.ReplicationController, t *testing.T) string {
func newReplicationController(replicas int) *v1.ReplicationController {
rc := &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
- Namespace: v1.NamespaceDefault,
+ Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: v1.ReplicationControllerSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: map[string]string{"foo": "bar"},
Template: &v1.PodTemplateSpec{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": "foo",
"type": "production",

@@ -110,7 +110,7 @@ func newPod(name string, rc *v1.ReplicationController, status v1.PodPhase, lastT
conditions = append(conditions, condition)
}
return &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: rc.Spec.Selector,
Namespace: rc.Namespace,

@@ -397,22 +397,22 @@ func TestPodControllerLookup(t *testing.T) {
// pods without labels don't match any rcs
{
inRCs: []*v1.ReplicationController{
- {ObjectMeta: v1.ObjectMeta{Name: "basic"}}},
- pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "basic"}}},
+ pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll}},
outRCName: "",
},
// Matching labels, not namespace
{
inRCs: []*v1.ReplicationController{
{
- ObjectMeta: v1.ObjectMeta{Name: "foo"},
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"foo": "bar"},
},
},
},
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRCName: "",
},

@@ -420,14 +420,14 @@ func TestPodControllerLookup(t *testing.T) {
{
inRCs: []*v1.ReplicationController{
{
- ObjectMeta: v1.ObjectMeta{Name: "bar", Namespace: "ns"},
+ ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"foo": "bar"},
},
},
},
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRCName: "bar",
},

@@ -752,7 +752,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
for _, key := range expectedDels.List() {
nsName := strings.Split(key, "/")
podsToDelete = append(podsToDelete, &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: nsName[1],
Namespace: nsName[0],
Labels: controllerSpec.Spec.Selector,

@@ -793,7 +793,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
}
nsName := strings.Split(expectedDel.List()[0], "/")
lastPod := &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: nsName[1],
Namespace: nsName[0],
Labels: controllerSpec.Spec.Selector,

@@ -994,7 +994,7 @@ func TestDeletionTimestamp(t *testing.T) {
// An update to the pod (including an update to the deletion timestamp)
// should not be counted as a second delete.
secondPod := &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: pod.Namespace,
Name: "secondPod",
Labels: pod.Labels,

@@ -1047,7 +1047,7 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
for k := 0; k < 10; k++ {
podName := fmt.Sprintf("pod-%d-%d", j, k)
pods = append(pods, v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Labels: map[string]string{"rcName": rcName},

@@ -1062,7 +1062,7 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
for j := 0; j < 10; j++ {
rcName := fmt.Sprintf("rc-%d", j)
manager.rcLister.Indexer.Add(&v1.ReplicationController{
- ObjectMeta: v1.ObjectMeta{Name: rcName, Namespace: ns},
+ ObjectMeta: metav1.ObjectMeta{Name: rcName, Namespace: ns},
Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"rcName": rcName},
},

@@ -1092,7 +1092,7 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
for j := 0; j < replicaNum; j++ {
podName := fmt.Sprintf("pod-%d-%d", i, j)
pods = append(pods, v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: "foo",
Labels: map[string]string{"rcName": rcName},

@@ -1104,7 +1104,7 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
for i := 0; i < rcNum; i++ {
rcName := fmt.Sprintf("rc-%d", i)
manager.rcLister.Indexer.Add(&v1.ReplicationController{
- ObjectMeta: v1.ObjectMeta{Name: rcName, Namespace: "foo"},
+ ObjectMeta: metav1.ObjectMeta{Name: rcName, Namespace: "foo"},
Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"rcName": rcName},
},
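The namespace constants ride along with ObjectMeta: metav1.NamespaceDefault replaces v1.NamespaceDefault in fixtures, and metav1.NamespaceAll (the empty string) replaces v1.NamespaceAll in cross-namespace lookups. A minimal fixture sketch under those constants (fixtureRC is an illustrative name, not from the diff):

package replication

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// fixtureRC shows the post-move constants: NamespaceDefault for a
// concrete object, NamespaceAll when a test lists across namespaces.
func fixtureRC() *v1.ReplicationController {
	return &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{Name: "foobar", Namespace: metav1.NamespaceDefault},
		Spec:       v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}},
	}
}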
2
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/BUILD
generated
vendored

@@ -29,6 +29,7 @@ go_library(
"//pkg/util/workqueue:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/api/meta",
+ "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",

@@ -55,6 +56,7 @@ go_test(
"//pkg/quota/generic:go_default_library",
"//pkg/quota/install:go_default_library",
"//pkg/util/intstr:go_default_library",
+ "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
41
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go
generated
vendored

@@ -22,6 +22,7 @@ import (
"github.com/golang/glog"

"k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"

@@ -148,11 +149,11 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO move to informer when defined
_, result = cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
- return r.kubeClient.Core().Services(v1.NamespaceAll).List(options)
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ return r.kubeClient.Core().Services(metav1.NamespaceAll).List(options)
},
- WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
- return r.kubeClient.Core().Services(v1.NamespaceAll).Watch(options)
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ return r.kubeClient.Core().Services(metav1.NamespaceAll).Watch(options)
},
},
&v1.Service{},

@@ -166,11 +167,11 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO move to informer when defined
_, result = cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
- return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ return r.kubeClient.Core().ReplicationControllers(metav1.NamespaceAll).List(options)
},
- WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
- return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ return r.kubeClient.Core().ReplicationControllers(metav1.NamespaceAll).Watch(options)
},
},
&v1.ReplicationController{},

@@ -189,11 +190,11 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO (derekwaynecarr) remove me when we can require a sharedInformerFactory in all code paths...
_, result = cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
- return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options)
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ return r.kubeClient.Core().PersistentVolumeClaims(metav1.NamespaceAll).List(options)
},
- WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
- return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ return r.kubeClient.Core().PersistentVolumeClaims(metav1.NamespaceAll).Watch(options)
},
},
&v1.PersistentVolumeClaim{},

@@ -206,11 +207,11 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO move to informer when defined
_, result = cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
- return r.kubeClient.Core().Secrets(v1.NamespaceAll).List(options)
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ return r.kubeClient.Core().Secrets(metav1.NamespaceAll).List(options)
},
- WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
- return r.kubeClient.Core().Secrets(v1.NamespaceAll).Watch(options)
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ return r.kubeClient.Core().Secrets(metav1.NamespaceAll).Watch(options)
},
},
&v1.Secret{},

@@ -223,11 +224,11 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO move to informer when defined
_, result = cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
- return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).List(options)
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ return r.kubeClient.Core().ConfigMaps(metav1.NamespaceAll).List(options)
},
- WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
- return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).Watch(options)
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ return r.kubeClient.Core().ConfigMaps(metav1.NamespaceAll).Watch(options)
},
},
&v1.ConfigMap{},
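All five hand-rolled informers in this factory get the identical edit: ListFunc and WatchFunc now take metav1.ListOptions, and the all-namespaces selector is metav1.NamespaceAll. A sketch of one such informer in isolation, assuming the vendored paths above; newServiceInformer is an illustrative name, and the zero resync period stands in for the factory's configured one:

package resourcequota

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// newServiceInformer shows the post-update ListWatch shape; the Store
// return value is discarded here just as the factory code discards it.
func newServiceInformer(c clientset.Interface, h cache.ResourceEventHandlerFuncs) cache.Controller {
	_, informer := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				return c.Core().Services(metav1.NamespaceAll).List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return c.Core().Services(metav1.NamespaceAll).Watch(options)
			},
		},
		&v1.Service{},
		0, // no resync in this sketch
		h,
	)
	return informer
}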
15
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller_test.go
generated
vendored

@@ -19,6 +19,7 @@ package resourcequota
import (
"testing"

+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/api"

@@ -47,11 +48,11 @@ func TestPodReplenishmentUpdateFunc(t *testing.T) {
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldPod := &v1.Pod{
- ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: v1.PodStatus{Phase: v1.PodRunning},
}
newPod := &v1.Pod{
- ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: v1.PodStatus{Phase: v1.PodFailed},
}
updateFunc := PodReplenishmentUpdateFunc(&options)

@@ -72,7 +73,7 @@ func TestObjectReplenishmentDeleteFunc(t *testing.T) {
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldPod := &v1.Pod{
- ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: v1.PodStatus{Phase: v1.PodRunning},
}
deleteFunc := ObjectReplenishmentDeleteFunc(&options)

@@ -93,7 +94,7 @@ func TestServiceReplenishmentUpdateFunc(t *testing.T) {
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldService := &v1.Service{
- ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{{

@@ -103,7 +104,7 @@ func TestServiceReplenishmentUpdateFunc(t *testing.T) {
},
}
newService := &v1.Service{
- ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
Ports: []v1.ServicePort{{

@@ -127,7 +128,7 @@ func TestServiceReplenishmentUpdateFunc(t *testing.T) {
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldService = &v1.Service{
- ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{{

@@ -137,7 +138,7 @@ func TestServiceReplenishmentUpdateFunc(t *testing.T) {
},
}
newService = &v1.Service{
- ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{{
11
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go
generated
vendored

@@ -21,6 +21,7 @@ import (

"github.com/golang/glog"

+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"

@@ -94,11 +95,11 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
// build the controller that observes quota
rq.rqIndexer, rq.rqController = cache.NewIndexerInformer(
&cache.ListWatch{
- ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
- return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).List(options)
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ return rq.kubeClient.Core().ResourceQuotas(metav1.NamespaceAll).List(options)
},
- WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
- return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).Watch(options)
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ return rq.kubeClient.Core().ResourceQuotas(metav1.NamespaceAll).Watch(options)
},
},
&v1.ResourceQuota{},

@@ -300,7 +301,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota v1.Resource
// Create a usage object that is based on the quota resource version that will handle updates
// by default, we preserve the past usage observation, and set hard to the current spec
usage := api.ResourceQuota{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: resourceQuota.Name,
Namespace: resourceQuota.Namespace,
ResourceVersion: resourceQuota.ResourceVersion,
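The last hunk is worth a second look: the internal api.ResourceQuota now embeds the same metav1.ObjectMeta as the versioned type, so identity fields copy across API versions as a plain struct fill. A sketch of that construction (newUsage is an illustrative helper, not from the diff):

package resourcequota

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api"
)

// newUsage builds the internal usage object the way syncResourceQuota
// does: name, namespace, and resource version carried over verbatim.
func newUsage(name, namespace, resourceVersion string) api.ResourceQuota {
	return api.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name:            name,
			Namespace:       namespace,
			ResourceVersion: resourceVersion,
		},
	}
}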
25
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller_test.go
generated
vendored

@@ -20,6 +20,7 @@ import (
"strings"
"testing"

+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"

@@ -54,7 +55,7 @@ func TestSyncResourceQuota(t *testing.T) {
podList := v1.PodList{
Items: []v1.Pod{
{
- ObjectMeta: v1.ObjectMeta{Name: "pod-running", Namespace: "testing"},
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-running", Namespace: "testing"},
Status: v1.PodStatus{Phase: v1.PodRunning},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "vol"}},

@@ -62,7 +63,7 @@ func TestSyncResourceQuota(t *testing.T) {
},
},
{
- ObjectMeta: v1.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
Status: v1.PodStatus{Phase: v1.PodRunning},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "vol"}},

@@ -70,7 +71,7 @@ func TestSyncResourceQuota(t *testing.T) {
},
},
{
- ObjectMeta: v1.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
+ ObjectMeta: metav1.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
Status: v1.PodStatus{Phase: v1.PodFailed},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "vol"}},

@@ -80,7 +81,7 @@ func TestSyncResourceQuota(t *testing.T) {
},
}
resourceQuota := v1.ResourceQuota{
- ObjectMeta: v1.ObjectMeta{Name: "quota", Namespace: "testing"},
+ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),

@@ -159,7 +160,7 @@ func TestSyncResourceQuota(t *testing.T) {

func TestSyncResourceQuotaSpecChange(t *testing.T) {
resourceQuota := v1.ResourceQuota{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "rq",
},

@@ -245,7 +246,7 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {
}
func TestSyncResourceQuotaSpecHardChange(t *testing.T) {
resourceQuota := v1.ResourceQuota{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "rq",
},

@@ -346,7 +347,7 @@ func TestSyncResourceQuotaSpecHardChange(t *testing.T) {

func TestSyncResourceQuotaNoChange(t *testing.T) {
resourceQuota := v1.ResourceQuota{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "rq",
},

@@ -424,7 +425,7 @@ func TestAddQuota(t *testing.T) {
name: "no status",
expectedPriority: true,
quota: &v1.ResourceQuota{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "rq",
},

@@ -439,7 +440,7 @@ func TestAddQuota(t *testing.T) {
name: "status, no usage",
expectedPriority: true,
quota: &v1.ResourceQuota{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "rq",
},

@@ -459,7 +460,7 @@ func TestAddQuota(t *testing.T) {
name: "status, mismatch",
expectedPriority: true,
quota: &v1.ResourceQuota{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "rq",
},

@@ -482,7 +483,7 @@ func TestAddQuota(t *testing.T) {
name: "status, missing usage, but don't care",
expectedPriority: false,
quota: &v1.ResourceQuota{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "rq",
},

@@ -502,7 +503,7 @@ func TestAddQuota(t *testing.T) {
name: "ready",
expectedPriority: false,
quota: &v1.ResourceQuota{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
2
vendor/k8s.io/kubernetes/pkg/controller/route/BUILD
generated
vendored

@@ -19,6 +19,7 @@ go_library(
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
+ "//pkg/client/legacylisters:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/util/metrics:go_default_library",

@@ -44,6 +45,7 @@ go_test(
"//pkg/client/testing/core:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
+ "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/types",
],
)
7
vendor/k8s.io/kubernetes/pkg/controller/route/routecontroller.go
generated
vendored

@@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+ "k8s.io/kubernetes/pkg/client/legacylisters"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"

@@ -55,7 +56,7 @@ type RouteController struct {
clusterCIDR *net.IPNet
// Node framework and store
nodeController cache.Controller
- nodeStore cache.StoreToNodeLister
+ nodeStore listers.StoreToNodeLister
}

func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {

@@ -71,10 +72,10 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterNam

rc.nodeStore.Store, rc.nodeController = cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return rc.kubeClient.Core().Nodes().List(options)
},
- WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return rc.kubeClient.Core().Nodes().Watch(options)
},
},
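Nodes are cluster-scoped, so unlike the namespaced watches earlier in this diff there is no namespace argument to retype; only the options parameter changes. A sketch of that list-watch in isolation (newNodeListWatch is an illustrative wrapper, not a function from the diff):

package route

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// newNodeListWatch isolates the list-watch the route controller feeds
// into cache.NewInformer; cluster-scoped, hence no namespace.
func newNodeListWatch(c clientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return c.Core().Nodes().List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return c.Core().Nodes().Watch(options)
		},
	}
}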
7
vendor/k8s.io/kubernetes/pkg/controller/route/routecontroller_test.go
generated
vendored

@@ -21,6 +21,7 @@ import (
"testing"
"time"

+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"

@@ -70,9 +71,9 @@ func TestIsResponsibleForRoute(t *testing.T) {

func TestReconcile(t *testing.T) {
cluster := "my-k8s"
- node1 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-1", UID: "01"}, Spec: v1.NodeSpec{PodCIDR: "10.120.0.0/24"}}
- node2 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: "10.120.1.0/24"}}
- nodeNoCidr := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: ""}}
+ node1 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1", UID: "01"}, Spec: v1.NodeSpec{PodCIDR: "10.120.0.0/24"}}
+ node2 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: "10.120.1.0/24"}}
+ nodeNoCidr := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: ""}}

testCases := []struct {
nodes []v1.Node
5
vendor/k8s.io/kubernetes/pkg/controller/service/BUILD
generated
vendored

@@ -20,14 +20,16 @@ go_library(
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
+ "//pkg/client/legacylisters:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
- "//pkg/fields:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
+ "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
+ "//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/wait",

@@ -45,6 +47,7 @@ go_test(
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
+ "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/types",
],
)
22
vendor/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go
generated
vendored

@@ -26,6 +26,8 @@ import (

"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"

@@ -34,10 +36,10 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
+ "k8s.io/kubernetes/pkg/client/legacylisters"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
- "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/pkg/util/workqueue"
)

@@ -85,12 +87,12 @@ type ServiceController struct {
zone cloudprovider.Zone
cache *serviceCache
// A store of services, populated by the serviceController
- serviceStore cache.StoreToServiceLister
+ serviceStore listers.StoreToServiceLister
// Watches changes to all services
serviceController cache.Controller
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
- nodeLister cache.StoreToNodeLister
+ nodeLister listers.StoreToNodeLister
// services that need to be synced
workingQueue workqueue.DelayingInterface
}

@@ -114,18 +116,18 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
cache: &serviceCache{serviceMap: make(map[string]*cachedService)},
eventBroadcaster: broadcaster,
eventRecorder: recorder,
- nodeLister: cache.StoreToNodeLister{
+ nodeLister: listers.StoreToNodeLister{
Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
},
workingQueue: workqueue.NewDelayingQueue(),
}
s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{
- ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
- return s.kubeClient.Core().Services(v1.NamespaceAll).List(options)
+ ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) {
+ return s.kubeClient.Core().Services(metav1.NamespaceAll).List(options)
},
- WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
- return s.kubeClient.Core().Services(v1.NamespaceAll).Watch(options)
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ return s.kubeClient.Core().Services(metav1.NamespaceAll).Watch(options)
},
},
&v1.Service{},

@@ -175,7 +177,7 @@ func (s *ServiceController) Run(workers int) {
for i := 0; i < workers; i++ {
go wait.Until(s.worker, time.Second, wait.NeverStop)
}
- nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
+ nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", metav1.NamespaceAll, fields.Everything())
cache.NewReflector(nodeLW, &v1.Node{}, s.nodeLister.Store, 0).Run()
go wait.Until(s.nodeSyncLoop, nodeSyncPeriod, wait.NeverStop)
}

@@ -600,7 +602,7 @@ func includeNodeFromNodeList(node *v1.Node) bool {
return !node.Spec.Unschedulable
}

- func getNodeConditionPredicate() cache.NodeConditionPredicate {
+ func getNodeConditionPredicate() listers.NodeConditionPredicate {
return func(node *v1.Node) bool {
// We add the master to the node list, but its unschedulable. So we use this to filter
// the master.
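Two renames travel together here: the fields package now comes from apimachinery, and the node reflector's NewListWatchFromClient call takes the metav1 namespace constant. A sketch of that reflector wiring, assuming the same vendored paths (watchNodes is an illustrative wrapper around the lines in Run):

package service

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// watchNodes reproduces the updated Run wiring: a field-unfiltered
// ListWatch on nodes via the core REST client, pumped into a Store.
func watchNodes(c clientset.Interface, store cache.Store) {
	nodeLW := cache.NewListWatchFromClient(c.Core().RESTClient(), "nodes", metav1.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &v1.Node{}, store, 0).Run()
}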
15
vendor/k8s.io/kubernetes/pkg/controller/service/servicecontroller_test.go
generated
vendored

@@ -20,6 +20,7 @@ import (
"reflect"
"testing"

+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"

@@ -30,7 +31,7 @@ import (
const region = "us-central"

func newService(name string, uid types.UID, serviceType v1.ServiceType) *v1.Service {
- return &v1.Service{ObjectMeta: v1.ObjectMeta{Name: name, Namespace: "namespace", UID: uid, SelfLink: testapi.Default.SelfLink("services", name)}, Spec: v1.ServiceSpec{Type: serviceType}}
+ return &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "namespace", UID: uid, SelfLink: testapi.Default.SelfLink("services", name)}, Spec: v1.ServiceSpec{Type: serviceType}}
}

func TestCreateExternalLoadBalancer(t *testing.T) {

@@ -41,7 +42,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
}{
{
service: &v1.Service{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "no-external-balancer",
Namespace: "default",
},

@@ -54,7 +55,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
},
{
service: &v1.Service{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "udp-service",
Namespace: "default",
SelfLink: testapi.Default.SelfLink("services", "udp-service"),

@@ -72,7 +73,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
},
{
service: &v1.Service{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "basic-service1",
Namespace: "default",
SelfLink: testapi.Default.SelfLink("services", "basic-service1"),

@@ -146,9 +147,9 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
// TODO: Finish converting and update comments
func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
nodes := []*v1.Node{
- {ObjectMeta: v1.ObjectMeta{Name: "node0"}},
- {ObjectMeta: v1.ObjectMeta{Name: "node1"}},
- {ObjectMeta: v1.ObjectMeta{Name: "node73"}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "node0"}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "node1"}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "node73"}},
}
table := []struct {
services []*v1.Service
9
vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/BUILD
generated
vendored

@@ -22,14 +22,14 @@ go_library(
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
+ "//pkg/client/legacylisters:go_default_library",
"//pkg/client/retry:go_default_library",
"//pkg/controller/informers:go_default_library",
- "//pkg/fields:go_default_library",
+ "//pkg/genericapiserver/registry/generic:go_default_library",
"//pkg/registry/core/secret:go_default_library",
"//pkg/registry/core/secret/storage:go_default_library",
"//pkg/registry/core/serviceaccount:go_default_library",
"//pkg/registry/core/serviceaccount/storage:go_default_library",
- "//pkg/registry/generic:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//pkg/storage/storagebackend:go_default_library",
"//pkg/util/metrics:go_default_library",

@@ -38,6 +38,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/api/meta",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
+ "//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/errors",

@@ -45,7 +46,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/watch",
- "//vendor:k8s.io/apiserver/pkg/request",
+ "//vendor:k8s.io/apiserver/pkg/endpoints/request",
],
)

@@ -62,12 +63,14 @@ go_test(
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
+ "//pkg/client/legacylisters:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//vendor:github.com/davecgh/go-spew/spew",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
+ "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/util/rand",
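One more path move hides in this BUILD file: the request-context helpers shift from k8s.io/apiserver/pkg/request to k8s.io/apiserver/pkg/endpoints/request. A one-function sketch of the new spelling; the genericapirequest alias and the NewDefaultContext call are assumptions about this vendoring, not lines from the diff:

package serviceaccount

import (
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
)

// requestContext shows the relocated import in use; tests typically
// build a default context like this before exercising the registries.
func requestContext() genericapirequest.Context {
	return genericapirequest.NewDefaultContext()
}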
Some files were not shown because too many files have changed in this diff.