Switch to github.com/golang/dep for vendoring

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2017-01-31 16:45:59 -08:00
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions


@@ -0,0 +1,74 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"namespace_controller.go",
"namespace_controller_utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/typed/discovery:go_default_library",
"//pkg/client/typed/dynamic:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/watch",
],
)
go_test(
name = "go_default_test",
srcs = ["namespace_controller_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/client/typed/discovery:go_default_library",
"//pkg/client/typed/dynamic:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/controller/namespace/OWNERS (generated, vendored, executable file)

@@ -0,0 +1,3 @@
reviewers:
- derekwaynecarr
- smarterclayton


@@ -0,0 +1,18 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// namespace contains a controller that handles namespace lifecycle
package namespace // import "k8s.io/kubernetes/pkg/controller/namespace"


@@ -0,0 +1,240 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package namespace
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/pkg/util/workqueue"
"github.com/golang/glog"
)
const (
// namespaceDeletionGracePeriod is the time period to wait before processing a received namespace event.
// This allows time for the following to occur:
// * lifecycle admission plugins on HA apiservers to also observe a namespace
// deletion and prevent new objects from being created in the terminating namespace
// * non-leader etcd servers to observe last-minute object creations in a namespace
// so this controller's cleanup can actually clean up all objects
namespaceDeletionGracePeriod = 5 * time.Second
)
// NamespaceController is responsible for performing actions dependent upon a namespace phase
type NamespaceController struct {
// client that purges namespace content, must have list/delete privileges on all content
kubeClient clientset.Interface
// clientPool manages a pool of dynamic clients
clientPool dynamic.ClientPool
// store that holds the namespaces
store cache.Store
// controller that observes the namespaces
controller cache.Controller
// namespaces that have been queued up for processing by workers
queue workqueue.RateLimitingInterface
// function that returns the list of preferred resources for namespace deletion
discoverResourcesFn func() ([]*metav1.APIResourceList, error)
// opCache is a cache to remember if a particular operation is not supported to aid dynamic client.
opCache *operationNotSupportedCache
// finalizerToken is the finalizer token managed by this controller
finalizerToken v1.FinalizerName
}
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(
kubeClient clientset.Interface,
clientPool dynamic.ClientPool,
discoverResourcesFn func() ([]*metav1.APIResourceList, error),
resyncPeriod time.Duration,
finalizerToken v1.FinalizerName) *NamespaceController {
opCache := &operationNotSupportedCache{
m: make(map[operationKey]bool),
}
// pre-fill opCache with the discovery info
//
// TODO(sttts): get rid of opCache and http 405 logic around it and trust discovery info
resources, err := discoverResourcesFn()
if err != nil {
glog.Fatalf("Failed to get supported resources: %v", err)
}
deletableGroupVersionResources := []schema.GroupVersionResource{}
for _, rl := range resources {
gv, err := schema.ParseGroupVersion(rl.GroupVersion)
if err != nil {
glog.Errorf("Failed to parse GroupVersion %q, skipping: %v", rl.GroupVersion, err)
continue
}
for _, r := range rl.APIResources {
gvr := schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: r.Name}
verbs := sets.NewString([]string(r.Verbs)...)
if !verbs.Has("delete") {
glog.V(6).Infof("Skipping resource %v because it cannot be deleted.", gvr)
}
for _, op := range []operation{operationList, operationDeleteCollection} {
if !verbs.Has(string(op)) {
opCache.setNotSupported(operationKey{op: op, gvr: gvr})
}
}
deletableGroupVersionResources = append(deletableGroupVersionResources, gvr)
}
}
// create the controller so we can inject the enqueue function
namespaceController := &NamespaceController{
kubeClient: kubeClient,
clientPool: clientPool,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
discoverResourcesFn: discoverResourcesFn,
opCache: opCache,
finalizerToken: finalizerToken,
}
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.Core().RESTClient().GetRateLimiter())
}
// configure the backing store/controller
store, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return kubeClient.Core().Namespaces().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return kubeClient.Core().Namespaces().Watch(options)
},
},
&v1.Namespace{},
resyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
namespace := obj.(*v1.Namespace)
namespaceController.enqueueNamespace(namespace)
},
UpdateFunc: func(oldObj, newObj interface{}) {
namespace := newObj.(*v1.Namespace)
namespaceController.enqueueNamespace(namespace)
},
},
)
namespaceController.store = store
namespaceController.controller = controller
return namespaceController
}
// enqueueNamespace adds an object to the controller work queue
// obj could be a *v1.Namespace, or a DeletionFinalStateUnknown item.
func (nm *NamespaceController) enqueueNamespace(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
// delay processing namespace events to allow HA api servers to observe namespace deletion,
// and HA etcd servers to observe last minute object creations inside the namespace
nm.queue.AddAfter(key, namespaceDeletionGracePeriod)
}
// worker processes the queue of namespace objects.
// Each namespace can be in the queue at most once.
// The system ensures that no two workers can process
// the same namespace at the same time.
func (nm *NamespaceController) worker() {
workFunc := func() bool {
key, quit := nm.queue.Get()
if quit {
return true
}
defer nm.queue.Done(key)
err := nm.syncNamespaceFromKey(key.(string))
if err == nil {
// no error, forget this entry and return
nm.queue.Forget(key)
return false
}
if estimate, ok := err.(*contentRemainingError); ok {
t := estimate.Estimate/2 + 1
glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t)
nm.queue.AddAfter(key, time.Duration(t)*time.Second)
} else {
// rather than wait for a full resync, re-add the namespace to the queue to be processed
nm.queue.AddRateLimited(key)
utilruntime.HandleError(err)
}
return false
}
for {
quit := workFunc()
if quit {
return
}
}
}
// syncNamespaceFromKey looks for a namespace with the specified key in its store and synchronizes it
func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
startTime := time.Now()
defer glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
obj, exists, err := nm.store.GetByKey(key)
if !exists {
glog.Infof("Namespace has been deleted %v", key)
return nil
}
if err != nil {
glog.Errorf("Unable to retrieve namespace %v from store: %v", key, err)
nm.queue.Add(key)
return err
}
namespace := obj.(*v1.Namespace)
return syncNamespace(nm.kubeClient, nm.clientPool, nm.opCache, nm.discoverResourcesFn, namespace, nm.finalizerToken)
}
// Run starts observing the system with the specified number of workers.
func (nm *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
go nm.controller.Run(stopCh)
for i := 0; i < workers; i++ {
go wait.Until(nm.worker, time.Second, stopCh)
}
<-stopCh
glog.Infof("Shutting down NamespaceController")
nm.queue.ShutDown()
}
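
For orientation, the sketch below shows how a caller could wire up and run this controller, loosely modeled on the way kube-controller-manager starts it in this era of the tree. The hard-coded API server address, the worker count, the resync period, and the use of Discovery().ServerPreferredNamespacedResources as the discovery function are illustrative assumptions, not something this commit adds.

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
	namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
)

func main() {
	// Placeholder API server address; a real caller would load this from a
	// kubeconfig or in-cluster configuration rather than hard-coding it.
	config := &restclient.Config{Host: "https://127.0.0.1:6443"}

	kubeClient, err := clientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// The controller needs a dynamic client pool so it can delete arbitrary
	// namespaced resources discovered at runtime.
	clientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)

	// Assumed discovery hook: a function returning the preferred namespaced
	// resources, which tells the controller what content to purge.
	discoverResourcesFn := kubeClient.Discovery().ServerPreferredNamespacedResources

	nc := namespacecontroller.NewNamespaceController(
		kubeClient,
		clientPool,
		discoverResourcesFn,
		5*time.Minute,          // resync period, illustrative
		v1.FinalizerKubernetes, // finalizer token owned by this controller
	)

	// Run blocks until stopCh is closed; a real caller would close it from a
	// shutdown or signal handler.
	stopCh := make(chan struct{})
	nc.Run(5, stopCh)
}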


@@ -0,0 +1,332 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package namespace
import (
"fmt"
"net/http"
"net/http/httptest"
"path"
"strings"
"sync"
"testing"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/client/typed/discovery"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
)
func TestFinalized(t *testing.T) {
testNamespace := &v1.Namespace{
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"a", "b"},
},
}
if finalized(testNamespace) {
t.Errorf("Unexpected result, namespace is not finalized")
}
testNamespace.Spec.Finalizers = []v1.FinalizerName{}
if !finalized(testNamespace) {
t.Errorf("Expected object to be finalized")
}
}
func TestFinalizeNamespaceFunc(t *testing.T) {
mockClient := &fake.Clientset{}
testNamespace := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: "test",
ResourceVersion: "1",
},
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"kubernetes", "other"},
},
}
finalizeNamespace(mockClient, testNamespace, v1.FinalizerKubernetes)
actions := mockClient.Actions()
if len(actions) != 1 {
t.Errorf("Expected 1 mock client action, but got %v", len(actions))
}
if !actions[0].Matches("create", "namespaces") || actions[0].GetSubresource() != "finalize" {
t.Errorf("Expected finalize-namespace action %v", actions[0])
}
finalizers := actions[0].(core.CreateAction).GetObject().(*v1.Namespace).Spec.Finalizers
if len(finalizers) != 1 {
t.Errorf("There should be a single finalizer remaining")
}
if "other" != string(finalizers[0]) {
t.Errorf("Unexpected finalizer value, %v", finalizers[0])
}
}
func testSyncNamespaceThatIsTerminating(t *testing.T, versions *metav1.APIVersions) {
now := metav1.Now()
namespaceName := "test"
testNamespacePendingFinalize := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: namespaceName,
ResourceVersion: "1",
DeletionTimestamp: &now,
},
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"kubernetes"},
},
Status: v1.NamespaceStatus{
Phase: v1.NamespaceTerminating,
},
}
testNamespaceFinalizeComplete := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: namespaceName,
ResourceVersion: "1",
DeletionTimestamp: &now,
},
Spec: v1.NamespaceSpec{},
Status: v1.NamespaceStatus{
Phase: v1.NamespaceTerminating,
},
}
// when deleting all content, we will do a GET of a collection and a DELETE of a collection by default
dynamicClientActionSet := sets.NewString()
resources := testResources()
groupVersionResources, _ := discovery.GroupVersionResources(resources)
for groupVersionResource := range groupVersionResources {
urlPath := path.Join([]string{
dynamic.LegacyAPIPathResolverFunc(schema.GroupVersionKind{Group: groupVersionResource.Group, Version: groupVersionResource.Version}),
groupVersionResource.Group,
groupVersionResource.Version,
"namespaces",
namespaceName,
groupVersionResource.Resource,
}...)
dynamicClientActionSet.Insert((&fakeAction{method: "GET", path: urlPath}).String())
dynamicClientActionSet.Insert((&fakeAction{method: "DELETE", path: urlPath}).String())
}
scenarios := map[string]struct {
testNamespace *v1.Namespace
kubeClientActionSet sets.String
dynamicClientActionSet sets.String
gvrError error
}{
"pending-finalize": {
testNamespace: testNamespacePendingFinalize,
kubeClientActionSet: sets.NewString(
strings.Join([]string{"get", "namespaces", ""}, "-"),
strings.Join([]string{"create", "namespaces", "finalize"}, "-"),
strings.Join([]string{"list", "pods", ""}, "-"),
strings.Join([]string{"delete", "namespaces", ""}, "-"),
),
dynamicClientActionSet: dynamicClientActionSet,
},
"complete-finalize": {
testNamespace: testNamespaceFinalizeComplete,
kubeClientActionSet: sets.NewString(
strings.Join([]string{"get", "namespaces", ""}, "-"),
strings.Join([]string{"delete", "namespaces", ""}, "-"),
),
dynamicClientActionSet: sets.NewString(),
},
"groupVersionResourceErr": {
testNamespace: testNamespaceFinalizeComplete,
kubeClientActionSet: sets.NewString(
strings.Join([]string{"get", "namespaces", ""}, "-"),
strings.Join([]string{"delete", "namespaces", ""}, "-"),
),
dynamicClientActionSet: sets.NewString(),
gvrError: fmt.Errorf("test error"),
},
}
for scenario, testInput := range scenarios {
testHandler := &fakeActionHandler{statusCode: 200}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
mockClient := fake.NewSimpleClientset(testInput.testNamespace)
clientPool := dynamic.NewClientPool(clientConfig, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
fn := func() ([]*metav1.APIResourceList, error) {
return resources, nil
}
err := syncNamespace(mockClient, clientPool, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testInput.testNamespace, v1.FinalizerKubernetes)
if err != nil {
t.Errorf("scenario %s - Unexpected error when synching namespace %v", scenario, err)
}
// validate traffic from kube client
actionSet := sets.NewString()
for _, action := range mockClient.Actions() {
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
}
if !actionSet.Equal(testInput.kubeClientActionSet) {
t.Errorf("scenario %s - mock client expected actions:\n%v\n but got:\n%v\nDifference:\n%v", scenario,
testInput.kubeClientActionSet, actionSet, testInput.kubeClientActionSet.Difference(actionSet))
}
// validate traffic from dynamic client
actionSet = sets.NewString()
for _, action := range testHandler.actions {
actionSet.Insert(action.String())
}
if !actionSet.Equal(testInput.dynamicClientActionSet) {
t.Errorf("scenario %s - dynamic client expected actions:\n%v\n but got:\n%v\nDifference:\n%v", scenario,
testInput.dynamicClientActionSet, actionSet, testInput.dynamicClientActionSet.Difference(actionSet))
}
}
}
func TestRetryOnConflictError(t *testing.T) {
mockClient := &fake.Clientset{}
numTries := 0
retryOnce := func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
numTries++
if numTries <= 1 {
return namespace, errors.NewConflict(api.Resource("namespaces"), namespace.Name, fmt.Errorf("ERROR!"))
}
return namespace, nil
}
namespace := &v1.Namespace{}
_, err := retryOnConflictError(mockClient, namespace, retryOnce)
if err != nil {
t.Errorf("Unexpected error %v", err)
}
if numTries != 2 {
t.Errorf("Expected %v, but got %v", 2, numTries)
}
}
func TestSyncNamespaceThatIsTerminatingNonExperimental(t *testing.T) {
testSyncNamespaceThatIsTerminating(t, &metav1.APIVersions{})
}
func TestSyncNamespaceThatIsTerminatingV1Beta1(t *testing.T) {
testSyncNamespaceThatIsTerminating(t, &metav1.APIVersions{Versions: []string{"extensions/v1beta1"}})
}
func TestSyncNamespaceThatIsActive(t *testing.T) {
mockClient := &fake.Clientset{}
testNamespace := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: "test",
ResourceVersion: "1",
},
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"kubernetes"},
},
Status: v1.NamespaceStatus{
Phase: v1.NamespaceActive,
},
}
fn := func() ([]*metav1.APIResourceList, error) {
return testResources(), nil
}
err := syncNamespace(mockClient, nil, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testNamespace, v1.FinalizerKubernetes)
if err != nil {
t.Errorf("Unexpected error when synching namespace %v", err)
}
if len(mockClient.Actions()) != 0 {
t.Errorf("Expected no action from controller, but got: %v", mockClient.Actions())
}
}
// testServerAndClientConfig returns a server that listens and a config that can reference it
func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) {
srv := httptest.NewServer(http.HandlerFunc(handler))
config := &restclient.Config{
Host: srv.URL,
}
return srv, config
}
// fakeAction records information about requests to aid in testing.
type fakeAction struct {
method string
path string
}
// String returns method=path to aid in testing
func (f *fakeAction) String() string {
return strings.Join([]string{f.method, f.path}, "=")
}
// fakeActionHandler holds a list of fakeActions received
type fakeActionHandler struct {
// statusCode returned by this handler
statusCode int
lock sync.Mutex
actions []fakeAction
}
// ServeHTTP logs the action that occurred and always returns the associated status code
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
f.lock.Lock()
defer f.lock.Unlock()
f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path})
response.Header().Set("Content-Type", runtime.ContentTypeJSON)
response.WriteHeader(f.statusCode)
response.Write([]byte("{\"kind\": \"List\",\"items\":null}"))
}
// testResources returns a mocked up set of resources across different api groups for testing namespace controller.
func testResources() []*metav1.APIResourceList {
results := []*metav1.APIResourceList{
{
GroupVersion: "v1",
APIResources: []metav1.APIResource{
{
Name: "pods",
Namespaced: true,
Kind: "Pod",
Verbs: []string{"get", "list", "delete", "deletecollection", "create", "update"},
},
{
Name: "services",
Namespaced: true,
Kind: "Service",
Verbs: []string{"get", "list", "delete", "deletecollection", "create", "update"},
},
},
},
{
GroupVersion: "extensions/v1beta1",
APIResources: []metav1.APIResource{
{
Name: "deployments",
Namespaced: true,
Kind: "Deployment",
Verbs: []string{"get", "list", "delete", "deletecollection", "create", "update"},
},
},
},
}
return results
}
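
To make the expected dynamic-client traffic concrete, here is a small standalone sketch of how the scenario URL paths are formed from the group/version/resource tuples in testResources(); it assumes the vendored packages are importable as-is and that LegacyAPIPathResolverFunc resolves the core group to /api and every other group to /apis.

package main

import (
	"fmt"
	"path"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
)

func main() {
	// Two of the resources returned by testResources(); the namespace name
	// matches the one used by the scenarios above.
	gvrs := []schema.GroupVersionResource{
		{Group: "", Version: "v1", Resource: "pods"},
		{Group: "extensions", Version: "v1beta1", Resource: "deployments"},
	}
	for _, gvr := range gvrs {
		urlPath := path.Join(
			dynamic.LegacyAPIPathResolverFunc(schema.GroupVersionKind{Group: gvr.Group, Version: gvr.Version}),
			gvr.Group,
			gvr.Version,
			"namespaces",
			"test",
			gvr.Resource,
		)
		// Expected output (assuming the resolver behaves as described):
		//   /api/v1/namespaces/test/pods
		//   /apis/extensions/v1beta1/namespaces/test/deployments
		fmt.Println(urlPath)
	}
}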


@@ -0,0 +1,507 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package namespace
import (
"fmt"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/typed/discovery"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
)
// contentRemainingError is used to inform the caller that content is not fully removed from the namespace
type contentRemainingError struct {
Estimate int64
}
func (e *contentRemainingError) Error() string {
return fmt.Sprintf("some content remains in the namespace, estimate %d seconds before it is removed", e.Estimate)
}
// operation is used for caching if an operation is supported on a dynamic client.
type operation string
const (
operationDeleteCollection operation = "deleteCollection"
operationList operation = "list"
// assume a default estimate for finalizers to complete when found on items pending deletion.
finalizerEstimateSeconds int64 = int64(15)
)
// operationKey is an entry in a cache.
type operationKey struct {
op operation
gvr schema.GroupVersionResource
}
// operationNotSupportedCache is a simple cache to remember if an operation is not supported for a resource.
// if the operationKey maps to true, it means the operation is not supported.
type operationNotSupportedCache struct {
lock sync.RWMutex
m map[operationKey]bool
}
// isSupported returns true if the operation is supported
func (o *operationNotSupportedCache) isSupported(key operationKey) bool {
o.lock.RLock()
defer o.lock.RUnlock()
return !o.m[key]
}
func (o *operationNotSupportedCache) setNotSupported(key operationKey) {
o.lock.Lock()
defer o.lock.Unlock()
o.m[key] = true
}
// updateNamespaceFunc is a function that makes an update to a namespace
type updateNamespaceFunc func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error)
// retryOnConflictError retries the specified fn if there was a conflict error
// it will return an error if the UID for an object changes across retry operations.
// TODO RetryOnConflict should be a generic concept in client code
func retryOnConflictError(kubeClient clientset.Interface, namespace *v1.Namespace, fn updateNamespaceFunc) (result *v1.Namespace, err error) {
latestNamespace := namespace
for {
result, err = fn(kubeClient, latestNamespace)
if err == nil {
return result, nil
}
if !errors.IsConflict(err) {
return nil, err
}
prevNamespace := latestNamespace
latestNamespace, err = kubeClient.Core().Namespaces().Get(latestNamespace.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if prevNamespace.UID != latestNamespace.UID {
return nil, fmt.Errorf("namespace uid has changed across retries")
}
}
}
// updateNamespaceStatusFunc will verify that the status of the namespace is correct
func updateNamespaceStatusFunc(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
if namespace.DeletionTimestamp.IsZero() || namespace.Status.Phase == v1.NamespaceTerminating {
return namespace, nil
}
newNamespace := v1.Namespace{}
newNamespace.ObjectMeta = namespace.ObjectMeta
newNamespace.Status = namespace.Status
newNamespace.Status.Phase = v1.NamespaceTerminating
return kubeClient.Core().Namespaces().UpdateStatus(&newNamespace)
}
// finalized returns true if the namespace.Spec.Finalizers is an empty list
func finalized(namespace *v1.Namespace) bool {
return len(namespace.Spec.Finalizers) == 0
}
// finalizeNamespaceFunc returns a function that knows how to finalize a namespace for specified token.
func finalizeNamespaceFunc(finalizerToken v1.FinalizerName) updateNamespaceFunc {
return func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
return finalizeNamespace(kubeClient, namespace, finalizerToken)
}
}
// finalizeNamespace removes the specified finalizerToken and finalizes the namespace
func finalizeNamespace(kubeClient clientset.Interface, namespace *v1.Namespace, finalizerToken v1.FinalizerName) (*v1.Namespace, error) {
namespaceFinalize := v1.Namespace{}
namespaceFinalize.ObjectMeta = namespace.ObjectMeta
namespaceFinalize.Spec = namespace.Spec
finalizerSet := sets.NewString()
for i := range namespace.Spec.Finalizers {
if namespace.Spec.Finalizers[i] != finalizerToken {
finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
}
}
namespaceFinalize.Spec.Finalizers = make([]v1.FinalizerName, 0, len(finalizerSet))
for _, value := range finalizerSet.List() {
namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, v1.FinalizerName(value))
}
namespace, err := kubeClient.Core().Namespaces().Finalize(&namespaceFinalize)
if err != nil {
// it was removed already, so life is good
if errors.IsNotFound(err) {
return namespace, nil
}
}
return namespace, err
}
// deleteCollection is a helper function that will delete the collection of resources
// it returns true if the operation was supported on the server.
// it returns an error if the operation was supported on the server but was unable to complete.
func deleteCollection(
dynamicClient *dynamic.Client,
opCache *operationNotSupportedCache,
gvr schema.GroupVersionResource,
namespace string,
) (bool, error) {
glog.V(5).Infof("namespace controller - deleteCollection - namespace: %s, gvr: %v", namespace, gvr)
key := operationKey{op: operationDeleteCollection, gvr: gvr}
if !opCache.isSupported(key) {
glog.V(5).Infof("namespace controller - deleteCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr)
return false, nil
}
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
// namespace controller does not want the garbage collector to insert the orphan finalizer since it calls
// resource deletions generically. it will ensure all resources in the namespace are purged prior to releasing
// namespace itself.
orphanDependents := false
err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, &v1.ListOptions{})
if err == nil {
return true, nil
}
// this is strange, but we need to special case for both MethodNotSupported and NotFound errors
// TODO: https://github.com/kubernetes/kubernetes/issues/22413
// we have a resource returned in the discovery API that supports no top-level verbs:
// /apis/extensions/v1beta1/namespaces/default/replicationcontrollers
// when working with this resource type, we will get a literal not found error rather than expected method not supported
// remember next time that this resource does not support delete collection...
if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) {
glog.V(5).Infof("namespace controller - deleteCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
opCache.setNotSupported(key)
return false, nil
}
glog.V(5).Infof("namespace controller - deleteCollection unexpected error - namespace: %s, gvr: %v, error: %v", namespace, gvr, err)
return true, err
}
// listCollection will list the items in the specified namespace
// it returns the following:
// the list of items in the collection (if found)
// a boolean if the operation is supported
// an error if the operation is supported but could not be completed.
func listCollection(
dynamicClient *dynamic.Client,
opCache *operationNotSupportedCache,
gvr schema.GroupVersionResource,
namespace string,
) (*unstructured.UnstructuredList, bool, error) {
glog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr)
key := operationKey{op: operationList, gvr: gvr}
if !opCache.isSupported(key) {
glog.V(5).Infof("namespace controller - listCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr)
return nil, false, nil
}
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
obj, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
if err == nil {
unstructuredList, ok := obj.(*unstructured.UnstructuredList)
if !ok {
return nil, false, fmt.Errorf("resource: %s, expected *unstructured.UnstructuredList, got %#v", apiResource.Name, obj)
}
return unstructuredList, true, nil
}
// this is strange, but we need to special case for both MethodNotSupported and NotFound errors
// TODO: https://github.com/kubernetes/kubernetes/issues/22413
// we have a resource returned in the discovery API that supports no top-level verbs:
// /apis/extensions/v1beta1/namespaces/default/replicationcontrollers
// when working with this resource type, we will get a literal not found error rather than expected method not supported
// remember next time that this resource does not support list...
if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) {
glog.V(5).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
opCache.setNotSupported(key)
return nil, false, nil
}
return nil, true, err
}
// deleteEachItem is a helper function that will list the collection of resources and delete each item 1 by 1.
func deleteEachItem(
dynamicClient *dynamic.Client,
opCache *operationNotSupportedCache,
gvr schema.GroupVersionResource,
namespace string,
) error {
glog.V(5).Infof("namespace controller - deleteEachItem - namespace: %s, gvr: %v", namespace, gvr)
unstructuredList, listSupported, err := listCollection(dynamicClient, opCache, gvr, namespace)
if err != nil {
return err
}
if !listSupported {
return nil
}
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
for _, item := range unstructuredList.Items {
if err = dynamicClient.Resource(&apiResource, namespace).Delete(item.GetName(), nil); err != nil && !errors.IsNotFound(err) && !errors.IsMethodNotSupported(err) {
return err
}
}
return nil
}
// deleteAllContentForGroupVersionResource will use the dynamic client to delete each resource identified in gvr.
// It returns an estimate of the time remaining before the remaining resources are deleted.
// If estimate > 0, not all resources are guaranteed to be gone.
func deleteAllContentForGroupVersionResource(
kubeClient clientset.Interface,
clientPool dynamic.ClientPool,
opCache *operationNotSupportedCache,
gvr schema.GroupVersionResource,
namespace string,
namespaceDeletedAt metav1.Time,
) (int64, error) {
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - namespace: %s, gvr: %v", namespace, gvr)
// estimate how long it will take for the resource to be deleted (needed for objects that support graceful delete)
estimate, err := estimateGracefulTermination(kubeClient, gvr, namespace, namespaceDeletedAt)
if err != nil {
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to estimate - namespace: %s, gvr: %v, err: %v", namespace, gvr, err)
return estimate, err
}
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - estimate - namespace: %s, gvr: %v, estimate: %v", namespace, gvr, estimate)
// get a client for this group version...
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to get client - namespace: %s, gvr: %v, err: %v", namespace, gvr, err)
return estimate, err
}
// first try to delete the entire collection
deleteCollectionSupported, err := deleteCollection(dynamicClient, opCache, gvr, namespace)
if err != nil {
return estimate, err
}
// delete collection was not supported, so we list and delete each item...
if !deleteCollectionSupported {
err = deleteEachItem(dynamicClient, opCache, gvr, namespace)
if err != nil {
return estimate, err
}
}
// verify there are no more remaining items
// it is not an error condition for there to be remaining items if local estimate is non-zero
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - checking for no more items in namespace: %s, gvr: %v", namespace, gvr)
unstructuredList, listSupported, err := listCollection(dynamicClient, opCache, gvr, namespace)
if err != nil {
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - error verifying no items in namespace: %s, gvr: %v, err: %v", namespace, gvr, err)
return estimate, err
}
if !listSupported {
return estimate, nil
}
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining - namespace: %s, gvr: %v, items: %v", namespace, gvr, len(unstructuredList.Items))
if len(unstructuredList.Items) != 0 && estimate == int64(0) {
// if any item has a finalizer, we treat that as a normal condition, and use a default estimation to allow for GC to complete.
for _, item := range unstructuredList.Items {
if len(item.GetFinalizers()) > 0 {
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining with finalizers - namespace: %s, gvr: %v, finalizers: %v", namespace, gvr, item.GetFinalizers())
return finalizerEstimateSeconds, nil
}
}
// nothing reported a finalizer, so something was unexpected as it should have been deleted.
return estimate, fmt.Errorf("unexpected items still remain in namespace: %s for gvr: %v", namespace, gvr)
}
return estimate, nil
}
// deleteAllContent will use the dynamic client to delete each resource identified in groupVersionResources.
// It returns an estimate of the time remaining before the remaining resources are deleted.
// If estimate > 0, not all resources are guaranteed to be gone.
func deleteAllContent(
kubeClient clientset.Interface,
clientPool dynamic.ClientPool,
opCache *operationNotSupportedCache,
groupVersionResources map[schema.GroupVersionResource]struct{},
namespace string,
namespaceDeletedAt metav1.Time,
) (int64, error) {
estimate := int64(0)
glog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s, gvrs: %v", namespace, groupVersionResources)
for gvr := range groupVersionResources {
gvrEstimate, err := deleteAllContentForGroupVersionResource(kubeClient, clientPool, opCache, gvr, namespace, namespaceDeletedAt)
if err != nil {
return estimate, err
}
if gvrEstimate > estimate {
estimate = gvrEstimate
}
}
glog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s, estimate: %v", namespace, estimate)
return estimate, nil
}
// syncNamespace orchestrates deletion of a Namespace and its associated content.
func syncNamespace(
kubeClient clientset.Interface,
clientPool dynamic.ClientPool,
opCache *operationNotSupportedCache,
discoverResourcesFn func() ([]*metav1.APIResourceList, error),
namespace *v1.Namespace,
finalizerToken v1.FinalizerName,
) error {
if namespace.DeletionTimestamp == nil {
return nil
}
// multiple controllers may edit a namespace during termination
// first get the latest state of the namespace before proceeding
// if the namespace was deleted already, don't do anything
namespace, err := kubeClient.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
glog.V(5).Infof("namespace controller - syncNamespace - namespace: %s, finalizerToken: %s", namespace.Name, finalizerToken)
// ensure that the status is up to date on the namespace
// if we get a not found error, we assume the namespace is truly gone
namespace, err = retryOnConflictError(kubeClient, namespace, updateNamespaceStatusFunc)
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
// the latest view of the namespace asserts that namespace is no longer deleting.
if namespace.DeletionTimestamp.IsZero() {
return nil
}
// if the namespace is already finalized, delete it
if finalized(namespace) {
var opts *v1.DeleteOptions
uid := namespace.UID
if len(uid) > 0 {
opts = &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &uid}}
}
err = kubeClient.Core().Namespaces().Delete(namespace.Name, opts)
if err != nil && !errors.IsNotFound(err) {
return err
}
return nil
}
// there may still be content for us to remove
resources, err := discoverResourcesFn()
if err != nil {
return err
}
// TODO(sttts): get rid of opCache and pass the verbs (especially "deletecollection") down into the deleter
deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, resources)
groupVersionResources, err := discovery.GroupVersionResources(deletableResources)
if err != nil {
return err
}
estimate, err := deleteAllContent(kubeClient, clientPool, opCache, groupVersionResources, namespace.Name, *namespace.DeletionTimestamp)
if err != nil {
return err
}
if estimate > 0 {
return &contentRemainingError{estimate}
}
// we have removed content, so mark it finalized by us
result, err := retryOnConflictError(kubeClient, namespace, finalizeNamespaceFunc(finalizerToken))
if err != nil {
// in normal practice, this should not be possible, but if a deployment is running
// two controllers to do namespace deletion that share a common finalizer token it's
// possible that a not found could occur since the other controller would have finished the delete.
if errors.IsNotFound(err) {
return nil
}
return err
}
// now check whether all finalizers have been removed; if so, we can delete the namespace now
if finalized(result) {
err = kubeClient.Core().Namespaces().Delete(namespace.Name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}
}
return nil
}
// estimateGracefulTermination will estimate the graceful termination required for the specific entity in the namespace
func estimateGracefulTermination(kubeClient clientset.Interface, groupVersionResource schema.GroupVersionResource, ns string, namespaceDeletedAt metav1.Time) (int64, error) {
groupResource := groupVersionResource.GroupResource()
glog.V(5).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource)
estimate := int64(0)
var err error
switch groupResource {
case schema.GroupResource{Group: "", Resource: "pods"}:
estimate, err = estimateGracefulTerminationForPods(kubeClient, ns)
}
if err != nil {
return estimate, err
}
// determine if the estimate is greater than the deletion timestamp
duration := time.Since(namespaceDeletedAt.Time)
allowedEstimate := time.Duration(estimate) * time.Second
if duration >= allowedEstimate {
estimate = int64(0)
}
return estimate, nil
}
// estimateGracefulTerminationForPods determines the graceful termination period for pods in the namespace
func estimateGracefulTerminationForPods(kubeClient clientset.Interface, ns string) (int64, error) {
glog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns)
estimate := int64(0)
items, err := kubeClient.Core().Pods(ns).List(v1.ListOptions{})
if err != nil {
return estimate, err
}
for i := range items.Items {
// filter out terminal pods
phase := items.Items[i].Status.Phase
if v1.PodSucceeded == phase || v1.PodFailed == phase {
continue
}
if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
if grace > estimate {
estimate = grace
}
}
}
return estimate, nil
}
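
As a worked example of the estimate handling in estimateGracefulTermination and estimateGracefulTerminationForPods: the largest pod termination grace period becomes the estimate, and once that much time has already passed since the namespace's deletion timestamp, the estimate collapses to zero so deletion can proceed. The sketch below restates that arithmetic in isolation; the helper name and sample numbers are illustrative, not part of the vendored code.

package main

import (
	"fmt"
	"time"
)

// remainingEstimate mirrors the clamping logic above: use the largest pod
// grace period as the estimate, but report zero once that much time has
// already elapsed since the namespace was marked for deletion.
func remainingEstimate(maxPodGraceSeconds int64, deletedAt, now time.Time) int64 {
	if now.Sub(deletedAt) >= time.Duration(maxPodGraceSeconds)*time.Second {
		return 0
	}
	return maxPodGraceSeconds
}

func main() {
	now := time.Now()
	deletedAt := now.Add(-20 * time.Second) // namespace marked for deletion 20s ago

	fmt.Println(remainingEstimate(30, deletedAt, now)) // 30: pods may still be terminating
	fmt.Println(remainingEstimate(15, deletedAt, now)) // 0: grace period already elapsed
}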