Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions
85 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD generated vendored Normal file
@@ -0,0 +1,85 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "garbagecollector.go",
        "metrics.go",
        "rate_limiter_helper.go",
        "uid_cache.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/cache:go_default_library",
        "//pkg/client/typed/dynamic:go_default_library",
        "//pkg/controller/garbagecollector/metaonly:go_default_library",
        "//pkg/util/clock:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//pkg/util/workqueue:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/golang/groupcache/lru",
        "//vendor:github.com/prometheus/client_golang/prometheus",
        "//vendor:k8s.io/apimachinery/pkg/api/errors",
        "//vendor:k8s.io/apimachinery/pkg/api/meta",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/errors",
        "//vendor:k8s.io/apimachinery/pkg/util/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
        "//vendor:k8s.io/apimachinery/pkg/watch",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["garbagecollector_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/install:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/restclient:go_default_library",
        "//pkg/client/typed/dynamic:go_default_library",
        "//pkg/controller/garbagecollector/metaonly:go_default_library",
        "//pkg/util/clock:go_default_library",
        "//pkg/util/workqueue:go_default_library",
        "//vendor:github.com/stretchr/testify/assert",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
        "//vendor:k8s.io/apimachinery/pkg/runtime/serializer",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/json",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/controller/garbagecollector/metaonly:all-srcs",
    ],
    tags = ["automanaged"],
)
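These automanaged targets are what Bazel builds of the main repository consume. For orientation only, the target names come from the BUILD file above, while the commands themselves are illustrative and not part of this commit; the library and its test would be exercised with something like:

    bazel build //pkg/controller/garbagecollector:go_default_library
    bazel test //pkg/controller/garbagecollector:go_default_test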
4 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/OWNERS generated vendored Executable file
@@ -0,0 +1,4 @@
reviewers:
- caesarxuchao
- lavalamp
- deads2k
809 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go generated vendored Normal file
@@ -0,0 +1,809 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"fmt"
	"sync"
	"time"

	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
	"k8s.io/kubernetes/pkg/util/clock"
	"k8s.io/kubernetes/pkg/util/workqueue"
)

const ResourceResyncTime time.Duration = 0

type monitor struct {
	store      cache.Store
	controller cache.Controller
}

type objectReference struct {
	metav1.OwnerReference
	// This is needed by the dynamic client
	Namespace string
}

func (s objectReference) String() string {
	return fmt.Sprintf("[%s/%s, namespace: %s, name: %s, uid: %s]", s.APIVersion, s.Kind, s.Namespace, s.Name, s.UID)
}

// node does not require a lock to protect. The single-threaded
// Propagator.processEvent() is the sole writer of the nodes. The multi-threaded
// GarbageCollector.processItem() reads the nodes, but it only reads the fields
// that never get changed by Propagator.processEvent().
type node struct {
	identity objectReference
	// dependents will be read by the orphan() routine, so we need to protect it with a lock.
	dependentsLock sync.RWMutex
	dependents     map[*node]struct{}
	// When processing an Update event, we need to compare the updated
	// ownerReferences with the owners recorded in the graph.
	owners []metav1.OwnerReference
}

func (ownerNode *node) addDependent(dependent *node) {
	ownerNode.dependentsLock.Lock()
	defer ownerNode.dependentsLock.Unlock()
	ownerNode.dependents[dependent] = struct{}{}
}

func (ownerNode *node) deleteDependent(dependent *node) {
	ownerNode.dependentsLock.Lock()
	defer ownerNode.dependentsLock.Unlock()
	delete(ownerNode.dependents, dependent)
}

type eventType int

const (
	addEvent eventType = iota
	updateEvent
	deleteEvent
)

type event struct {
	eventType eventType
	obj       interface{}
	// the update event comes with an old object, but it's not used by the garbage collector.
	oldObj interface{}
}

type concurrentUIDToNode struct {
	*sync.RWMutex
	uidToNode map[types.UID]*node
}

func (m *concurrentUIDToNode) Write(node *node) {
	m.Lock()
	defer m.Unlock()
	m.uidToNode[node.identity.UID] = node
}

func (m *concurrentUIDToNode) Read(uid types.UID) (*node, bool) {
	m.RLock()
	defer m.RUnlock()
	n, ok := m.uidToNode[uid]
	return n, ok
}

func (m *concurrentUIDToNode) Delete(uid types.UID) {
	m.Lock()
	defer m.Unlock()
	delete(m.uidToNode, uid)
}

type Propagator struct {
	eventQueue *workqueue.TimedWorkQueue
	// uidToNode doesn't require a lock to protect, because only the
	// single-threaded Propagator.processEvent() reads/writes it.
	uidToNode *concurrentUIDToNode
	gc        *GarbageCollector
}

// addDependentToOwners adds n to owners' dependents list. If the owner does not
// exist in the p.uidToNode yet, a "virtual" node will be created to represent
// the owner. The "virtual" node will be enqueued to the dirtyQueue, so that
// processItem() will verify if the owner exists according to the API server.
func (p *Propagator) addDependentToOwners(n *node, owners []metav1.OwnerReference) {
	for _, owner := range owners {
		ownerNode, ok := p.uidToNode.Read(owner.UID)
		if !ok {
			// Create a "virtual" node in the graph for the owner if it doesn't
			// exist in the graph yet. Then enqueue the virtual node into the
			// dirtyQueue. The garbage processor will enqueue a virtual delete
			// event to delete it from the graph if API server confirms this
			// owner doesn't exist.
			ownerNode = &node{
				identity: objectReference{
					OwnerReference: owner,
					Namespace:      n.identity.Namespace,
				},
				dependents: make(map[*node]struct{}),
			}
			glog.V(6).Infof("add virtual node.identity: %s\n\n", ownerNode.identity)
			p.uidToNode.Write(ownerNode)
			p.gc.dirtyQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: p.gc.clock.Now(), Object: ownerNode})
		}
		ownerNode.addDependent(n)
	}
}

// insertNode inserts the node into p.uidToNode; then it finds all owners as listed
// in n.owners, and adds the node to their dependents list.
func (p *Propagator) insertNode(n *node) {
	p.uidToNode.Write(n)
	p.addDependentToOwners(n, n.owners)
}

// removeDependentFromOwners removes n from owners' dependents list.
func (p *Propagator) removeDependentFromOwners(n *node, owners []metav1.OwnerReference) {
	for _, owner := range owners {
		ownerNode, ok := p.uidToNode.Read(owner.UID)
		if !ok {
			continue
		}
		ownerNode.deleteDependent(n)
	}
}

// removeNode removes the node from p.uidToNode, then finds all
// owners as listed in n.owners, and removes n from their dependents list.
func (p *Propagator) removeNode(n *node) {
	p.uidToNode.Delete(n.identity.UID)
	p.removeDependentFromOwners(n, n.owners)
}

// TODO: profile this function to see if a naive N^2 algorithm performs better
// when the number of references is small.
func referencesDiffs(old []metav1.OwnerReference, new []metav1.OwnerReference) (added []metav1.OwnerReference, removed []metav1.OwnerReference) {
	oldUIDToRef := make(map[string]metav1.OwnerReference)
	for i := 0; i < len(old); i++ {
		oldUIDToRef[string(old[i].UID)] = old[i]
	}
	oldUIDSet := sets.StringKeySet(oldUIDToRef)
	newUIDToRef := make(map[string]metav1.OwnerReference)
	for i := 0; i < len(new); i++ {
		newUIDToRef[string(new[i].UID)] = new[i]
	}
	newUIDSet := sets.StringKeySet(newUIDToRef)

	addedUID := newUIDSet.Difference(oldUIDSet)
	removedUID := oldUIDSet.Difference(newUIDSet)

	for uid := range addedUID {
		added = append(added, newUIDToRef[uid])
	}
	for uid := range removedUID {
		removed = append(removed, oldUIDToRef[uid])
	}
	return added, removed
}

func shouldOrphanDependents(e *event, accessor meta.Object) bool {
	// The delta_fifo may combine the creation and update of the object into one
	// event, so we need to check AddEvent as well.
	if e.oldObj == nil {
		if accessor.GetDeletionTimestamp() == nil {
			return false
		}
	} else {
		oldAccessor, err := meta.Accessor(e.oldObj)
		if err != nil {
			utilruntime.HandleError(fmt.Errorf("cannot access oldObj: %v", err))
			return false
		}
		// ignore the event if it's not updating DeletionTimestamp from nil to non-nil.
		if accessor.GetDeletionTimestamp() == nil || oldAccessor.GetDeletionTimestamp() != nil {
			return false
		}
	}
	finalizers := accessor.GetFinalizers()
	for _, finalizer := range finalizers {
		if finalizer == v1.FinalizerOrphan {
			return true
		}
	}
	return false
}

// dependents are copies of pointers to the owner's dependents; they don't need to be locked.
func (gc *GarbageCollector) orphanDependents(owner objectReference, dependents []*node) error {
	var errorsSlice []error
	for _, dependent := range dependents {
		// the dependent.identity.UID is used as precondition
		deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, owner.UID, dependent.identity.UID)
		_, err := gc.patchObject(dependent.identity, []byte(deleteOwnerRefPatch))
		// note that if the target ownerReference doesn't exist in the
		// dependent, strategic merge patch will NOT return an error.
		if err != nil && !errors.IsNotFound(err) {
			errorsSlice = append(errorsSlice, fmt.Errorf("orphaning %s failed with %v", dependent.identity, err))
		}
	}
	if len(errorsSlice) != 0 {
		return fmt.Errorf("failed to orphan dependents of owner %s, got errors: %s", owner, utilerrors.NewAggregate(errorsSlice).Error())
	}
	glog.V(6).Infof("successfully updated all dependents")
	return nil
}
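// For illustration, with a hypothetical owner UID "123" and a hypothetical
// dependent UID "456", the deleteOwnerRefPatch built above renders as
//
//	{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"123"}],"uid":"456"}}
//
// The "$patch":"delete" directive asks the strategic-merge-patch logic to drop
// the matching ownerReference entry, while the top-level "uid" serves as the
// precondition mentioned above: since UID is immutable, the patch fails if the
// dependent has been replaced by a different object with the same name.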

// TODO: Use Patch once strategic merge supports deleting an entry from a
// slice of a base type.
func (gc *GarbageCollector) removeOrphanFinalizer(owner *node) error {
	const retries = 5
	for count := 0; count < retries; count++ {
		ownerObject, err := gc.getObject(owner.identity)
		if err != nil {
			return fmt.Errorf("cannot finalize owner %s, because it cannot be retrieved. The garbage collector will retry later.", owner.identity)
		}
		accessor, err := meta.Accessor(ownerObject)
		if err != nil {
			return fmt.Errorf("cannot access the owner object: %v. The garbage collector will retry later.", err)
		}
		finalizers := accessor.GetFinalizers()
		var newFinalizers []string
		found := false
		for _, f := range finalizers {
			if f == v1.FinalizerOrphan {
				found = true
				continue
			}
			newFinalizers = append(newFinalizers, f)
		}
		if !found {
			glog.V(6).Infof("the orphan finalizer is already removed from object %s", owner.identity)
			return nil
		}
		// remove the orphan finalizer from the owner's finalizers list
		ownerObject.SetFinalizers(newFinalizers)
		_, err = gc.updateObject(owner.identity, ownerObject)
		if err == nil {
			return nil
		}
		if err != nil && !errors.IsConflict(err) {
			return fmt.Errorf("cannot update the finalizers of owner %s, with error: %v, tried %d times", owner.identity, err, count+1)
		}
		// retry if it's a conflict
		glog.V(6).Infof("got conflict updating the owner object %s, tried %d times", owner.identity, count+1)
	}
	return fmt.Errorf("updateMaxRetries(%d) has been reached. The garbage collector will retry later for owner %v.", retries, owner.identity)
}

// orphanFinalizer dequeues a node from the orphanQueue, then finds its dependents
// based on the graph maintained by the GC, then removes it from the
// OwnerReferences of its dependents, and finally updates the owner to remove
// the "Orphan" finalizer. The node is added back into the orphanQueue if any of
// these steps fail.
func (gc *GarbageCollector) orphanFinalizer() {
	timedItem, quit := gc.orphanQueue.Get()
	if quit {
		return
	}
	defer gc.orphanQueue.Done(timedItem)
	owner, ok := timedItem.Object.(*node)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expect *node, got %#v", timedItem.Object))
		return
	}
	// we don't need to lock each element, because they never get updated
	owner.dependentsLock.RLock()
	dependents := make([]*node, 0, len(owner.dependents))
	for dependent := range owner.dependents {
		dependents = append(dependents, dependent)
	}
	owner.dependentsLock.RUnlock()

	err := gc.orphanDependents(owner.identity, dependents)
	if err != nil {
		glog.V(6).Infof("orphanDependents for %s failed with %v", owner.identity, err)
		gc.orphanQueue.Add(timedItem)
		return
	}
	// update the owner, remove "orphaningFinalizer" from its finalizers list
	err = gc.removeOrphanFinalizer(owner)
	if err != nil {
		glog.V(6).Infof("removeOrphanFinalizer for %s failed with %v", owner.identity, err)
		gc.orphanQueue.Add(timedItem)
	}
	OrphanProcessingLatency.Observe(sinceInMicroseconds(gc.clock, timedItem.StartTime))
}

// processEvent dequeues an event from the eventQueue, updates the graph, and
// populates the dirtyQueue.
func (p *Propagator) processEvent() {
	timedItem, quit := p.eventQueue.Get()
	if quit {
		return
	}
	defer p.eventQueue.Done(timedItem)
	event, ok := timedItem.Object.(*event)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expect a *event, got %v", timedItem.Object))
		return
	}
	obj := event.obj
	accessor, err := meta.Accessor(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
		return
	}
	typeAccessor, err := meta.TypeAccessor(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
		return
	}
	glog.V(6).Infof("Propagator process object: %s/%s, namespace %s, name %s, event type %v", typeAccessor.GetAPIVersion(), typeAccessor.GetKind(), accessor.GetNamespace(), accessor.GetName(), event.eventType)
	// Check if the node already exists
	existingNode, found := p.uidToNode.Read(accessor.GetUID())
	switch {
	case (event.eventType == addEvent || event.eventType == updateEvent) && !found:
		newNode := &node{
			identity: objectReference{
				OwnerReference: metav1.OwnerReference{
					APIVersion: typeAccessor.GetAPIVersion(),
					Kind:       typeAccessor.GetKind(),
					UID:        accessor.GetUID(),
					Name:       accessor.GetName(),
				},
				Namespace: accessor.GetNamespace(),
			},
			dependents: make(map[*node]struct{}),
			owners:     accessor.GetOwnerReferences(),
		}
		p.insertNode(newNode)
		// the underlying delta_fifo may combine a creation and deletion into one event
		if shouldOrphanDependents(event, accessor) {
			glog.V(6).Infof("add %s to the orphanQueue", newNode.identity)
			p.gc.orphanQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: p.gc.clock.Now(), Object: newNode})
		}
	case (event.eventType == addEvent || event.eventType == updateEvent) && found:
		// caveat: if GC observes the creation of the dependents later than the
		// deletion of the owner, then the orphaning finalizer won't be effective.
		if shouldOrphanDependents(event, accessor) {
			glog.V(6).Infof("add %s to the orphanQueue", existingNode.identity)
			p.gc.orphanQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: p.gc.clock.Now(), Object: existingNode})
		}
		// add/remove owner refs
		added, removed := referencesDiffs(existingNode.owners, accessor.GetOwnerReferences())
		if len(added) == 0 && len(removed) == 0 {
			glog.V(6).Infof("The updateEvent %#v doesn't change node references, ignore", event)
			return
		}
		// update the node itself
		existingNode.owners = accessor.GetOwnerReferences()
		// Add the node to its new owners' dependents lists.
		p.addDependentToOwners(existingNode, added)
		// remove the node from the dependents lists of nodes that are no longer
		// in the node's owners list.
		p.removeDependentFromOwners(existingNode, removed)
	case event.eventType == deleteEvent:
		if !found {
			glog.V(6).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID())
			return
		}
		p.removeNode(existingNode)
		existingNode.dependentsLock.RLock()
		defer existingNode.dependentsLock.RUnlock()
		if len(existingNode.dependents) > 0 {
			p.gc.absentOwnerCache.Add(accessor.GetUID())
		}
		for dep := range existingNode.dependents {
			p.gc.dirtyQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: p.gc.clock.Now(), Object: dep})
		}
	}
	EventProcessingLatency.Observe(sinceInMicroseconds(p.gc.clock, timedItem.StartTime))
}

// GarbageCollector is responsible for carrying out cascading deletion, and
// removing ownerReferences from the dependents if the owner is deleted with
// DeleteOptions.OrphanDependents=true.
type GarbageCollector struct {
	restMapper meta.RESTMapper
	// metaOnlyClientPool uses a special codec, which removes fields except for
	// apiVersion, kind, and metadata during decoding.
	metaOnlyClientPool dynamic.ClientPool
	// clientPool uses the regular dynamicCodec. We need it to update
	// finalizers. It can be removed if we support patching finalizers.
	clientPool                       dynamic.ClientPool
	dirtyQueue                       *workqueue.TimedWorkQueue
	orphanQueue                      *workqueue.TimedWorkQueue
	monitors                         []monitor
	propagator                       *Propagator
	clock                            clock.Clock
	registeredRateLimiter            *RegisteredRateLimiter
	registeredRateLimiterForMonitors *RegisteredRateLimiter
	// GC caches the owners that do not exist according to the API server.
	absentOwnerCache *UIDCache
}

func gcListWatcher(client *dynamic.Client, resource schema.GroupVersionResource) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			// APIResource.Kind is not used by the dynamic client, so
			// leave it empty. We want to list this resource in all
			// namespaces if it's namespace scoped, so leaving
			// APIResource.Namespaced as false is all right.
			apiResource := metav1.APIResource{Name: resource.Resource}
			return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
				Resource(&apiResource, v1.NamespaceAll).
				List(&options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			// APIResource.Kind is not used by the dynamic client, so
			// leave it empty. We want to list this resource in all
			// namespaces if it's namespace scoped, so leaving
			// APIResource.Namespaced as false is all right.
			apiResource := metav1.APIResource{Name: resource.Resource}
			return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
				Resource(&apiResource, v1.NamespaceAll).
				Watch(&options)
		},
	}
}

func (gc *GarbageCollector) monitorFor(resource schema.GroupVersionResource, kind schema.GroupVersionKind) (monitor, error) {
	// TODO: consider storing everything in one storage.
	glog.V(6).Infof("create storage for resource %s", resource)
	var monitor monitor
	client, err := gc.metaOnlyClientPool.ClientForGroupVersionKind(kind)
	if err != nil {
		return monitor, err
	}
	gc.registeredRateLimiterForMonitors.registerIfNotPresent(resource.GroupVersion(), client, "garbage_collector_monitoring")
	setObjectTypeMeta := func(obj interface{}) {
		runtimeObject, ok := obj.(runtime.Object)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("expected runtime.Object, got %#v", obj))
			return
		}
		runtimeObject.GetObjectKind().SetGroupVersionKind(kind)
	}
	monitor.store, monitor.controller = cache.NewInformer(
		gcListWatcher(client, resource),
		nil,
		ResourceResyncTime,
		cache.ResourceEventHandlerFuncs{
			// add the event to the propagator's eventQueue.
			AddFunc: func(obj interface{}) {
				setObjectTypeMeta(obj)
				event := &event{
					eventType: addEvent,
					obj:       obj,
				}
				gc.propagator.eventQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: gc.clock.Now(), Object: event})
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				setObjectTypeMeta(newObj)
				event := &event{updateEvent, newObj, oldObj}
				gc.propagator.eventQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: gc.clock.Now(), Object: event})
			},
			DeleteFunc: func(obj interface{}) {
				// delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it
				if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok {
					obj = deletedFinalStateUnknown.Obj
				}
				setObjectTypeMeta(obj)
				event := &event{
					eventType: deleteEvent,
					obj:       obj,
				}
				gc.propagator.eventQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: gc.clock.Now(), Object: event})
			},
		},
	)
	return monitor, nil
}

var ignoredResources = map[schema.GroupVersionResource]struct{}{
	schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicationcontrollers"}:              {},
	schema.GroupVersionResource{Group: "", Version: "v1", Resource: "bindings"}:                                           {},
	schema.GroupVersionResource{Group: "", Version: "v1", Resource: "componentstatuses"}:                                  {},
	schema.GroupVersionResource{Group: "", Version: "v1", Resource: "events"}:                                             {},
	schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "v1beta1", Resource: "tokenreviews"}:             {},
	schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "subjectaccessreviews"}:      {},
	schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "selfsubjectaccessreviews"}:  {},
	schema.GroupVersionResource{Group: "authorization.k8s.io", Version: "v1beta1", Resource: "localsubjectaccessreviews"}: {},
}

func NewGarbageCollector(metaOnlyClientPool dynamic.ClientPool, clientPool dynamic.ClientPool, mapper meta.RESTMapper, deletableResources map[schema.GroupVersionResource]struct{}) (*GarbageCollector, error) {
	gc := &GarbageCollector{
		metaOnlyClientPool:               metaOnlyClientPool,
		clientPool:                       clientPool,
		restMapper:                       mapper,
		clock:                            clock.RealClock{},
		dirtyQueue:                       workqueue.NewTimedWorkQueue(),
		orphanQueue:                      workqueue.NewTimedWorkQueue(),
		registeredRateLimiter:            NewRegisteredRateLimiter(deletableResources),
		registeredRateLimiterForMonitors: NewRegisteredRateLimiter(deletableResources),
		absentOwnerCache:                 NewUIDCache(500),
	}
	gc.propagator = &Propagator{
		eventQueue: workqueue.NewTimedWorkQueue(),
		uidToNode: &concurrentUIDToNode{
			RWMutex:   &sync.RWMutex{},
			uidToNode: make(map[types.UID]*node),
		},
		gc: gc,
	}
	for resource := range deletableResources {
		if _, ok := ignoredResources[resource]; ok {
			glog.V(6).Infof("ignore resource %#v", resource)
			continue
		}
		kind, err := gc.restMapper.KindFor(resource)
		if err != nil {
			if _, ok := err.(*meta.NoResourceMatchError); ok {
				// ignore NoResourceMatchErrors for now because TPRs won't be registered
				// and hence the RestMapper does not know about them. The deletableResources,
				// though, come from discovery, which includes TPRs.
				// TODO: use dynamic discovery for RestMapper and deletableResources
				glog.Warningf("ignore NoResourceMatchError for %v", resource)
				continue
			}
			return nil, err
		}
		monitor, err := gc.monitorFor(resource, kind)
		if err != nil {
			return nil, err
		}
		gc.monitors = append(gc.monitors, monitor)
	}
	return gc, nil
}

func (gc *GarbageCollector) worker() {
	timedItem, quit := gc.dirtyQueue.Get()
	if quit {
		return
	}
	defer gc.dirtyQueue.Done(timedItem)
	err := gc.processItem(timedItem.Object.(*node))
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Error syncing item %#v: %v", timedItem.Object, err))
		// retry if garbage collection of an object failed.
		gc.dirtyQueue.Add(timedItem)
		return
	}
	DirtyProcessingLatency.Observe(sinceInMicroseconds(gc.clock, timedItem.StartTime))
}

// apiResource consults the REST mapper to translate an <apiVersion, kind,
// namespaced> tuple to a metav1.APIResource struct.
func (gc *GarbageCollector) apiResource(apiVersion, kind string, namespaced bool) (*metav1.APIResource, error) {
	fqKind := schema.FromAPIVersionAndKind(apiVersion, kind)
	mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), apiVersion)
	if err != nil {
		return nil, fmt.Errorf("unable to get REST mapping for kind: %s, version: %s", kind, apiVersion)
	}
	glog.V(6).Infof("map kind %s, version %s to resource %s", kind, apiVersion, mapping.Resource)
	resource := metav1.APIResource{
		Name:       mapping.Resource,
		Namespaced: namespaced,
		Kind:       kind,
	}
	return &resource, nil
}

func (gc *GarbageCollector) deleteObject(item objectReference) error {
	fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
	client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
	if err != nil {
		return err
	}
	gc.registeredRateLimiter.registerIfNotPresent(fqKind.GroupVersion(), client, "garbage_collector_operation")
	resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0)
	if err != nil {
		return err
	}
	uid := item.UID
	preconditions := v1.Preconditions{UID: &uid}
	deleteOptions := v1.DeleteOptions{Preconditions: &preconditions}
	return client.Resource(resource, item.Namespace).Delete(item.Name, &deleteOptions)
}

func (gc *GarbageCollector) getObject(item objectReference) (*unstructured.Unstructured, error) {
	fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
	client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
	if err != nil {
		return nil, err
	}
	gc.registeredRateLimiter.registerIfNotPresent(fqKind.GroupVersion(), client, "garbage_collector_operation")
	resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0)
	if err != nil {
		return nil, err
	}
	return client.Resource(resource, item.Namespace).Get(item.Name)
}

func (gc *GarbageCollector) updateObject(item objectReference, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
	client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
	if err != nil {
		return nil, err
	}
	gc.registeredRateLimiter.registerIfNotPresent(fqKind.GroupVersion(), client, "garbage_collector_operation")
	resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0)
	if err != nil {
		return nil, err
	}
	return client.Resource(resource, item.Namespace).Update(obj)
}

func (gc *GarbageCollector) patchObject(item objectReference, patch []byte) (*unstructured.Unstructured, error) {
	fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
	client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
	if err != nil {
		return nil, err
	}
	gc.registeredRateLimiter.registerIfNotPresent(fqKind.GroupVersion(), client, "garbage_collector_operation")
	resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0)
	if err != nil {
		return nil, err
	}
	return client.Resource(resource, item.Namespace).Patch(item.Name, api.StrategicMergePatchType, patch)
}

func objectReferenceToUnstructured(ref objectReference) *unstructured.Unstructured {
	ret := &unstructured.Unstructured{}
	ret.SetKind(ref.Kind)
	ret.SetAPIVersion(ref.APIVersion)
	ret.SetUID(ref.UID)
	ret.SetNamespace(ref.Namespace)
	ret.SetName(ref.Name)
	return ret
}

func objectReferenceToMetadataOnlyObject(ref objectReference) *metaonly.MetadataOnlyObject {
	return &metaonly.MetadataOnlyObject{
		TypeMeta: metav1.TypeMeta{
			APIVersion: ref.APIVersion,
			Kind:       ref.Kind,
		},
		ObjectMeta: v1.ObjectMeta{
			Namespace: ref.Namespace,
			UID:       ref.UID,
			Name:      ref.Name,
		},
	}
}

func (gc *GarbageCollector) processItem(item *node) error {
	// Get the latest item from the API server
	latest, err := gc.getObject(item.identity)
	if err != nil {
		if errors.IsNotFound(err) {
			// the Propagator can add a "virtual" node for an owner that doesn't
			// exist yet, so we need to enqueue a virtual Delete event to remove
			// the virtual node from Propagator.uidToNode.
			glog.V(6).Infof("item %v not found, generating a virtual delete event", item.identity)
			event := &event{
				eventType: deleteEvent,
				obj:       objectReferenceToMetadataOnlyObject(item.identity),
			}
			glog.V(6).Infof("generating virtual delete event for %s\n\n", event.obj)
			gc.propagator.eventQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: gc.clock.Now(), Object: event})
			return nil
		}
		return err
	}
	if latest.GetUID() != item.identity.UID {
		glog.V(6).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity)
		event := &event{
			eventType: deleteEvent,
			obj:       objectReferenceToMetadataOnlyObject(item.identity),
		}
		glog.V(6).Infof("generating virtual delete event for %s\n\n", event.obj)
		gc.propagator.eventQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: gc.clock.Now(), Object: event})
		return nil
	}
	ownerReferences := latest.GetOwnerReferences()
	if len(ownerReferences) == 0 {
		glog.V(6).Infof("object %s doesn't have an owner, continuing to the next item", item.identity)
		return nil
	}
	// TODO: we need to remove dangling references if the object is not to be
	// deleted.
	for _, reference := range ownerReferences {
		if gc.absentOwnerCache.Has(reference.UID) {
			glog.V(6).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
			continue
		}
		// TODO: we need to verify the reference resource is supported by the
		// system. If it's not a valid resource, the garbage collector should i)
		// ignore the reference when deciding if the object should be deleted, and
		// ii) update the object to remove such references. This is to
		// prevent objects having references to an old resource from being
		// deleted during a cluster upgrade.
		fqKind := schema.FromAPIVersionAndKind(reference.APIVersion, reference.Kind)
		client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
		if err != nil {
			return err
		}
		resource, err := gc.apiResource(reference.APIVersion, reference.Kind, len(item.identity.Namespace) != 0)
		if err != nil {
			return err
		}
		owner, err := client.Resource(resource, item.identity.Namespace).Get(reference.Name)
		if err == nil {
			if owner.GetUID() != reference.UID {
				glog.V(6).Infof("object %s's owner %s/%s, %s is not found, UID mismatch", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
				gc.absentOwnerCache.Add(reference.UID)
				continue
			}
			glog.V(6).Infof("object %s has at least one existing owner, will not garbage collect", item.identity.UID)
			return nil
		} else if errors.IsNotFound(err) {
			gc.absentOwnerCache.Add(reference.UID)
			glog.V(6).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
		} else {
			return err
		}
	}
	glog.V(2).Infof("none of object %s's owners exist any more, will garbage collect it", item.identity)
	return gc.deleteObject(item.identity)
}

func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
	glog.Infof("Garbage Collector: Initializing")
	for _, monitor := range gc.monitors {
		go monitor.controller.Run(stopCh)
	}

	wait.PollInfinite(10*time.Second, func() (bool, error) {
		for _, monitor := range gc.monitors {
			if !monitor.controller.HasSynced() {
				glog.Infof("Garbage Collector: Waiting for resource monitors to be synced...")
				return false, nil
			}
		}
		return true, nil
	})
	glog.Infof("Garbage Collector: All monitored resources synced. Proceeding to collect garbage")

	// worker
	go wait.Until(gc.propagator.processEvent, 0, stopCh)

	for i := 0; i < workers; i++ {
		go wait.Until(gc.worker, 0, stopCh)
		go wait.Until(gc.orphanFinalizer, 0, stopCh)
	}
	Register()
	<-stopCh
	glog.Infof("Garbage Collector: Shutting down")
	gc.dirtyQueue.ShutDown()
	gc.orphanQueue.ShutDown()
	gc.propagator.eventQueue.ShutDown()
}

// *FOR TEST USE ONLY* It's not safe to call this function when the GC is still
// busy.
// GraphHasUID returns whether the Propagator has a particular UID stored in its
// uidToNode graph. It's useful for debugging.
func (gc *GarbageCollector) GraphHasUID(UIDs []types.UID) bool {
	for _, u := range UIDs {
		if _, ok := gc.propagator.uidToNode.Read(u); ok {
			return true
		}
	}
	return false
}
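For context, a minimal sketch of how this collector gets wired up and started, mirroring the construction used in garbagecollector_test.go below. The host address and the single-resource set are placeholder assumptions; real callers pass the deletable resources discovered from the API server:

package main

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
	"k8s.io/kubernetes/pkg/controller/garbagecollector"
	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
)

func main() {
	// Placeholder API server address, for illustration only.
	config := &restclient.Config{Host: "http://127.0.0.1:8080"}
	// The meta-only pool decodes just apiVersion, kind, and metadata.
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	// Monitor only pods for this sketch.
	resources := map[schema.GroupVersionResource]struct{}{{Version: "v1", Resource: "pods"}: {}}
	gc, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, api.Registry.RESTMapper(), resources)
	if err != nil {
		panic(err)
	}
	stopCh := make(chan struct{})
	gc.Run(5, stopCh) // five workers; blocks until stopCh is closed
}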
476 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector_test.go generated vendored Normal file
@@ -0,0 +1,476 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"

	_ "k8s.io/kubernetes/pkg/api/install"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/json"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
	"k8s.io/kubernetes/pkg/util/clock"
	"k8s.io/kubernetes/pkg/util/workqueue"
)

func TestNewGarbageCollector(t *testing.T) {
	config := &restclient.Config{}
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	podResource := map[schema.GroupVersionResource]struct{}{schema.GroupVersionResource{Version: "v1", Resource: "pods"}: {}}
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, api.Registry.RESTMapper(), podResource)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 1, len(gc.monitors))
}

// fakeAction records information about requests to aid in testing.
type fakeAction struct {
	method string
	path   string
	query  string
}

// String returns method=path to aid in testing
func (f *fakeAction) String() string {
	return strings.Join([]string{f.method, f.path}, "=")
}

type FakeResponse struct {
	statusCode int
	content    []byte
}

// fakeActionHandler holds a list of fakeActions received
type fakeActionHandler struct {
	// statusCode and content returned by this handler for different method + path.
	response map[string]FakeResponse

	lock    sync.Mutex
	actions []fakeAction
}

// ServeHTTP logs the action that occurred and always returns the associated status code
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
	fakeResponse, ok := f.response[request.Method+request.URL.Path]
	if !ok {
		fakeResponse.statusCode = 200
		fakeResponse.content = []byte("{\"kind\": \"List\"}")
	}
	response.Header().Set("Content-Type", "application/json")
	response.WriteHeader(fakeResponse.statusCode)
	response.Write(fakeResponse.content)
}

// testServerAndClientConfig returns a server that listens and a config that can reference it
func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) {
	srv := httptest.NewServer(http.HandlerFunc(handler))
	config := &restclient.Config{
		Host: srv.URL,
	}
	return srv, config
}

func setupGC(t *testing.T, config *restclient.Config) *GarbageCollector {
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	podResource := map[schema.GroupVersionResource]struct{}{schema.GroupVersionResource{Version: "v1", Resource: "pods"}: {}}
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, api.Registry.RESTMapper(), podResource)
	if err != nil {
		t.Fatal(err)
	}
	return gc
}

func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: v1.ObjectMeta{
			Name:            podName,
			Namespace:       "ns1",
			OwnerReferences: ownerReferences,
		},
	}
}

func serializeOrDie(t *testing.T, object interface{}) []byte {
	data, err := json.Marshal(object)
	if err != nil {
		t.Fatal(err)
	}
	return data
}

// test that the processItem function makes the expected actions.
func TestProcessItem(t *testing.T) {
	pod := getPod("ToBeDeletedPod", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "owner1",
			UID:        "123",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
				200,
				serializeOrDie(t, pod),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	gc := setupGC(t, clientConfig)
	item := &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The processItem routine should get the latest item from the server.
		owners: nil,
	}
	err := gc.processItem(item)
	if err != nil {
		t.Errorf("Unexpected Error: %v", err)
	}
	expectedActionSet := sets.NewString()
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
	expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")

	actualActionSet := sets.NewString()
	for _, action := range testHandler.actions {
		actualActionSet.Insert(action.String())
	}
	if !expectedActionSet.Equal(actualActionSet) {
		t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
			actualActionSet, expectedActionSet.Difference(actualActionSet))
	}
}

// verifyGraphInvariants verifies that all of a node's owners list the node as a
// dependent and vice versa. uidToNode has all the nodes in the graph.
func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *testing.T) {
	for myUID, node := range uidToNode {
		for dependentNode := range node.dependents {
			found := false
			for _, owner := range dependentNode.owners {
				if owner.UID == myUID {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("scenario: %s: node %s has node %s as a dependent, but it's not present in the latter node's owners list", scenario, node.identity, dependentNode.identity)
			}
		}

		for _, owner := range node.owners {
			ownerNode, ok := uidToNode[owner.UID]
			if !ok {
				// It's possible that the owner node doesn't exist
				continue
			}
			if _, ok := ownerNode.dependents[node]; !ok {
				t.Errorf("node %s has node %s as an owner, but it's not present in the latter node's dependents list", node.identity, ownerNode.identity)
			}
		}
	}
}

func createEvent(eventType eventType, selfUID string, owners []string) event {
	var ownerReferences []metav1.OwnerReference
	for i := 0; i < len(owners); i++ {
		ownerReferences = append(ownerReferences, metav1.OwnerReference{UID: types.UID(owners[i])})
	}
	return event{
		eventType: eventType,
		obj: &v1.Pod{
			ObjectMeta: v1.ObjectMeta{
				UID:             types.UID(selfUID),
				OwnerReferences: ownerReferences,
			},
		},
	}
}

func TestProcessEvent(t *testing.T) {
	var testScenarios = []struct {
		name string
		// a series of events that will be supplied to the
		// Propagator.eventQueue.
		events []event
	}{
		{
			name: "test1",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
			},
		},
		{
			name: "test2",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
		{
			name: "test3",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"3"}),
				createEvent(updateEvent, "2", []string{"4"}),
			},
		},
		{
			name: "reverse test2",
			events: []event{
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "1", []string{}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
	}

	for _, scenario := range testScenarios {
		propagator := &Propagator{
			eventQueue: workqueue.NewTimedWorkQueue(),
			uidToNode: &concurrentUIDToNode{
				RWMutex:   &sync.RWMutex{},
				uidToNode: make(map[types.UID]*node),
			},
			gc: &GarbageCollector{
				dirtyQueue:       workqueue.NewTimedWorkQueue(),
				clock:            clock.RealClock{},
				absentOwnerCache: NewUIDCache(2),
			},
		}
		for i := 0; i < len(scenario.events); i++ {
			propagator.eventQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: propagator.gc.clock.Now(), Object: &scenario.events[i]})
			propagator.processEvent()
			verifyGraphInvariants(scenario.name, propagator.uidToNode.uidToNode, t)
		}
	}
}

// TestDependentsRace relies on golang's data race detector to check if there is
// a data race on the dependents field.
func TestDependentsRace(t *testing.T) {
	gc := setupGC(t, &restclient.Config{})

	const updates = 100
	owner := &node{dependents: make(map[*node]struct{})}
	ownerUID := types.UID("owner")
	gc.propagator.uidToNode.Write(owner)
	go func() {
		for i := 0; i < updates; i++ {
			dependent := &node{}
			gc.propagator.addDependentToOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
			gc.propagator.removeDependentFromOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
		}
	}()
	go func() {
		gc.orphanQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: gc.clock.Now(), Object: owner})
		for i := 0; i < updates; i++ {
			gc.orphanFinalizer()
		}
	}()
}

// test that the list and watch functions correctly convert the ListOptions
func TestGCListWatcher(t *testing.T) {
	testHandler := &fakeActionHandler{}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientPool := dynamic.NewClientPool(clientConfig, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	podResource := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	client, err := clientPool.ClientForGroupVersionResource(podResource)
	if err != nil {
		t.Fatal(err)
	}
	lw := gcListWatcher(client, podResource)
	lw.Watch(v1.ListOptions{ResourceVersion: "1"})
	lw.List(v1.ListOptions{ResourceVersion: "1"})
	if e, a := 2, len(testHandler.actions); e != a {
		t.Errorf("expect %d requests, got %d", e, a)
	}
	if e, a := "resourceVersion=1", testHandler.actions[0].query; e != a {
		t.Errorf("expect %s, got %s", e, a)
	}
	if e, a := "resourceVersion=1", testHandler.actions[1].query; e != a {
		t.Errorf("expect %s, got %s", e, a)
	}
}

func podToGCNode(pod *v1.Pod) *node {
	return &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The processItem routine should get the latest item from the server.
		owners: nil,
	}
}

func TestAbsentUIDCache(t *testing.T) {
	rc1Pod1 := getPod("rc1Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
		},
	})
	rc1Pod2 := getPod("rc1Pod2", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
		},
	})
	rc2Pod1 := getPod("rc2Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc2",
			UID:        "2",
			APIVersion: "v1",
		},
	})
	rc3Pod1 := getPod("rc3Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc3",
			UID:        "3",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod1": {
				200,
				serializeOrDie(t, rc1Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod2": {
				200,
				serializeOrDie(t, rc1Pod2),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc2Pod1": {
				200,
				serializeOrDie(t, rc2Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc3Pod1": {
				200,
				serializeOrDie(t, rc3Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc2": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc3": {
				404,
				[]byte{},
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	gc := setupGC(t, clientConfig)
	gc.absentOwnerCache = NewUIDCache(2)
	gc.processItem(podToGCNode(rc1Pod1))
	gc.processItem(podToGCNode(rc2Pod1))
	// rc1 should already be in the cache, no request should be sent. rc1 should be promoted in the UIDCache
	gc.processItem(podToGCNode(rc1Pod2))
	// after this call, rc2 should be evicted from the UIDCache
	gc.processItem(podToGCNode(rc3Pod1))
	// check cache
	if !gc.absentOwnerCache.Has(types.UID("1")) {
		t.Errorf("expected rc1 to be in the cache")
	}
	if gc.absentOwnerCache.Has(types.UID("2")) {
		t.Errorf("expected rc2 to not exist in the cache")
	}
	if !gc.absentOwnerCache.Has(types.UID("3")) {
		t.Errorf("expected rc3 to be in the cache")
	}
	// check the requests sent to the server
	count := 0
	for _, action := range testHandler.actions {
		if action.String() == "GET=/api/v1/namespaces/ns1/replicationcontrollers/rc1" {
			count++
		}
	}
	if count != 1 {
		t.Errorf("expected only 1 GET rc1 request, got %d", count)
	}
}
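TestDependentsRace above only does its job under the race detector. Assuming a regular checkout of the upstream kubernetes repository (rather than this read-only vendored copy), the suite would be run with something like:

    go test -race k8s.io/kubernetes/pkg/controller/garbagecollector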
58 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD generated vendored Normal file
@@ -0,0 +1,58 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "metaonly.go",
        "types.generated.go",
        "types.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//vendor:github.com/ugorji/go/codec",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
        "//vendor:k8s.io/apimachinery/pkg/runtime/serializer",
        "//vendor:k8s.io/apimachinery/pkg/types",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["metaonly_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/install:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/api/meta",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
        "//vendor:k8s.io/apimachinery/pkg/runtime/serializer",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

65
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metaonly

import (
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/kubernetes/pkg/api"
)

func (obj *MetadataOnlyObject) GetObjectKind() schema.ObjectKind     { return obj }
func (obj *MetadataOnlyObjectList) GetObjectKind() schema.ObjectKind { return obj }

type metaOnlyJSONScheme struct{}

// This function can be extended to map different gvks to different
// MetadataOnlyObjects, each embedding a different version of ObjectMeta.
// Currently the system only supports v1.ObjectMeta.
func gvkToMetadataOnlyObject(gvk schema.GroupVersionKind) runtime.Object {
	if strings.HasSuffix(gvk.Kind, "List") {
		return &MetadataOnlyObjectList{}
	} else {
		return &MetadataOnlyObject{}
	}
}

func NewMetadataCodecFactory() serializer.CodecFactory {
	// populate another scheme from api.Scheme, registering every kind with
	// MetadataOnlyObject (or MetadataOnlyObjectList).
	scheme := runtime.NewScheme()
	allTypes := api.Scheme.AllKnownTypes()
	for kind := range allTypes {
		if kind.Version == runtime.APIVersionInternal {
			continue
		}
		metaOnlyObject := gvkToMetadataOnlyObject(kind)
		scheme.AddKnownTypeWithName(kind, metaOnlyObject)
	}
	scheme.AddUnversionedTypes(api.Unversioned, &metav1.Status{})
	return serializer.NewCodecFactory(scheme)
}

// String converts a MetadataOnlyObject to a human-readable string.
func (metaOnly MetadataOnlyObject) String() string {
	return fmt.Sprintf("%s/%s, name: %s, DeletionTimestamp:%v", metaOnly.TypeMeta.APIVersion, metaOnly.TypeMeta.Kind, metaOnly.ObjectMeta.Name, metaOnly.ObjectMeta.DeletionTimestamp)
}
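
The test file below exercises this codec end to end. As a quick orientation, here is a minimal decode sketch, not part of the vendored diff, under the same assumptions the test makes: the package is importable at its vendored path, and a blank import of pkg/api/install has registered the core kinds in api.Scheme.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	_ "k8s.io/kubernetes/pkg/api/install" // registers core kinds in api.Scheme
	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
)

func main() {
	podJSON := []byte(`{"kind":"Pod","apiVersion":"v1","metadata":{"name":"pod"},"spec":{"containers":[{"name":"c","image":"img"}]}}`)
	cf := serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON)
	if !ok {
		panic("expected a JSON serializer")
	}
	codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "", Version: "v1"})
	obj, _, err := codec.Decode(podJSON, nil, nil)
	if err != nil {
		panic(err)
	}
	// Only TypeMeta and ObjectMeta survive the decode; the pod spec is discarded.
	fmt.Println(obj.(*metaonly.MetadataOnlyObject).Name)
}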

164
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly_test.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metaonly

import (
	"encoding/json"
	"reflect"
	"testing"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	_ "k8s.io/kubernetes/pkg/api/install"
	"k8s.io/kubernetes/pkg/api/v1"
)

func getPod() *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: v1.ObjectMeta{
			Name: "pod",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1234"},
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
				},
			},
		},
	}
}

func getPodJson(t *testing.T) []byte {
	data, err := json.Marshal(getPod())
	if err != nil {
		t.Fatal(err)
	}
	return data
}

func getPodListJson(t *testing.T) []byte {
	data, err := json.Marshal(&v1.PodList{
		TypeMeta: metav1.TypeMeta{
			Kind:       "PodList",
			APIVersion: "v1",
		},
		Items: []v1.Pod{
			*getPod(),
			*getPod(),
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	return data
}

func verifyMetadata(description string, t *testing.T, in *MetadataOnlyObject) {
	pod := getPod()
	if e, a := pod.ObjectMeta, in.ObjectMeta; !reflect.DeepEqual(e, a) {
		t.Errorf("%s: expected %#v, got %#v", description, e, a)
	}
}

func TestDecodeToMetadataOnlyObject(t *testing.T) {
	data := getPodJson(t)
	cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()}
	info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON)
	if !ok {
		t.Fatalf("expected to get a JSON serializer")
	}
	codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"})
	// decode with into
	into := &MetadataOnlyObject{}
	ret, _, err := codec.Decode(data, nil, into)
	if err != nil {
		t.Fatal(err)
	}
	metaOnly, ok := ret.(*MetadataOnlyObject)
	if !ok {
		t.Fatalf("expected ret to be *MetadataOnlyObject")
	}
	verifyMetadata("check returned metaonly with into", t, metaOnly)
	verifyMetadata("check into", t, into)
	// decode without into
	ret, _, err = codec.Decode(data, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	metaOnly, ok = ret.(*MetadataOnlyObject)
	if !ok {
		t.Fatalf("expected ret to be *MetadataOnlyObject")
	}
	verifyMetadata("check returned metaonly without into", t, metaOnly)
}

func verifyListMetadata(t *testing.T, metaOnlyList *MetadataOnlyObjectList) {
	items, err := meta.ExtractList(metaOnlyList)
	if err != nil {
		t.Fatal(err)
	}
	for _, item := range items {
		metaOnly, ok := item.(*MetadataOnlyObject)
		if !ok {
			t.Fatalf("expected item to be *MetadataOnlyObject")
		}
		verifyMetadata("check list", t, metaOnly)
	}
}

func TestDecodeToMetadataOnlyObjectList(t *testing.T) {
	data := getPodListJson(t)
	cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()}
	info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON)
	if !ok {
		t.Fatalf("expected to get a JSON serializer")
	}
	codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"})
	// decode with into
	into := &MetadataOnlyObjectList{}
	ret, _, err := codec.Decode(data, nil, into)
	if err != nil {
		t.Fatal(err)
	}
	metaOnlyList, ok := ret.(*MetadataOnlyObjectList)
	if !ok {
		t.Fatalf("expected ret to be *MetadataOnlyObjectList")
	}
	verifyListMetadata(t, metaOnlyList)
	verifyListMetadata(t, into)
	// decode without into
	ret, _, err = codec.Decode(data, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	metaOnlyList, ok = ret.(*MetadataOnlyObjectList)
	if !ok {
		t.Fatalf("expected ret to be *MetadataOnlyObjectList")
	}
	verifyListMetadata(t, metaOnlyList)
}

842
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/types.generated.go
generated
vendored
Normal file
@@ -0,0 +1,842 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED BY codecgen.
// ************************************************************

package metaonly

import (
	"errors"
	"fmt"
	codec1978 "github.com/ugorji/go/codec"
	pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	pkg3_types "k8s.io/apimachinery/pkg/types"
	pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
	"reflect"
	"runtime"
	time "time"
)

const (
	// ----- content types ----
	codecSelferC_UTF81234 = 1
	codecSelferC_RAW1234  = 0
	// ----- value types used ----
	codecSelferValueTypeArray1234 = 10
	codecSelferValueTypeMap1234   = 9
	// ----- containerStateValues ----
	codecSelfer_containerMapKey1234    = 2
	codecSelfer_containerMapValue1234  = 3
	codecSelfer_containerMapEnd1234    = 4
	codecSelfer_containerArrayElem1234 = 6
	codecSelfer_containerArrayEnd1234  = 7
)

var (
	codecSelferBitsize1234                         = uint8(reflect.TypeOf(uint(0)).Bits())
	codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`)
)

type codecSelfer1234 struct{}

func init() {
	if codec1978.GenVersion != 5 {
		_, file, _, _ := runtime.Caller(0)
		err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
			5, codec1978.GenVersion, file)
		panic(err)
	}
	if false { // reference the types, but skip this branch at build/run time
		var v0 pkg1_v1.TypeMeta
		var v1 pkg3_types.UID
		var v2 pkg2_v1.ObjectMeta
		var v3 time.Time
		_, _, _, _ = v0, v1, v2, v3
	}
}

func (x *MetadataOnlyObject) CodecEncodeSelf(e *codec1978.Encoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperEncoder(e)
	_, _, _ = h, z, r
	if x == nil {
		r.EncodeNil()
	} else {
		yym1 := z.EncBinary()
		_ = yym1
		if false {
		} else if z.HasExtensions() && z.EncExt(x) {
		} else {
			yysep2 := !z.EncBinary()
			yy2arr2 := z.EncBasicHandle().StructToArray
			var yyq2 [3]bool
			_, _, _ = yysep2, yyq2, yy2arr2
			const yyr2 bool = false
			yyq2[0] = x.Kind != ""
			yyq2[1] = x.APIVersion != ""
			yyq2[2] = true
			var yynn2 int
			if yyr2 || yy2arr2 {
				r.EncodeArrayStart(3)
			} else {
				yynn2 = 0
				for _, b := range yyq2 {
					if b {
						yynn2++
					}
				}
				r.EncodeMapStart(yynn2)
				yynn2 = 0
			}
			if yyr2 || yy2arr2 {
				z.EncSendContainerState(codecSelfer_containerArrayElem1234)
				if yyq2[0] {
					yym4 := z.EncBinary()
					_ = yym4
					if false {
					} else {
						r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
					}
				} else {
					r.EncodeString(codecSelferC_UTF81234, "")
				}
			} else {
				if yyq2[0] {
					z.EncSendContainerState(codecSelfer_containerMapKey1234)
					r.EncodeString(codecSelferC_UTF81234, string("kind"))
					z.EncSendContainerState(codecSelfer_containerMapValue1234)
					yym5 := z.EncBinary()
					_ = yym5
					if false {
					} else {
						r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
					}
				}
			}
			if yyr2 || yy2arr2 {
				z.EncSendContainerState(codecSelfer_containerArrayElem1234)
				if yyq2[1] {
					yym7 := z.EncBinary()
					_ = yym7
					if false {
					} else {
						r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
					}
				} else {
					r.EncodeString(codecSelferC_UTF81234, "")
				}
			} else {
				if yyq2[1] {
					z.EncSendContainerState(codecSelfer_containerMapKey1234)
					r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
					z.EncSendContainerState(codecSelfer_containerMapValue1234)
					yym8 := z.EncBinary()
					_ = yym8
					if false {
					} else {
						r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
					}
				}
			}
			if yyr2 || yy2arr2 {
				z.EncSendContainerState(codecSelfer_containerArrayElem1234)
				if yyq2[2] {
					yy10 := &x.ObjectMeta
					yy10.CodecEncodeSelf(e)
				} else {
					r.EncodeNil()
				}
			} else {
				if yyq2[2] {
					z.EncSendContainerState(codecSelfer_containerMapKey1234)
					r.EncodeString(codecSelferC_UTF81234, string("metadata"))
					z.EncSendContainerState(codecSelfer_containerMapValue1234)
					yy12 := &x.ObjectMeta
					yy12.CodecEncodeSelf(e)
				}
			}
			if yyr2 || yy2arr2 {
				z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
			} else {
				z.EncSendContainerState(codecSelfer_containerMapEnd1234)
			}
		}
	}
}

func (x *MetadataOnlyObject) CodecDecodeSelf(d *codec1978.Decoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperDecoder(d)
	_, _, _ = h, z, r
	yym1 := z.DecBinary()
	_ = yym1
	if false {
	} else if z.HasExtensions() && z.DecExt(x) {
	} else {
		yyct2 := r.ContainerType()
		if yyct2 == codecSelferValueTypeMap1234 {
			yyl2 := r.ReadMapStart()
			if yyl2 == 0 {
				z.DecSendContainerState(codecSelfer_containerMapEnd1234)
			} else {
				x.codecDecodeSelfFromMap(yyl2, d)
			}
		} else if yyct2 == codecSelferValueTypeArray1234 {
			yyl2 := r.ReadArrayStart()
			if yyl2 == 0 {
				z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
			} else {
				x.codecDecodeSelfFromArray(yyl2, d)
			}
		} else {
			panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
		}
	}
}

func (x *MetadataOnlyObject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperDecoder(d)
	_, _, _ = h, z, r
	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
	_ = yys3Slc
	var yyhl3 bool = l >= 0
	for yyj3 := 0; ; yyj3++ {
		if yyhl3 {
			if yyj3 >= l {
				break
			}
		} else {
			if r.CheckBreak() {
				break
			}
		}
		z.DecSendContainerState(codecSelfer_containerMapKey1234)
		yys3Slc = r.DecodeBytes(yys3Slc, true, true)
		yys3 := string(yys3Slc)
		z.DecSendContainerState(codecSelfer_containerMapValue1234)
		switch yys3 {
		case "kind":
			if r.TryDecodeAsNil() {
				x.Kind = ""
			} else {
				yyv4 := &x.Kind
				yym5 := z.DecBinary()
				_ = yym5
				if false {
				} else {
					*((*string)(yyv4)) = r.DecodeString()
				}
			}
		case "apiVersion":
			if r.TryDecodeAsNil() {
				x.APIVersion = ""
			} else {
				yyv6 := &x.APIVersion
				yym7 := z.DecBinary()
				_ = yym7
				if false {
				} else {
					*((*string)(yyv6)) = r.DecodeString()
				}
			}
		case "metadata":
			if r.TryDecodeAsNil() {
				x.ObjectMeta = pkg2_v1.ObjectMeta{}
			} else {
				yyv8 := &x.ObjectMeta
				yyv8.CodecDecodeSelf(d)
			}
		default:
			z.DecStructFieldNotFound(-1, yys3)
		} // end switch yys3
	} // end for yyj3
	z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}

func (x *MetadataOnlyObject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperDecoder(d)
	_, _, _ = h, z, r
	var yyj9 int
	var yyb9 bool
	var yyhl9 bool = l >= 0
	yyj9++
	if yyhl9 {
		yyb9 = yyj9 > l
	} else {
		yyb9 = r.CheckBreak()
	}
	if yyb9 {
		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
		return
	}
	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
	if r.TryDecodeAsNil() {
		x.Kind = ""
	} else {
		yyv10 := &x.Kind
		yym11 := z.DecBinary()
		_ = yym11
		if false {
		} else {
			*((*string)(yyv10)) = r.DecodeString()
		}
	}
	yyj9++
	if yyhl9 {
		yyb9 = yyj9 > l
	} else {
		yyb9 = r.CheckBreak()
	}
	if yyb9 {
		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
		return
	}
	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
	if r.TryDecodeAsNil() {
		x.APIVersion = ""
	} else {
		yyv12 := &x.APIVersion
		yym13 := z.DecBinary()
		_ = yym13
		if false {
		} else {
			*((*string)(yyv12)) = r.DecodeString()
		}
	}
	yyj9++
	if yyhl9 {
		yyb9 = yyj9 > l
	} else {
		yyb9 = r.CheckBreak()
	}
	if yyb9 {
		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
		return
	}
	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
	if r.TryDecodeAsNil() {
		x.ObjectMeta = pkg2_v1.ObjectMeta{}
	} else {
		yyv14 := &x.ObjectMeta
		yyv14.CodecDecodeSelf(d)
	}
	for {
		yyj9++
		if yyhl9 {
			yyb9 = yyj9 > l
		} else {
			yyb9 = r.CheckBreak()
		}
		if yyb9 {
			break
		}
		z.DecSendContainerState(codecSelfer_containerArrayElem1234)
		z.DecStructFieldNotFound(yyj9-1, "")
	}
	z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}

func (x *MetadataOnlyObjectList) CodecEncodeSelf(e *codec1978.Encoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperEncoder(e)
	_, _, _ = h, z, r
	if x == nil {
		r.EncodeNil()
	} else {
		yym1 := z.EncBinary()
		_ = yym1
		if false {
		} else if z.HasExtensions() && z.EncExt(x) {
		} else {
			yysep2 := !z.EncBinary()
			yy2arr2 := z.EncBasicHandle().StructToArray
			var yyq2 [4]bool
			_, _, _ = yysep2, yyq2, yy2arr2
			const yyr2 bool = false
			yyq2[0] = x.Kind != ""
			yyq2[1] = x.APIVersion != ""
			yyq2[2] = true
			var yynn2 int
			if yyr2 || yy2arr2 {
				r.EncodeArrayStart(4)
			} else {
				yynn2 = 1
				for _, b := range yyq2 {
					if b {
						yynn2++
					}
				}
				r.EncodeMapStart(yynn2)
				yynn2 = 0
			}
			if yyr2 || yy2arr2 {
				z.EncSendContainerState(codecSelfer_containerArrayElem1234)
				if yyq2[0] {
					yym4 := z.EncBinary()
					_ = yym4
					if false {
					} else {
						r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
					}
				} else {
					r.EncodeString(codecSelferC_UTF81234, "")
				}
			} else {
				if yyq2[0] {
					z.EncSendContainerState(codecSelfer_containerMapKey1234)
					r.EncodeString(codecSelferC_UTF81234, string("kind"))
					z.EncSendContainerState(codecSelfer_containerMapValue1234)
					yym5 := z.EncBinary()
					_ = yym5
					if false {
					} else {
						r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
					}
				}
			}
			if yyr2 || yy2arr2 {
				z.EncSendContainerState(codecSelfer_containerArrayElem1234)
				if yyq2[1] {
					yym7 := z.EncBinary()
					_ = yym7
					if false {
					} else {
						r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
					}
				} else {
					r.EncodeString(codecSelferC_UTF81234, "")
				}
			} else {
				if yyq2[1] {
					z.EncSendContainerState(codecSelfer_containerMapKey1234)
					r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
					z.EncSendContainerState(codecSelfer_containerMapValue1234)
					yym8 := z.EncBinary()
					_ = yym8
					if false {
					} else {
						r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
					}
				}
			}
			if yyr2 || yy2arr2 {
				z.EncSendContainerState(codecSelfer_containerArrayElem1234)
				if yyq2[2] {
					yy10 := &x.ListMeta
					yym11 := z.EncBinary()
					_ = yym11
					if false {
					} else if z.HasExtensions() && z.EncExt(yy10) {
					} else {
						z.EncFallback(yy10)
					}
				} else {
					r.EncodeNil()
				}
			} else {
				if yyq2[2] {
					z.EncSendContainerState(codecSelfer_containerMapKey1234)
					r.EncodeString(codecSelferC_UTF81234, string("metadata"))
					z.EncSendContainerState(codecSelfer_containerMapValue1234)
					yy12 := &x.ListMeta
					yym13 := z.EncBinary()
					_ = yym13
					if false {
					} else if z.HasExtensions() && z.EncExt(yy12) {
					} else {
						z.EncFallback(yy12)
					}
				}
			}
			if yyr2 || yy2arr2 {
				z.EncSendContainerState(codecSelfer_containerArrayElem1234)
				if x.Items == nil {
					r.EncodeNil()
				} else {
					yym15 := z.EncBinary()
					_ = yym15
					if false {
					} else {
						h.encSliceMetadataOnlyObject(([]MetadataOnlyObject)(x.Items), e)
					}
				}
			} else {
				z.EncSendContainerState(codecSelfer_containerMapKey1234)
				r.EncodeString(codecSelferC_UTF81234, string("items"))
				z.EncSendContainerState(codecSelfer_containerMapValue1234)
				if x.Items == nil {
					r.EncodeNil()
				} else {
					yym16 := z.EncBinary()
					_ = yym16
					if false {
					} else {
						h.encSliceMetadataOnlyObject(([]MetadataOnlyObject)(x.Items), e)
					}
				}
			}
			if yyr2 || yy2arr2 {
				z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
			} else {
				z.EncSendContainerState(codecSelfer_containerMapEnd1234)
			}
		}
	}
}

func (x *MetadataOnlyObjectList) CodecDecodeSelf(d *codec1978.Decoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperDecoder(d)
	_, _, _ = h, z, r
	yym1 := z.DecBinary()
	_ = yym1
	if false {
	} else if z.HasExtensions() && z.DecExt(x) {
	} else {
		yyct2 := r.ContainerType()
		if yyct2 == codecSelferValueTypeMap1234 {
			yyl2 := r.ReadMapStart()
			if yyl2 == 0 {
				z.DecSendContainerState(codecSelfer_containerMapEnd1234)
			} else {
				x.codecDecodeSelfFromMap(yyl2, d)
			}
		} else if yyct2 == codecSelferValueTypeArray1234 {
			yyl2 := r.ReadArrayStart()
			if yyl2 == 0 {
				z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
			} else {
				x.codecDecodeSelfFromArray(yyl2, d)
			}
		} else {
			panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
		}
	}
}

func (x *MetadataOnlyObjectList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperDecoder(d)
	_, _, _ = h, z, r
	var yys3Slc = z.DecScratchBuffer() // default slice to decode into
	_ = yys3Slc
	var yyhl3 bool = l >= 0
	for yyj3 := 0; ; yyj3++ {
		if yyhl3 {
			if yyj3 >= l {
				break
			}
		} else {
			if r.CheckBreak() {
				break
			}
		}
		z.DecSendContainerState(codecSelfer_containerMapKey1234)
		yys3Slc = r.DecodeBytes(yys3Slc, true, true)
		yys3 := string(yys3Slc)
		z.DecSendContainerState(codecSelfer_containerMapValue1234)
		switch yys3 {
		case "kind":
			if r.TryDecodeAsNil() {
				x.Kind = ""
			} else {
				yyv4 := &x.Kind
				yym5 := z.DecBinary()
				_ = yym5
				if false {
				} else {
					*((*string)(yyv4)) = r.DecodeString()
				}
			}
		case "apiVersion":
			if r.TryDecodeAsNil() {
				x.APIVersion = ""
			} else {
				yyv6 := &x.APIVersion
				yym7 := z.DecBinary()
				_ = yym7
				if false {
				} else {
					*((*string)(yyv6)) = r.DecodeString()
				}
			}
		case "metadata":
			if r.TryDecodeAsNil() {
				x.ListMeta = pkg1_v1.ListMeta{}
			} else {
				yyv8 := &x.ListMeta
				yym9 := z.DecBinary()
				_ = yym9
				if false {
				} else if z.HasExtensions() && z.DecExt(yyv8) {
				} else {
					z.DecFallback(yyv8, false)
				}
			}
		case "items":
			if r.TryDecodeAsNil() {
				x.Items = nil
			} else {
				yyv10 := &x.Items
				yym11 := z.DecBinary()
				_ = yym11
				if false {
				} else {
					h.decSliceMetadataOnlyObject((*[]MetadataOnlyObject)(yyv10), d)
				}
			}
		default:
			z.DecStructFieldNotFound(-1, yys3)
		} // end switch yys3
	} // end for yyj3
	z.DecSendContainerState(codecSelfer_containerMapEnd1234)
}

func (x *MetadataOnlyObjectList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperDecoder(d)
	_, _, _ = h, z, r
	var yyj12 int
	var yyb12 bool
	var yyhl12 bool = l >= 0
	yyj12++
	if yyhl12 {
		yyb12 = yyj12 > l
	} else {
		yyb12 = r.CheckBreak()
	}
	if yyb12 {
		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
		return
	}
	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
	if r.TryDecodeAsNil() {
		x.Kind = ""
	} else {
		yyv13 := &x.Kind
		yym14 := z.DecBinary()
		_ = yym14
		if false {
		} else {
			*((*string)(yyv13)) = r.DecodeString()
		}
	}
	yyj12++
	if yyhl12 {
		yyb12 = yyj12 > l
	} else {
		yyb12 = r.CheckBreak()
	}
	if yyb12 {
		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
		return
	}
	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
	if r.TryDecodeAsNil() {
		x.APIVersion = ""
	} else {
		yyv15 := &x.APIVersion
		yym16 := z.DecBinary()
		_ = yym16
		if false {
		} else {
			*((*string)(yyv15)) = r.DecodeString()
		}
	}
	yyj12++
	if yyhl12 {
		yyb12 = yyj12 > l
	} else {
		yyb12 = r.CheckBreak()
	}
	if yyb12 {
		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
		return
	}
	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
	if r.TryDecodeAsNil() {
		x.ListMeta = pkg1_v1.ListMeta{}
	} else {
		yyv17 := &x.ListMeta
		yym18 := z.DecBinary()
		_ = yym18
		if false {
		} else if z.HasExtensions() && z.DecExt(yyv17) {
		} else {
			z.DecFallback(yyv17, false)
		}
	}
	yyj12++
	if yyhl12 {
		yyb12 = yyj12 > l
	} else {
		yyb12 = r.CheckBreak()
	}
	if yyb12 {
		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
		return
	}
	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
	if r.TryDecodeAsNil() {
		x.Items = nil
	} else {
		yyv19 := &x.Items
		yym20 := z.DecBinary()
		_ = yym20
		if false {
		} else {
			h.decSliceMetadataOnlyObject((*[]MetadataOnlyObject)(yyv19), d)
		}
	}
	for {
		yyj12++
		if yyhl12 {
			yyb12 = yyj12 > l
		} else {
			yyb12 = r.CheckBreak()
		}
		if yyb12 {
			break
		}
		z.DecSendContainerState(codecSelfer_containerArrayElem1234)
		z.DecStructFieldNotFound(yyj12-1, "")
	}
	z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}

func (x codecSelfer1234) encSliceMetadataOnlyObject(v []MetadataOnlyObject, e *codec1978.Encoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperEncoder(e)
	_, _, _ = h, z, r
	r.EncodeArrayStart(len(v))
	for _, yyv1 := range v {
		z.EncSendContainerState(codecSelfer_containerArrayElem1234)
		yy2 := &yyv1
		yy2.CodecEncodeSelf(e)
	}
	z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
}

func (x codecSelfer1234) decSliceMetadataOnlyObject(v *[]MetadataOnlyObject, d *codec1978.Decoder) {
	var h codecSelfer1234
	z, r := codec1978.GenHelperDecoder(d)
	_, _, _ = h, z, r

	yyv1 := *v
	yyh1, yyl1 := z.DecSliceHelperStart()
	var yyc1 bool
	_ = yyc1
	if yyl1 == 0 {
		if yyv1 == nil {
			yyv1 = []MetadataOnlyObject{}
			yyc1 = true
		} else if len(yyv1) != 0 {
			yyv1 = yyv1[:0]
			yyc1 = true
		}
	} else if yyl1 > 0 {
		var yyrr1, yyrl1 int
		var yyrt1 bool
		_, _ = yyrl1, yyrt1
		yyrr1 = yyl1 // len(yyv1)
		if yyl1 > cap(yyv1) {

			yyrg1 := len(yyv1) > 0
			yyv21 := yyv1
			yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 256)
			if yyrt1 {
				if yyrl1 <= cap(yyv1) {
					yyv1 = yyv1[:yyrl1]
				} else {
					yyv1 = make([]MetadataOnlyObject, yyrl1)
				}
			} else {
				yyv1 = make([]MetadataOnlyObject, yyrl1)
			}
			yyc1 = true
			yyrr1 = len(yyv1)
			if yyrg1 {
				copy(yyv1, yyv21)
			}
		} else if yyl1 != len(yyv1) {
			yyv1 = yyv1[:yyl1]
			yyc1 = true
		}
		yyj1 := 0
		for ; yyj1 < yyrr1; yyj1++ {
			yyh1.ElemContainerState(yyj1)
			if r.TryDecodeAsNil() {
				yyv1[yyj1] = MetadataOnlyObject{}
			} else {
				yyv2 := &yyv1[yyj1]
				yyv2.CodecDecodeSelf(d)
			}

		}
		if yyrt1 {
			for ; yyj1 < yyl1; yyj1++ {
				yyv1 = append(yyv1, MetadataOnlyObject{})
				yyh1.ElemContainerState(yyj1)
				if r.TryDecodeAsNil() {
					yyv1[yyj1] = MetadataOnlyObject{}
				} else {
					yyv3 := &yyv1[yyj1]
					yyv3.CodecDecodeSelf(d)
				}

			}
		}

	} else {
		yyj1 := 0
		for ; !r.CheckBreak(); yyj1++ {

			if yyj1 >= len(yyv1) {
				yyv1 = append(yyv1, MetadataOnlyObject{}) // var yyz1 MetadataOnlyObject
				yyc1 = true
			}
			yyh1.ElemContainerState(yyj1)
			if yyj1 < len(yyv1) {
				if r.TryDecodeAsNil() {
					yyv1[yyj1] = MetadataOnlyObject{}
				} else {
					yyv4 := &yyv1[yyj1]
					yyv4.CodecDecodeSelf(d)
				}

			} else {
				z.DecSwallow()
			}

		}
		if yyj1 < len(yyv1) {
			yyv1 = yyv1[:yyj1]
			yyc1 = true
		} else if yyj1 == 0 && yyv1 == nil {
			yyv1 = []MetadataOnlyObject{}
			yyc1 = true
		}
	}
	yyh1.End()
	if yyc1 {
		*v = yyv1
	}
}

42
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/types.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metaonly

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// MetadataOnlyObject allows decoding only the apiVersion, kind, and metadata fields of
// JSON data.
// TODO: enable meta-only decoding for protobuf.
type MetadataOnlyObject struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	v1.ObjectMeta `json:"metadata,omitempty"`
}

// MetadataOnlyObjectList allows decoding from JSON data only the typemeta and metadata of
// a list, and those of the enclosed objects.
// TODO: enable meta-only decoding for protobuf.
type MetadataOnlyObjectList struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []MetadataOnlyObject `json:"items"`
}
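
Because the struct declares only TypeMeta and ObjectMeta fields, even a plain encoding/json unmarshal illustrates the "metadata only" behavior: unknown keys such as spec simply have nowhere to land. A small sketch, not part of the vendored diff, assuming the package is importable at its vendored path:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
)

func main() {
	data := []byte(`{"kind":"Pod","apiVersion":"v1","metadata":{"name":"pod","uid":"1234"},"spec":{"containers":[{"name":"c"}]}}`)
	var obj metaonly.MetadataOnlyObject
	if err := json.Unmarshal(data, &obj); err != nil {
		panic(err)
	}
	// Prints: Pod pod 1234 — the "spec" key is silently dropped.
	fmt.Println(obj.Kind, obj.Name, obj.UID)
}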

73
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"sync"
	"time"

	"k8s.io/kubernetes/pkg/util/clock"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	GarbageCollectSubsystem    = "garbage_collector"
	EventProcessingLatencyKey  = "event_processing_latency_microseconds"
	DirtyProcessingLatencyKey  = "dirty_processing_latency_microseconds"
	OrphanProcessingLatencyKey = "orphan_processing_latency_microseconds"
)

var (
	EventProcessingLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: GarbageCollectSubsystem,
			Name:      EventProcessingLatencyKey,
			Help:      "Time in microseconds an event spent in the eventQueue",
		},
	)
	DirtyProcessingLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: GarbageCollectSubsystem,
			Name:      DirtyProcessingLatencyKey,
			Help:      "Time in microseconds an item spent in the dirtyQueue",
		},
	)
	OrphanProcessingLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: GarbageCollectSubsystem,
			Name:      OrphanProcessingLatencyKey,
			Help:      "Time in microseconds an item spent in the orphanQueue",
		},
	)
)

var registerMetrics sync.Once

// Register all metrics.
func Register() {
	// Register the metrics.
	registerMetrics.Do(func() {
		prometheus.MustRegister(EventProcessingLatency)
		prometheus.MustRegister(DirtyProcessingLatency)
		prometheus.MustRegister(OrphanProcessingLatency)
	})
}

func sinceInMicroseconds(clock clock.Clock, start time.Time) float64 {
	return float64(clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}
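
For context, a minimal sketch of how these summaries are meant to be fed. It is not part of the vendored diff: exampleObserve is a hypothetical helper, written as if inside this package since sinceInMicroseconds is unexported, and clock.RealClock is the production Clock implementation from pkg/util/clock.

package garbagecollector

import "k8s.io/kubernetes/pkg/util/clock"

// exampleObserve is a hypothetical illustration, not part of the controller:
// record how long one event spent in a queue.
func exampleObserve() {
	Register() // idempotent thanks to the sync.Once above
	realClock := clock.RealClock{}
	start := realClock.Now()
	// ... dequeue and process an event here ...
	EventProcessingLatency.Observe(sinceInMicroseconds(realClock, start))
}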

60
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/rate_limiter_helper.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"fmt"
	"strings"
	"sync"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
	"k8s.io/kubernetes/pkg/util/metrics"
)

// RegisteredRateLimiter records the registered RateLimiters to avoid
// duplication.
type RegisteredRateLimiter struct {
	rateLimiters map[schema.GroupVersion]*sync.Once
}

// NewRegisteredRateLimiter returns a new RegisteredRateLimiter.
// TODO: NewRegisteredRateLimiter is not dynamic. We need to find a better way
// when the GC dynamically changes the resources it monitors.
func NewRegisteredRateLimiter(resources map[schema.GroupVersionResource]struct{}) *RegisteredRateLimiter {
	rateLimiters := make(map[schema.GroupVersion]*sync.Once)
	for resource := range resources {
		gv := resource.GroupVersion()
		if _, found := rateLimiters[gv]; !found {
			rateLimiters[gv] = &sync.Once{}
		}
	}
	return &RegisteredRateLimiter{rateLimiters: rateLimiters}
}

func (r *RegisteredRateLimiter) registerIfNotPresent(gv schema.GroupVersion, client *dynamic.Client, prefix string) {
	once, found := r.rateLimiters[gv]
	if !found {
		return
	}
	once.Do(func() {
		if rateLimiter := client.GetRateLimiter(); rateLimiter != nil {
			group := strings.Replace(gv.Group, ".", ":", -1)
			metrics.RegisterMetricAndTrackRateLimiterUsage(fmt.Sprintf("%s_%s_%s", prefix, group, gv.Version), rateLimiter)
		}
	})
}
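
A sketch of the intended call pattern, not part of the vendored diff. It assumes a *dynamic.Client obtained elsewhere; trackGCRateLimiter is a hypothetical helper name, written as if inside this package since registerIfNotPresent is unexported.

package garbagecollector

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
)

// trackGCRateLimiter is a hypothetical illustration: declare the monitored
// resources up front, then register each group/version's rate limiter metric
// at most once, the first time a client for it is used.
func trackGCRateLimiter(client *dynamic.Client) {
	resources := map[schema.GroupVersionResource]struct{}{
		{Group: "", Version: "v1", Resource: "pods"}: {},
	}
	rl := NewRegisteredRateLimiter(resources)
	rl.registerIfNotPresent(schema.GroupVersion{Group: "", Version: "v1"}, client, "garbage_collector")
}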

52
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/uid_cache.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"sync"

	"github.com/golang/groupcache/lru"
	"k8s.io/apimachinery/pkg/types"
)

// UIDCache is an LRU cache for UIDs.
type UIDCache struct {
	mutex sync.Mutex
	cache *lru.Cache
}

// NewUIDCache returns a UIDCache.
func NewUIDCache(maxCacheEntries int) *UIDCache {
	return &UIDCache{
		cache: lru.New(maxCacheEntries),
	}
}

// Add adds a UID to the cache.
func (c *UIDCache) Add(uid types.UID) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.cache.Add(uid, nil)
}

// Has reports whether a UID is in the cache.
func (c *UIDCache) Has(uid types.UID) bool {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	_, found := c.cache.Get(uid)
	return found
}
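
A minimal usage sketch mirroring TestAbsentUIDCache above, not part of the vendored diff. Note that Has promotes an entry because lru.Cache.Get marks it most recently used, which is exactly the eviction order that test asserts.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/controller/garbagecollector"
)

func main() {
	cache := garbagecollector.NewUIDCache(2)
	cache.Add(types.UID("1"))
	cache.Add(types.UID("2"))
	cache.Has(types.UID("1")) // lookup promotes "1" to most recently used
	cache.Add(types.UID("3")) // over capacity: evicts "2", the least recently used
	// Prints: true false true
	fmt.Println(cache.Has(types.UID("1")), cache.Has(types.UID("2")), cache.Has(types.UID("3")))
}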