slub: extract common code to remove objects from partial list without locking

There are a couple of places where we repeat the same statements when removing
a page from the partial list. Consolidate that into __remove_partial().

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Author: Christoph Lameter, 2010-09-28 08:10:28 -05:00 (committed by Pekka Enberg)
parent f7cb193362
commit 62e346a830
1 changed file with 11 additions and 8 deletions
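
For illustration, a minimal, self-contained sketch of the pattern this patch applies: the unlocked list manipulation and counter update are pulled into a single __remove_partial() helper, and every caller that already holds the node's list_lock invokes the helper instead of repeating the two statements. The struct layouts, the pthread mutex, and the main() driver below are simplified stand-ins for the real SLUB code, not the kernel implementation.

#include <stdio.h>
#include <pthread.h>

/* Simplified stand-ins for the real SLUB structures (illustration only). */
struct page {
        struct page *prev, *next;       /* lru linkage on the per-node partial list */
};

struct kmem_cache_node {
        pthread_mutex_t list_lock;      /* stands in for spin_lock(&n->list_lock) */
        struct page partial;            /* circular list head of partial slabs */
        unsigned long nr_partial;
};

/*
 * The extracted helper: unlink the page and adjust the counter.
 * It takes no lock itself; callers must already hold n->list_lock
 * (or otherwise have exclusive access to the list).
 */
static inline void __remove_partial(struct kmem_cache_node *n, struct page *page)
{
        page->prev->next = page->next;
        page->next->prev = page->prev;
        n->nr_partial--;
}

/* Locked wrapper, mirroring remove_partial() in the patch below. */
static void remove_partial(struct kmem_cache_node *n, struct page *page)
{
        pthread_mutex_lock(&n->list_lock);
        __remove_partial(n, page);
        pthread_mutex_unlock(&n->list_lock);
}

/* Locked insertion at the head of the partial list. */
static void add_partial(struct kmem_cache_node *n, struct page *page)
{
        pthread_mutex_lock(&n->list_lock);
        page->next = n->partial.next;
        page->prev = &n->partial;
        n->partial.next->prev = page;
        n->partial.next = page;
        n->nr_partial++;
        pthread_mutex_unlock(&n->list_lock);
}

int main(void)
{
        struct kmem_cache_node n;
        struct page a, b;

        pthread_mutex_init(&n.list_lock, NULL);
        n.partial.prev = n.partial.next = &n.partial;
        n.nr_partial = 0;

        add_partial(&n, &a);
        add_partial(&n, &b);
        remove_partial(&n, &a);
        printf("nr_partial = %lu\n", n.nr_partial);     /* prints 1 */
        return 0;
}

The leading double underscore follows the usual kernel convention for helpers that assume the caller already holds the relevant lock, which is how all four call sites in the diff below use __remove_partial().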

@@ -1310,13 +1310,19 @@ static void add_partial(struct kmem_cache_node *n,
         spin_unlock(&n->list_lock);
 }
 
+static inline void __remove_partial(struct kmem_cache_node *n,
+                                        struct page *page)
+{
+        list_del(&page->lru);
+        n->nr_partial--;
+}
+
 static void remove_partial(struct kmem_cache *s, struct page *page)
 {
         struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
         spin_lock(&n->list_lock);
-        list_del(&page->lru);
-        n->nr_partial--;
+        __remove_partial(n, page);
         spin_unlock(&n->list_lock);
 }
 
@@ -1329,8 +1335,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
                                         struct page *page)
 {
         if (slab_trylock(page)) {
-                list_del(&page->lru);
-                n->nr_partial--;
+                __remove_partial(n, page);
                 __SetPageSlubFrozen(page);
                 return 1;
         }
@@ -2462,9 +2467,8 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
         spin_lock_irqsave(&n->list_lock, flags);
         list_for_each_entry_safe(page, h, &n->partial, lru) {
                 if (!page->inuse) {
-                        list_del(&page->lru);
+                        __remove_partial(n, page);
                         discard_slab(s, page);
-                        n->nr_partial--;
                 } else {
                         list_slab_objects(s, page,
                                 "Objects remaining on kmem_cache_close()");
@@ -2822,8 +2826,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
                          * may have freed the last object and be
                          * waiting to release the slab.
                          */
-                        list_del(&page->lru);
-                        n->nr_partial--;
+                        __remove_partial(n, page);
                         slab_unlock(page);
                         discard_slab(s, page);
                 } else {