mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-31 16:38:12 +00:00
Add virt_to_head_page and consolidate code in slab and slub
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
6d7779538f
commit
b49af68ff9
3 changed files with 14 additions and 11 deletions
|
@ -286,6 +286,12 @@ static inline void get_page(struct page *page)
|
|||
atomic_inc(&page->_count);
|
||||
}
|
||||
|
||||
/*
 * Resolve a kernel virtual address to the head page of the backing
 * page. For an address that lands in a tail page of a compound
 * (higher-order) page this returns the head page; for an ordinary
 * page it returns the page itself.
 */
static inline struct page *virt_to_head_page(const void *x)
{
	return compound_head(virt_to_page(x));
}
|
||||
|
||||
/*
|
||||
* Setup the page count before being freed into the page allocator for
|
||||
* the first time (boot or memory hotplug)
|
||||
|
|
|
@ -614,20 +614,19 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
|
|||
|
||||
static inline struct slab *page_get_slab(struct page *page)
|
||||
{
|
||||
page = compound_head(page);
|
||||
BUG_ON(!PageSlab(page));
|
||||
return (struct slab *)page->lru.prev;
|
||||
}
|
||||
|
||||
/*
 * Map an object pointer to the kmem_cache that owns it.
 *
 * Must use virt_to_head_page() rather than virt_to_page() so that a
 * pointer into a tail page of a compound (higher-order) slab page
 * resolves to the head page, where the cache pointer is stored.
 */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	return page_get_cache(page);
}
|
||||
|
||||
/*
 * Map an object pointer to the struct slab that contains it.
 *
 * Must use virt_to_head_page() rather than virt_to_page() so that a
 * pointer into a tail page of a compound (higher-order) slab page
 * resolves to the head page, where the slab pointer is stored.
 */
static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	return page_get_slab(page);
}
|
||||
|
||||
|
@ -2876,7 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
|
|||
|
||||
objp -= obj_offset(cachep);
|
||||
kfree_debugcheck(objp);
|
||||
page = virt_to_page(objp);
|
||||
page = virt_to_head_page(objp);
|
||||
|
||||
slabp = page_get_slab(page);
|
||||
|
||||
|
@ -3100,7 +3099,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
|
|||
struct slab *slabp;
|
||||
unsigned objnr;
|
||||
|
||||
slabp = page_get_slab(virt_to_page(objp));
|
||||
slabp = page_get_slab(virt_to_head_page(objp));
|
||||
objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
|
||||
slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
|
||||
}
|
||||
|
|
10
mm/slub.c
10
mm/slub.c
|
@ -1323,9 +1323,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
|
|||
{
|
||||
struct page * page;
|
||||
|
||||
page = virt_to_page(x);
|
||||
|
||||
page = compound_head(page);
|
||||
page = virt_to_head_page(x);
|
||||
|
||||
if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
|
||||
set_tracking(s, x, TRACK_FREE);
|
||||
|
@ -1336,7 +1334,7 @@ EXPORT_SYMBOL(kmem_cache_free);
|
|||
/* Figure out on which slab object the object resides */
|
||||
static struct page *get_object_page(const void *x)
|
||||
{
|
||||
struct page *page = compound_head(virt_to_page(x));
|
||||
struct page *page = virt_to_head_page(x);
|
||||
|
||||
if (!PageSlab(page))
|
||||
return NULL;
|
||||
|
@ -2076,7 +2074,7 @@ void kfree(const void *x)
|
|||
if (!x)
|
||||
return;
|
||||
|
||||
page = compound_head(virt_to_page(x));
|
||||
page = virt_to_head_page(x);
|
||||
|
||||
s = page->slab;
|
||||
|
||||
|
@ -2112,7 +2110,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
page = compound_head(virt_to_page(p));
|
||||
page = virt_to_head_page(p);
|
||||
|
||||
new_cache = get_slab(new_size, flags);
|
||||
|
||||
|
|
Loading…
Reference in a new issue