page allocator: do not check NUMA node ID when the caller knows the node is valid

Callers of alloc_pages_node() can optionally specify -1 as a node to mean
"allocate from the current node".  However, a number of the callers in
fast paths know for a fact their node is valid.  To avoid a comparison and
branch, this patch adds alloc_pages_exact_node() that only checks the nid
with VM_BUG_ON().  Callers that know their node is valid are then
converted.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Paul Mundt <lethal@linux-sh.org>	[for the SLOB NUMA bits]
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Author:    Mel Gorman <mel@csn.ul.ie>
Date:      2009-06-16 15:31:54 -07:00
Committer: Linus Torvalds <torvalds@linux-foundation.org>
commit 6484eb3e2a
parent b3c466ce51
17 changed files with 33 additions and 24 deletions
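
For reference, the whole patch turns on the entry check in these two
helpers. The sketch below pairs the alloc_pages_exact_node() definition
added by this patch (see the include/linux/gfp.h hunk) with the
alloc_pages_node() body as it stood in the 2.6.30-era gfp.h; the latter
is reproduced here for contrast only and is not part of this diff:

/* Existing helper: tolerates nid == -1 at the cost of a compare and branch. */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
{
        /* Unknown node is current node */
        if (nid < 0)
                nid = numa_node_id();

        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

/*
 * New helper: the caller guarantees 0 <= nid < MAX_NUMNODES, so release
 * builds pay no comparison or branch; VM_BUG_ON() (from linux/mmdebug.h,
 * hence the include shuffle below) still catches bad callers when
 * CONFIG_DEBUG_VM is enabled.
 */
static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
{
        VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

This is also why fallback_alloc() in mm/slab.c switches from passing -1
to passing numa_node_id(): kmem_getpages() now feeds its nodeid straight
into alloc_pages_exact_node(), which no longer accepts -1.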

--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1131,7 +1131,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
 #ifdef CONFIG_NUMA
         {
                 struct page *page;
-                page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
+                page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
                                         numa_node_id() : ioc->node, flags,
                                         get_order(size));

--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1829,8 +1829,7 @@ ia64_mca_cpu_init(void *cpu_data)
                 data = mca_bootmem();
                 first_time = 0;
         } else
-                data = page_address(alloc_pages_node(numa_node_id(),
-                                GFP_KERNEL, get_order(sz)));
+                data = __get_free_pages(GFP_KERNEL, get_order(sz));
         if (!data)
                 panic("Could not allocate MCA memory for cpu %d\n",
                         cpu);

--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
         /* attempt to allocate a granule's worth of cached memory pages */
-        page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+        page = alloc_pages_exact_node(nid,
+                                GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                 IA64_GRANULE_SHIFT-PAGE_SHIFT);
         if (!page) {
                 mutex_unlock(&uc_pool->add_chunk_mutex);

--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -90,7 +90,8 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
          */
         node = pcibus_to_node(pdev->bus);
         if (likely(node >=0)) {
-                struct page *p = alloc_pages_node(node, flags, get_order(size));
+                struct page *p = alloc_pages_exact_node(node,
+                                                flags, get_order(size));
                 if (likely(p))
                         cpuaddr = page_address(p);

--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -122,7 +122,7 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
         area->nid = nid;
         area->order = order;
-        area->pages = alloc_pages_node(area->nid, GFP_KERNEL | GFP_THISNODE,
+        area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
                                        area->order);
         if (!area->pages) {

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1277,7 +1277,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
         struct page *pages;
         struct vmcs *vmcs;
-        pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
+        pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
         if (!pages)
                 return NULL;
         vmcs = page_address(pages);

--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -302,7 +302,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
                 pnode = uv_node_to_pnode(nid);
                 if (bid < 0 || gru_base[bid])
                         continue;
-                page = alloc_pages_node(nid, GFP_KERNEL, order);
+                page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
                 if (!page)
                         goto fail;
                 gru_base[bid] = page_address(page);

--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -232,7 +232,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
         mq->mmr_blade = uv_cpu_to_blade_id(cpu);
         nid = cpu_to_node(cpu);
-        page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+        page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                 pg_order);
         if (page == NULL) {
                 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "

--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -5,6 +5,7 @@
 #include <linux/stddef.h>
 #include <linux/linkage.h>
 #include <linux/topology.h>
+#include <linux/mmdebug.h>
 
 struct vm_area_struct;
 
@@ -192,6 +193,14 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
         return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
+static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+                                                unsigned int order)
+{
+        VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+
+        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
+
 #ifdef CONFIG_NUMA
 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -7,7 +7,6 @@
 #include <linux/gfp.h>
 #include <linux/list.h>
-#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>

--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -365,7 +365,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
                 node = cpu_to_node(cpu);
                 per_cpu(cpu_profile_flip, cpu) = 0;
                 if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-                        page = alloc_pages_node(node,
+                        page = alloc_pages_exact_node(node,
                                         GFP_KERNEL | __GFP_ZERO,
                                         0);
                         if (!page)
@@ -373,7 +373,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
                         per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
                 }
                 if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-                        page = alloc_pages_node(node,
+                        page = alloc_pages_exact_node(node,
                                         GFP_KERNEL | __GFP_ZERO,
                                         0);
                         if (!page)
@@ -564,14 +564,14 @@ static int create_hash_tables(void)
                 int node = cpu_to_node(cpu);
                 struct page *page;
-                page = alloc_pages_node(node,
+                page = alloc_pages_exact_node(node,
                                 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                 0);
                 if (!page)
                         goto out_cleanup;
                 per_cpu(cpu_profile_hits, cpu)[1]
                                 = (struct profile_hit *)page_address(page);
-                page = alloc_pages_node(node,
+                page = alloc_pages_exact_node(node,
                                 GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                 0);
                 if (!page)

--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -521,7 +521,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
 {
         if (cpuset_do_page_mem_spread()) {
                 int n = cpuset_mem_spread_node();
-                return alloc_pages_node(n, gfp, 0);
+                return alloc_pages_exact_node(n, gfp, 0);
         }
         return alloc_pages(gfp, 0);
 }

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -630,7 +630,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
         if (h->order >= MAX_ORDER)
                 return NULL;
-        page = alloc_pages_node(nid,
+        page = alloc_pages_exact_node(nid,
                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
                                                 __GFP_REPEAT|__GFP_NOWARN,
                 huge_page_order(h));
@@ -649,7 +649,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
          * Use a helper variable to find the next node and then
          * copy it back to hugetlb_next_nid afterwards:
          * otherwise there's a window in which a racer might
-         * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+         * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
          * But we don't need to use a spin_lock here: it really
         * doesn't matter if occasionally a racer chooses the
         * same nid as we do. Move nid forward in the mask even

--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -803,7 +803,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 {
-        return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
+        return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
 
 /*

--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -802,7 +802,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
         *result = &pm->status;
 
-        return alloc_pages_node(pm->node,
+        return alloc_pages_exact_node(pm->node,
                                 GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1707,7 +1707,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
         if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                 flags |= __GFP_RECLAIMABLE;
 
-        page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+        page = alloc_pages_exact_node(nodeid, flags, cachep->gfporder);
         if (!page)
                 return NULL;
 
@@ -3261,7 +3261,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
                 if (local_flags & __GFP_WAIT)
                         local_irq_enable();
                 kmem_flagcheck(cache, flags);
-                obj = kmem_getpages(cache, local_flags, -1);
+                obj = kmem_getpages(cache, local_flags, numa_node_id());
                 if (local_flags & __GFP_WAIT)
                         local_irq_disable();
                 if (obj) {

--- a/mm/slob.c
+++ b/mm/slob.c
@@ -46,7 +46,7 @@
  * NUMA support in SLOB is fairly simplistic, pushing most of the real
  * logic down to the page allocator, and simply doing the node accounting
  * on the upper levels. In the event that a node id is explicitly
- * provided, alloc_pages_node() with the specified node id is used
+ * provided, alloc_pages_exact_node() with the specified node id is used
  * instead. The common case (or when the node id isn't explicitly provided)
  * will default to the current node, as per numa_node_id().
  *
@@ -244,7 +244,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 #ifdef CONFIG_NUMA
         if (node != -1)
-                page = alloc_pages_node(node, gfp, order);
+                page = alloc_pages_exact_node(node, gfp, order);
         else
 #endif
         page = alloc_pages(gfp, order);