[PATCH] mempool: use common mempool page allocator

Convert two mempool users that currently use their own mempool-backed page
allocators to use the generic mempool page allocator.

Also included are 2 trivial whitespace fixes.
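
For reference, the generic page mempool helpers that these call sites
switch to (introduced elsewhere in this mempool series) look roughly
like the sketch below; the bodies are illustrative rather than quoted
verbatim, but the signatures match the calls used in the diff:

  #include <linux/gfp.h>
  #include <linux/mempool.h>

  /* pool_data carries the page allocation order for the pool. */
  void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
  {
          int order = (int)(long)pool_data;
          return alloc_pages(gfp_mask, order);
  }

  void mempool_free_pages(void *element, void *pool_data)
  {
          int order = (int)(long)pool_data;
          __free_pages(element, order);
  }

  /* Convenience wrapper; order 0 gives a pool of single pages. */
  static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
  {
          return mempool_create(min_nr, mempool_alloc_pages,
                                mempool_free_pages, (void *)(long)order);
  }

dm-crypt and the highmem bounce pool can then call
mempool_create_page_pool(nr, 0) directly; the ISA bounce pool keeps a
thin wrapper only to OR in GFP_DMA before calling mempool_alloc_pages().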

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author:    Matthew Dobson, 2006-03-26 01:37:45 -08:00
Committer: Linus Torvalds
Parent:    6e0678f394
Commit:    a19b27ce38

 2 files changed, 8 insertions(+), 32 deletions(-)

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -93,20 +93,6 @@ struct crypt_config {
 
 static kmem_cache_t *_crypt_io_pool;
 
-/*
- * Mempool alloc and free functions for the page
- */
-static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
-{
-	return alloc_page(gfp_mask);
-}
-
-static void mempool_free_page(void *page, void *data)
-{
-	__free_page(page);
-}
-
-
 /*
  * Different IV generation algorithms:
  *
@@ -637,8 +623,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad3;
 	}
 
-	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
-				       mempool_free_page, NULL);
+	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = PFX "Cannot allocate page mempool";
 		goto bad4;

diff --git a/mm/highmem.c b/mm/highmem.c
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -31,14 +31,9 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
 {
-	return alloc_page(gfp_mask | GFP_DMA);
-}
-
-static void page_pool_free(void *page, void *data)
-{
-	__free_page(page);
+	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
 }
 
 /*
@@ -51,11 +46,6 @@ static void page_pool_free(void *page, void *data)
  */
 #ifdef CONFIG_HIGHMEM
 
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
-{
-	return alloc_page(gfp_mask);
-}
-
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -229,7 +219,7 @@ static __init int init_emergency_pool(void)
 	if (!i.totalhigh)
 		return 0;
 
-	page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
+	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
 	if (!page_pool)
 		BUG();
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
@@ -272,7 +262,8 @@ int init_emergency_isa_pool(void)
 	if (isa_page_pool)
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+				       mempool_free_pages, (void *) 0);
 	if (!isa_page_pool)
 		BUG();
 
@@ -337,7 +328,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 	bio_put(bio);
 }
 
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
 {
 	if (bio->bi_size)
 		return 1;
@@ -384,7 +375,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 }
 
 static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
-			mempool_t *pool)
+			       mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;