mm: use vm_unmapped_area() in hugetlbfs on tile architecture

Update the tile hugetlb_get_unmapped_area function to make use of
vm_unmapped_area() instead of implementing a brute force search.
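
For context: vm_unmapped_area() searches for a free range described by a
struct vm_unmapped_area_info (length, low/high limits, alignment). The
align_mask used in this patch, PAGE_MASK & ~huge_page_mask(h), keeps
exactly the page-granular bits below the huge page boundary, so the
returned address comes back huge-page aligned. A standalone sketch of
that arithmetic, assuming 4 KiB base pages and a 2 MiB huge page size
(the constants are illustrative, not from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_mask = ~0xfffUL;     /* PAGE_MASK for 4 KiB pages */
		unsigned long huge_mask = ~0x1fffffUL;  /* huge_page_mask(h) for 2 MiB */
		unsigned long align_mask = page_mask & ~huge_mask;

		/* prints 0x1ff000: the bits a huge-page-aligned address must clear */
		printf("align_mask = %#lx\n", align_mask);
		return 0;
	}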

[akpm@linux-foundation.org: fix build]
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Michel Lespinasse <walken@google.com>
Date: 2012-12-11 16:02:17 -08:00
Committed by: Linus Torvalds
parent a046be3d3c
commit dd5295965b

arch/tile/mm/hugetlbpage.c

@@ -231,42 +231,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > mm->cached_hole_size) {
-		start_addr = mm->free_area_cache;
-	} else {
-		start_addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	addr = ALIGN(start_addr, huge_page_size(h));
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point: (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, huge_page_size(h));
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -274,92 +247,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma, *prev_vma;
-	unsigned long base = mm->mmap_base, addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	int first_time = 1;
-
-	/* don't allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache = base;
-	}
-try_again:
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = (mm->free_area_cache - len) & huge_page_mask(h);
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		vma = find_vma_prev(mm, addr, &prev_vma);
-		if (!vma) {
-			return addr;
-			break;
-		}
-
-		/*
-		 * new region fits between prev_vma->vm_end and
-		 * vma->vm_start, use it:
-		 */
-		if (addr + len <= vma->vm_start &&
-		    (!prev_vma || (addr >= prev_vma->vm_end))) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			mm->free_area_cache = addr;
-			return addr;
-		} else {
-			/* pull free_area_cache down to the first hole */
-			if (mm->free_area_cache == vma->vm_end) {
-				mm->free_area_cache = vma->vm_start;
-				mm->cached_hole_size = largest_hole;
-			}
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (first_time) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		first_time = 0;
-		goto try_again;
-	}
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-			len, pgoff, flags);
-
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
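
A note on the error check in the new topdown path: valid mapping addresses
are always page aligned, so vm_unmapped_area() can report failure in-band
by returning a negative errno cast to unsigned long, whose low-order bits
are never all clear. A standalone sketch of the test (assuming 4 KiB pages;
the success address is made up):

	#include <stdio.h>

	#define PAGE_MASK (~0xfffUL)	/* 4 KiB pages assumed */
	#define ENOMEM 12

	int main(void)
	{
		unsigned long ok  = 0x700000200000UL;		/* hypothetical result */
		unsigned long err = (unsigned long)-ENOMEM;	/* in-band error value */

		printf("ok  is error? %d\n", (ok  & ~PAGE_MASK) != 0);	/* prints 0 */
		printf("err is error? %d\n", (err & ~PAGE_MASK) != 0);	/* prints 1 */
		return 0;
	}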