linux-stable/arch/metag/mm/hugetlbpage.c
Naoya Horiguchi 61f77eda9b mm/hugetlb: reduce arch dependent code around follow_huge_*
Currently we have many duplicates in definitions around
follow_huge_addr(), follow_huge_pmd(), and follow_huge_pud(), so this
patch tries to remove them.  The basic idea is to put the default
implementation for these functions in mm/hugetlb.c as weak symbols
(regardless of CONFIG_ARCH_WANT_GENERAL_HUGETLB), and to implement
arch-specific code only when the arch needs it.

For follow_huge_addr(), only powerpc and ia64 have their own
implementation, and in all other architectures this function just returns
ERR_PTR(-EINVAL).  So this patch sets returning ERR_PTR(-EINVAL) as
default.

As for follow_huge_(pmd|pud)(), if (pmd|pud)_huge() is implemented to
always return 0 in your architecture (like in ia64 or sparc,) it's never
called (the callsite is optimized away) no matter how implemented it is.
So in such architectures, we don't need arch-specific implementation.

In some architecture (like mips, s390 and tile,) their current
arch-specific follow_huge_(pmd|pud)() are effectively identical with the
common code, so this patch lets these architecture use the common code.

One exception is metag, where pmd_huge() could return non-zero but it
expects follow_huge_pmd() to always return NULL.  This means that we need
arch-specific implementation which returns NULL.  This behavior looks
strange to me (because non-zero pmd_huge() implies that the architecture
supports PMD-based hugepage, so follow_huge_pmd() can/should return some
relevant value,) but that's beyond this cleanup patch, so let's keep it.

Justification of non-trivial changes:
- in s390, follow_huge_pmd() checks !MACHINE_HAS_HPAGE at first, and this
  patch removes the check. This is OK because we can assume MACHINE_HAS_HPAGE
  is true when follow_huge_pmd() can be called (note that pmd_huge() has
  the same check and always returns 0 for !MACHINE_HAS_HPAGE.)
- in s390 and mips, we use HPAGE_MASK instead of PMD_MASK as done in common
  code. This patch forces these archs use PMD_MASK, but it's OK because
  they are identical in both archs.
  In s390, both of HPAGE_SHIFT and PMD_SHIFT are 20.
  In mips, HPAGE_SHIFT is defined as (PAGE_SHIFT + PAGE_SHIFT - 3) and
PMD_SHIFT is defined as (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3), but
  PTE_ORDER is always 0, so these are identical.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Steve Capper <steve.capper@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-02-11 17:06:01 -08:00

253 lines
5.6 KiB
C

/*
* arch/metag/mm/hugetlbpage.c
*
* METAG HugeTLB page support.
*
* Cloned from SuperH
*
* Cloned from sparc64 by Paul Mundt.
*
* Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
*/
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
/*
 * Validate that [addr, addr+len) is usable for a hugepage mapping.
 * Returns 0 if the range is acceptable, -EINVAL otherwise.
 */
int prepare_hugepage_range(struct file *file, unsigned long addr,
			   unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct hstate *h = hstate_file(file);
	struct vm_area_struct *vma;

	/* Both length and start address must be hugepage-aligned. */
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	/* Range must fit below TASK_SIZE (overflow-safe form). */
	if (TASK_SIZE - len < addr)
		return -EINVAL;

	/*
	 * Reject if the HUGEPT block containing the range's tail is already
	 * occupied by a non-huge mapping.
	 * NOTE(review): vm_flags is tested against MAP_HUGETLB (an mmap()
	 * flag) rather than a VM_* flag -- this looks suspicious but is
	 * the existing metag behavior; confirm before changing.
	 */
	vma = find_vma(mm, ALIGN_HUGEPT(addr));
	if (vma && !(vma->vm_flags & MAP_HUGETLB))
		return -EINVAL;

	vma = find_vma(mm, addr);
	if (vma) {
		/* The range must not overlap the next mapping... */
		if (addr + len > vma->vm_start)
			return -EINVAL;
		/*
		 * ...nor may its trailing HUGEPT block run into a
		 * non-huge mapping.
		 */
		if (!(vma->vm_flags & MAP_HUGETLB) &&
		    (ALIGN_HUGEPT(addr + len) > vma->vm_start))
			return -EINVAL;
	}
	return 0;
}
/*
 * Allocate (or find) the PTE for a hugepage at @addr, and mark the
 * top-level entry with the huge page size.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Walk down to the PMD; huge mappings on metag live at PTE level. */
	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_alloc_map(mm, NULL, pmd, addr);

	/*
	 * Encode the huge page size in the pgd entry's size field --
	 * presumably consumed by the TLB refill path; TODO confirm.
	 */
	pgd->pgd &= ~_PAGE_SZ_MASK;
	pgd->pgd |= _PAGE_SZHUGE;

	return pte;
}
/*
 * Look up the PTE that maps the hugepage at @addr.
 * Pure page-table walk; allocates nothing.
 */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);
}
/*
 * Huge PMD sharing is not implemented on metag, so there is never
 * anything to unshare; report "not unshared".
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
/*
 * A PMD covers a huge mapping when the page size encoded in it is
 * larger than the normal page size.
 */
int pmd_huge(pmd_t pmd)
{
	return (pmd_page_shift(pmd) > PAGE_SHIFT) ? 1 : 0;
}
/* metag has no PUD-level huge pages. */
int pud_huge(pud_t pud)
{
	return 0;
}
/*
 * Deliberately always returns NULL: even though pmd_huge() can be true
 * on metag, follow_huge_pmd() is expected to fail here.  This
 * historical behavior was intentionally preserved when the generic
 * follow_huge_* implementations were consolidated into mm/hugetlb.c.
 */
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
/*
* Look for an unmapped area starting after another hugetlb vma.
* There are guaranteed to be no huge pte's spare if all the huge pages are
* full size (4MB), so in that case compile out this search.
*/
#if HPAGE_SHIFT == HUGEPT_SHIFT
/*
 * When huge pages are exactly one second-level page table in size,
 * a hugetlb vma can never leave spare huge PTEs behind it, so there
 * is nothing existing to reuse -- report "no address found".
 */
static inline unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	return 0;
}
#else
/*
 * Scan the address space for a gap of @len bytes adjacent to an
 * existing hugetlb vma, preferring to fill partially used HUGEPT
 * blocks before opening new ones.  Returns the address found, or 0
 * if no suitable gap exists.
 */
static unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	int after_huge;	/* nonzero: addr sits just after a huge mapping */

	/*
	 * Resume inside a partially filled HUGEPT block remembered from a
	 * previous allocation, if any; otherwise scan from the mmap base.
	 */
	if (mm->context.part_huge) {
		start_addr = mm->context.part_huge;
		after_huge = 1;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		after_huge = 0;
	}
new_search:
	addr = start_addr;
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto new_search;
			}
			return 0;
		}
		/* skip ahead if we've aligned right over some vmas */
		if (vma && vma->vm_end <= addr)
			continue;
		/* space before the next vma? */
		if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
			    <= vma->vm_start)) {
			unsigned long end = addr + len;
			/* Remember any leftover room in this HUGEPT block. */
			if (end & HUGEPT_MASK)
				mm->context.part_huge = end;
			else if (addr == mm->context.part_huge)
				mm->context.part_huge = 0;
			return addr;
		}
		/*
		 * NOTE(review): vm_flags is tested against MAP_HUGETLB (an
		 * mmap() flag, not a VM_* flag) -- matches the check in
		 * prepare_hugepage_range(), but worth confirming.
		 */
		if (vma->vm_flags & MAP_HUGETLB) {
			/* space after a huge vma in 2nd level page table? */
			if (vma->vm_end & HUGEPT_MASK) {
				after_huge = 1;
				/* no need to align to the next PT block */
				addr = vma->vm_end;
				continue;
			}
		}
		after_huge = 0;
		addr = ALIGN_HUGEPT(vma->vm_end);
	}
}
#endif
/* Do a full search to find an area without any nearby normal pages. */
static unsigned long
hugetlb_get_unmapped_area_new_pmd(unsigned long len)
{
struct vm_unmapped_area_info info;
info.flags = 0;
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & HUGEPT_MASK;
info.align_offset = 0;
return vm_unmapped_area(&info);
}
/*
 * arch hook for mmap() of hugetlbfs files: pick an unmapped area for a
 * hugepage mapping of @len bytes.  Returns the chosen address or a
 * negative errno.
 */
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* MAP_FIXED: the caller's address must be usable exactly as given. */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* Honour the caller's hint if it rounds up to a usable range. */
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		if (!prepare_hugepage_range(file, addr, len))
			return addr;
	}

	/*
	 * Look for an existing hugetlb vma with space after it (this is to
	 * minimise fragmentation caused by huge pages).
	 */
	addr = hugetlb_get_unmapped_area_existing(len);
	if (addr)
		return addr;

	/*
	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
	 * for huge pages.
	 */
	return hugetlb_get_unmapped_area_new_pmd(len);
}
#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
/*
 * Parse the "hugepagesz=" kernel command-line option, needed for
 * boot-time huge page allocation.  metag supports a single hugepage
 * size (1 << HPAGE_SHIFT, i.e. 4MB); anything else is rejected.
 */
static __init int setup_hugepagesz(char *opt)
{
	unsigned long size = memparse(opt, &opt);

	if (size != (1 << HPAGE_SHIFT)) {
		pr_err("hugepagesz: Unsupported page size %lu M\n",
		       size >> 20);
		return 0;
	}

	hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);