mm: migration: add migration_entry_wait_huge()

When we have a page fault for an address that is backed by a hugepage
under migration, the kernel can't wait correctly and busy-loops on the
hugepage fault until the migration finishes.  As a result, users who try
to kick hugepage migration (via soft offlining, for example) occasionally
experience a long delay or a soft lockup.
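
As a reference for how this can be triggered, here is a minimal user-space
sketch (an editorial illustration, not part of the patch): one thread keeps
touching a hugetlb mapping while the main thread soft-offlines it, so a
fault can land while the migration entry is installed.  It assumes
CONFIG_MEMORY_FAILURE, CAP_SYS_ADMIN, and at least one free 2MB hugepage;
MADV_SOFT_OFFLINE (assumed value 101) may need defining by hand on older
headers, and the program is built with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101	/* assumed value, asm-generic/mman-common.h */
#endif

static char *map;
static const size_t len = 2UL << 20;		/* one 2MB hugepage */

static void *toucher(void *arg)
{
	for (;;)				/* accesses race with the migration */
		map[0]++;
	return NULL;
}

int main(void)
{
	pthread_t t;

	map = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	map[0] = 1;				/* fault the hugepage in */
	pthread_create(&t, NULL, toucher, NULL);
	if (madvise(map, len, MADV_SOFT_OFFLINE))	/* kick hugepage migration */
		perror("madvise");
	return 0;
}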

This is because pte_offset_map_lock() can't find the correct migration
entry or take the correct page table lock for a hugepage.  This patch
introduces migration_entry_wait_huge() to solve this.
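
To make the failure concrete, below is an editorial sketch of the relevant
part of hugetlb_fault() (the name hugetlb_fault_sketch and the reduced
argument list are illustrative only; the helpers it calls are the ones that
appear in the diff further down):

/* Editorial sketch, not code from the patch. */
static int hugetlb_fault_sketch(struct mm_struct *mm, unsigned long address)
{
	pte_t *ptep = huge_pte_offset(mm, address);	/* pmd/pud-level huge entry */

	if (ptep) {
		pte_t entry = huge_ptep_get(ptep);

		if (unlikely(is_hugetlb_entry_migration(entry))) {
			/*
			 * Old code did migration_entry_wait(mm, (pmd_t *)ptep,
			 * address): pte_offset_map_lock() then treats the huge
			 * entry as if it pointed to a page-table page, so it can
			 * take the wrong split lock and never sees the migration
			 * entry; the wait returns at once, the fault returns 0,
			 * and the task refaults in a tight loop.
			 *
			 * New code waits on the entry itself, under the lock
			 * hugetlb really uses (mm->page_table_lock).
			 */
			migration_entry_wait_huge(mm, ptep);
			return 0;	/* the fault is retried after migration */
		}
	}

	/* ... hwpoison check and the rest of the fault path elided ... */
	return 0;
}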

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: <stable@vger.kernel.org>	[2.6.35+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 30dad30922 (parent 27749f2ff0)
Naoya Horiguchi, 2013-06-12 14:05:04 -07:00, committed by Linus Torvalds
3 files changed, 22 insertions(+), 6 deletions(-)

include/linux/swapops.h

@@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
 
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address);
+extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
 #else
 
 #define make_migration_entry(page, write) swp_entry(0, 0)
@@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp)
 static inline void make_migration_entry_read(swp_entry_t *entryp) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					 unsigned long address) { }
+static inline void migration_entry_wait_huge(struct mm_struct *mm,
+					pte_t *pte) { }
 static inline int is_write_migration_entry(swp_entry_t entry)
 {
 	return 0;

mm/hugetlb.c

@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (ptep) {
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
-			migration_entry_wait(mm, (pmd_t *)ptep, address);
+			migration_entry_wait_huge(mm, ptep);
 			return 0;
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |

mm/migrate.c

@@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
  */
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-				unsigned long address)
+static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+				spinlock_t *ptl)
 {
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
+	pte_t pte;
 	swp_entry_t entry;
 	struct page *page;
 
-	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	spin_lock(ptl);
 	pte = *ptep;
 	if (!is_swap_pte(pte))
 		goto out;
@@ -236,6 +235,20 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	pte_unmap_unlock(ptep, ptl);
 }
 
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long address)
+{
+	spinlock_t *ptl = pte_lockptr(mm, pmd);
+	pte_t *ptep = pte_offset_map(pmd, address);
+	__migration_entry_wait(mm, ptep, ptl);
+}
+
+void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+{
+	spinlock_t *ptl = &(mm)->page_table_lock;
+	__migration_entry_wait(mm, pte, ptl);
+}
+
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
 static bool buffer_migrate_lock_buffers(struct buffer_head *head,