diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e6767a35f7de..f1bcaae0d73a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3509,7 +3509,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  * Keep the pte_same checks anyway to make transition from the mutex easier.
  */
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
-		       unsigned long haddr, pte_t *ptep,
+		       unsigned long address, pte_t *ptep,
 		       struct page *pagecache_page, spinlock_t *ptl)
 {
 	pte_t pte;
@@ -3518,6 +3518,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	int ret = 0, outside_reserve = 0;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
+	unsigned long haddr = address & huge_page_mask(h);
 
 	pte = huge_ptep_get(ptep);
 	old_page = pte_page(pte);
@@ -3592,7 +3593,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto out_release_all;
 	}
 
-	copy_user_huge_page(new_page, old_page, haddr, vma,
+	copy_user_huge_page(new_page, old_page, address, vma,
 			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 	set_page_huge_active(new_page);
@@ -3826,7 +3827,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_cow(mm, vma, haddr, ptep, page, ptl);
+		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
 	}
 
 	spin_unlock(ptl);
@@ -3980,7 +3981,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (flags & FAULT_FLAG_WRITE) {
 		if (!huge_pte_write(entry)) {
-			ret = hugetlb_cow(mm, vma, haddr, ptep,
+			ret = hugetlb_cow(mm, vma, address, ptep,
 					  pagecache_page, ptl);
 			goto out_put_page;
 		}
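
Note (not part of the patch): the change pushes the unmasked fault address down
into hugetlb_cow(), which now derives the huge-page-aligned haddr itself via
huge_page_mask(h) for its page-table work, so that copy_user_huge_page() can
receive the actual faulting address as a hint and copy the subpage that faulted
last, leaving it hot in the CPU cache when the faulting access retries. Below is
a minimal userspace sketch of that copy-order idea only; SUBPAGE_SIZE,
N_SUBPAGES, and copy_huge_sketch() are made-up names for illustration, and the
kernel's actual traversal order in copy_user_huge_page() is more elaborate.

/*
 * Illustrative sketch -- NOT kernel code. Copies a "huge page" subpage by
 * subpage, saving the subpage that contains the faulting address for last
 * so it is the most recently touched data when the fault is retried.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SUBPAGE_SIZE	4096UL
#define N_SUBPAGES	512UL	/* e.g. a 2 MB huge page of 4 KB subpages */

static void copy_huge_sketch(char *dst, const char *src,
			     unsigned long addr_hint, unsigned long dst_addr)
{
	/* Subpage index the fault landed in, derived from the raw address. */
	unsigned long target = (addr_hint - dst_addr) / SUBPAGE_SIZE;
	unsigned long i;

	/* Copy every other subpage first... */
	for (i = 0; i < N_SUBPAGES; i++) {
		if (i == target)
			continue;
		memcpy(dst + i * SUBPAGE_SIZE, src + i * SUBPAGE_SIZE,
		       SUBPAGE_SIZE);
	}
	/* ...then the target subpage, so it is copied last and stays hot. */
	memcpy(dst + target * SUBPAGE_SIZE, src + target * SUBPAGE_SIZE,
	       SUBPAGE_SIZE);
}

int main(void)
{
	size_t sz = SUBPAGE_SIZE * N_SUBPAGES;
	char *src = malloc(sz), *dst = malloc(sz);

	if (!src || !dst)
		return 1;
	memset(src, 0xab, sz);

	/* Pretend the fault hit offset 0x123 into subpage 7 of dst. */
	copy_huge_sketch(dst, src,
			 (unsigned long)dst + 7 * SUBPAGE_SIZE + 0x123,
			 (unsigned long)dst);
	printf("copied %zu bytes, target subpage copied last\n", sz);
	free(src);
	free(dst);
	return 0;
}

This also explains why the hugetlb_no_page() and hugetlb_fault() call sites
switch from haddr to address: only the copy path wants the exact faulting
address, and the alignment is now done once inside hugetlb_cow().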