mm: hugetlb: fix UAF in hugetlb_handle_userfault
commit 958f32ce83 upstream.

The vma_lock and hugetlb_fault_mutex are dropped before handling a
userfault and reacquired after handle_userfault() returns, but
reacquiring the vma_lock can lead to a UAF [1,2] due to the following
race:

hugetlb_fault
  hugetlb_no_page
    /* unlock vma_lock */
    hugetlb_handle_userfault
      handle_userfault
        /* unlock mm->mmap_lock */
                                        vm_mmap_pgoff
                                          do_mmap
                                            mmap_region
                                              munmap_vma_range
                                                /* clean old vma */
        /* lock vma_lock again  <--- UAF */
      /* unlock vma_lock */

Since the vma_lock is unlocked again immediately after
hugetlb_handle_userfault() returns, drop the unneeded relocking inside
hugetlb_handle_userfault() to fix the issue.

[1] https://lore.kernel.org/linux-mm/000000000000d5e00a05e834962e@google.com/
[2] https://lore.kernel.org/linux-mm/20220921014457.1668-1-liuzixian4@huawei.com/

Link: https://lkml.kernel.org/r/20220923042113.137273-1-liushixin2@huawei.com
Fixes: 1a1aad8a9b ("userfaultfd: hugetlbfs: add userfaultfd hugetlb hook")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reported-by: syzbot+193f9cee8638750b23cf@syzkaller.appspotmail.com
Reported-by: Liu Zixian <liuzixian4@huawei.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: <stable@vger.kernel.org> [4.14+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent 5156958755
commit 78504bcedb

1 changed file with 17 additions and 20 deletions

 mm/hugetlb.c | 37 +++++++++++++++++--------------------
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5482,7 +5482,6 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
                                                  unsigned long addr,
                                                  unsigned long reason)
 {
-       vm_fault_t ret;
        u32 hash;
        struct vm_fault vmf = {
                .vma = vma,
@@ -5500,18 +5499,14 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
        };

        /*
-        * hugetlb_fault_mutex and i_mmap_rwsem must be
-        * dropped before handling userfault.  Reacquire
-        * after handling fault to make calling code simpler.
+        * vma_lock and hugetlb_fault_mutex must be dropped before handling
+        * userfault. Also mmap_lock will be dropped during handling
+        * userfault, any vma operation should be careful from here.
         */
        hash = hugetlb_fault_mutex_hash(mapping, idx);
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
        i_mmap_unlock_read(mapping);
-       ret = handle_userfault(&vmf, reason);
-       i_mmap_lock_read(mapping);
-       mutex_lock(&hugetlb_fault_mutex_table[hash]);
-
-       return ret;
+       return handle_userfault(&vmf, reason);
 }

 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
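Taken together, the two hunks above leave the helper with drop-only
semantics. A sketch of the post-patch function for orientation (parameter
types are inferred from the call sites and surrounding code; the vm_fault
fields between .vma and the closing brace are elided by the hunk context
and are not reproduced here):

    static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
                                                      struct address_space *mapping,
                                                      pgoff_t idx,
                                                      unsigned int flags,
                                                      unsigned long haddr,
                                                      unsigned long addr,
                                                      unsigned long reason)
    {
            u32 hash;
            struct vm_fault vmf = {
                    .vma = vma,
                    /* ... remaining fields elided by the hunk context ... */
            };

            /*
             * Drop both locks; handle_userfault() will also drop mmap_lock,
             * so the on-stack vmf is the only state safe to touch after it
             * returns.
             */
            hash = hugetlb_fault_mutex_hash(mapping, idx);
            mutex_unlock(&hugetlb_fault_mutex_table[hash]);
            i_mmap_unlock_read(mapping);

            return handle_userfault(&vmf, reason);
    }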
@@ -5529,6 +5524,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        spinlock_t *ptl;
        unsigned long haddr = address & huge_page_mask(h);
        bool new_page, new_pagecache_page = false;
+       u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

        /*
         * Currently, we are forced to kill the process in the event the
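The hash is hoisted to the top of hugetlb_no_page() because the function is
now responsible for dropping hugetlb_fault_mutex on its own exit paths (see
the out: label in a later hunk), rather than returning to hugetlb_fault()
with the mutex still held.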
@@ -5539,7 +5535,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
                pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
                           current->pid);
-               return ret;
+               goto out;
        }

        /*
@@ -5556,12 +5552,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        page = find_lock_page(mapping, idx);
        if (!page) {
                /* Check for page in userfault range */
-               if (userfaultfd_missing(vma)) {
-                       ret = hugetlb_handle_userfault(vma, mapping, idx,
+               if (userfaultfd_missing(vma))
+                       return hugetlb_handle_userfault(vma, mapping, idx,
                                                       flags, haddr, address,
                                                       VM_UFFD_MISSING);
-                       goto out;
-               }

                page = alloc_huge_page(vma, haddr, 0);
                if (IS_ERR(page)) {
@@ -5621,10 +5615,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
                if (userfaultfd_minor(vma)) {
                        unlock_page(page);
                        put_page(page);
-                       ret = hugetlb_handle_userfault(vma, mapping, idx,
+                       return hugetlb_handle_userfault(vma, mapping, idx,
                                                       flags, haddr, address,
                                                       VM_UFFD_MINOR);
-                       goto out;
                }
        }

@@ -5682,6 +5675,8 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,

        unlock_page(page);
 out:
+       mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+       i_mmap_unlock_read(mapping);
        return ret;

 backout:
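With the unlocks folded into out:, every failure path that previously did
"return ret" while hugetlb_fault() still held the locks now funnels through
a single exit that releases hugetlb_fault_mutex and i_mmap_rwsem itself.
This is what allows the two userfault call sites above to return directly
instead of jumping to a cleanup label.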
@@ -5780,11 +5775,13 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,

        entry = huge_ptep_get(ptep);
        /* PTE markers should be handled the same way as none pte */
-       if (huge_pte_none_mostly(entry)) {
-               ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
+       if (huge_pte_none_mostly(entry))
+               /*
+                * hugetlb_no_page will drop vma lock and hugetlb fault
+                * mutex internally, which make us return immediately.
+                */
+               return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
                                      entry, flags);
-               goto out_mutex;
-       }

        ret = 0;
