Take mmap lock in cacheflush syscall

We need to take the mmap lock around find_vma() and subsequent use of the
VMA. Otherwise, we can race with concurrent operations like munmap(), which
can lead to use-after-free accesses to freed VMAs.

Fixes: 1000197d80 ("nios2: System calls handling")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Ley Foon Tan <ley.foon.tan@intel.com>
This commit is contained in:
Jann Horn 2021-02-19 14:59:35 +08:00 committed by Ley Foon Tan
parent f40ddce885
commit c26958cb5a
1 changed file with 9 additions and 2 deletions

View File

@@ -22,6 +22,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
unsigned int op)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
if (len == 0)
return 0;
@@ -34,16 +35,22 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
if (addr + len < addr)
return -EFAULT;
if (mmap_read_lock_killable(mm))
return -EINTR;
/*
* Verify that the specified address region actually belongs
* to this process.
*/
vma = find_vma(current->mm, addr);
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
vma = find_vma(mm, addr);
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
mmap_read_unlock(mm);
return -EFAULT;
}
flush_cache_range(vma, addr, addr + len);
mmap_read_unlock(mm);
return 0;
}