userfaultfd: use per-vma locks in userfaultfd operations

All userfaultfd operations, except write-protect, opportunistically use
per-vma locks to lock vmas.  On failure, attempt again inside mmap_lock
critical section.

Write-protect operation requires mmap_lock as it iterates over multiple
vmas.

Link: https://lkml.kernel.org/r/20240215182756.3448972-5-lokeshgidra@google.com
Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Nicolas Geoffray <ngeoffray@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Tim Murray <timmurray@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Lokesh Gidra 2024-02-15 10:27:56 -08:00 committed by Andrew Morton
parent 32af81af2f
commit 867a43a34f
4 changed files with 301 additions and 106 deletions

View File

@@ -2005,17 +2005,8 @@ static int userfaultfd_move(struct userfaultfd_ctx *ctx,
return -EINVAL; return -EINVAL;
if (mmget_not_zero(mm)) { if (mmget_not_zero(mm)) {
mmap_read_lock(mm); ret = move_pages(ctx, uffdio_move.dst, uffdio_move.src,
uffdio_move.len, uffdio_move.mode);
/* Re-check after taking map_changing_lock */
down_read(&ctx->map_changing_lock);
if (likely(!atomic_read(&ctx->mmap_changing)))
ret = move_pages(ctx, mm, uffdio_move.dst, uffdio_move.src,
uffdio_move.len, uffdio_move.mode);
else
ret = -EAGAIN;
up_read(&ctx->map_changing_lock);
mmap_read_unlock(mm);
mmput(mm); mmput(mm);
} else { } else {
return -ESRCH; return -ESRCH;

View File

@@ -138,9 +138,8 @@ extern long uffd_wp_range(struct vm_area_struct *vma,
/* move_pages */ /* move_pages */
void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2); void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2); void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm, ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
unsigned long dst_start, unsigned long src_start, unsigned long src_start, unsigned long len, __u64 flags);
unsigned long len, __u64 flags);
int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval, int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
struct vm_area_struct *dst_vma, struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, struct vm_area_struct *src_vma,

View File

@@ -2158,7 +2158,7 @@ unlock:
#ifdef CONFIG_USERFAULTFD #ifdef CONFIG_USERFAULTFD
/* /*
* The PT lock for src_pmd and the mmap_lock for reading are held by * The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
* the caller, but it must return after releasing the page_table_lock. * the caller, but it must return after releasing the page_table_lock.
* Just move the page from src_pmd to dst_pmd if possible. * Just move the page from src_pmd to dst_pmd if possible.
* Return zero if succeeded in moving the page, -EAGAIN if it needs to be * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
@@ -2181,7 +2181,8 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
src_ptl = pmd_lockptr(mm, src_pmd); src_ptl = pmd_lockptr(mm, src_pmd);
lockdep_assert_held(src_ptl); lockdep_assert_held(src_ptl);
mmap_assert_locked(mm); vma_assert_locked(src_vma);
vma_assert_locked(dst_vma);
/* Sanity checks before the operation */ /* Sanity checks before the operation */
if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) || if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||

View File

@@ -20,19 +20,11 @@
#include "internal.h" #include "internal.h"
static __always_inline static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm, bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
unsigned long dst_start,
unsigned long len)
{ {
/* /* Make sure that the dst range is fully within dst_vma. */
* Make sure that the dst range is both valid and fully within a if (dst_end > dst_vma->vm_end)
* single existing vma. return false;
*/
struct vm_area_struct *dst_vma;
dst_vma = find_vma(dst_mm, dst_start);
if (!range_in_vma(dst_vma, dst_start, dst_start + len))
return NULL;
/* /*
* Check the vma is registered in uffd, this is required to * Check the vma is registered in uffd, this is required to
@@ -40,11 +32,122 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
* time. * time.
*/ */
if (!dst_vma->vm_userfaultfd_ctx.ctx) if (!dst_vma->vm_userfaultfd_ctx.ctx)
return NULL; return false;
return true;
}
static __always_inline
struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
unsigned long addr)
{
struct vm_area_struct *vma;
mmap_assert_locked(mm);
vma = vma_lookup(mm, addr);
if (!vma)
vma = ERR_PTR(-ENOENT);
else if (!(vma->vm_flags & VM_SHARED) &&
unlikely(anon_vma_prepare(vma)))
vma = ERR_PTR(-ENOMEM);
return vma;
}
#ifdef CONFIG_PER_VMA_LOCK
/*
* lock_vma() - Lookup and lock vma corresponding to @address.
* @mm: mm to search vma in.
* @address: address that the vma should contain.
*
* Should be called without holding mmap_lock. vma should be unlocked after use
* with unlock_vma().
*
* Return: A locked vma containing @address, -ENOENT if no vma is found, or
* -ENOMEM if anon_vma couldn't be allocated.
*/
static struct vm_area_struct *lock_vma(struct mm_struct *mm,
unsigned long address)
{
struct vm_area_struct *vma;
vma = lock_vma_under_rcu(mm, address);
if (vma) {
/*
* lock_vma_under_rcu() only checks anon_vma for private
* anonymous mappings. But we need to ensure it is assigned in
* private file-backed vmas as well.
*/
if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma))
vma_end_read(vma);
else
return vma;
}
mmap_read_lock(mm);
vma = find_vma_and_prepare_anon(mm, address);
if (!IS_ERR(vma)) {
/*
* We cannot use vma_start_read() as it may fail due to
* false locked (see comment in vma_start_read()). We
* can avoid that by directly locking vm_lock under
* mmap_lock, which guarantees that nobody can lock the
* vma for write (vma_start_write()) under us.
*/
down_read(&vma->vm_lock->lock);
}
mmap_read_unlock(mm);
return vma;
}
static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
unsigned long dst_start,
unsigned long len)
{
struct vm_area_struct *dst_vma;
dst_vma = lock_vma(dst_mm, dst_start);
if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
return dst_vma;
vma_end_read(dst_vma);
return ERR_PTR(-ENOENT);
}
static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
vma_end_read(vma);
}
#else
static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
unsigned long dst_start,
unsigned long len)
{
struct vm_area_struct *dst_vma;
mmap_read_lock(dst_mm);
dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
if (IS_ERR(dst_vma))
goto out_unlock;
if (validate_dst_vma(dst_vma, dst_start + len))
return dst_vma;
dst_vma = ERR_PTR(-ENOENT);
out_unlock:
mmap_read_unlock(dst_mm);
return dst_vma; return dst_vma;
} }
static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
mmap_read_unlock(vma->vm_mm);
}
#endif
/* Check if dst_addr is outside of file's size. Must be called with ptl held. */ /* Check if dst_addr is outside of file's size. Must be called with ptl held. */
static bool mfill_file_over_size(struct vm_area_struct *dst_vma, static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
unsigned long dst_addr) unsigned long dst_addr)
@@ -350,7 +453,8 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
/* /*
* mfill_atomic processing for HUGETLB vmas. Note that this routine is * mfill_atomic processing for HUGETLB vmas. Note that this routine is
* called with mmap_lock held, it will release mmap_lock before returning. * called with either vma-lock or mmap_lock held, it will release the lock
* before returning.
*/ */
static __always_inline ssize_t mfill_atomic_hugetlb( static __always_inline ssize_t mfill_atomic_hugetlb(
struct userfaultfd_ctx *ctx, struct userfaultfd_ctx *ctx,
@@ -361,7 +465,6 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
uffd_flags_t flags) uffd_flags_t flags)
{ {
struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *dst_mm = dst_vma->vm_mm;
int vm_shared = dst_vma->vm_flags & VM_SHARED;
ssize_t err; ssize_t err;
pte_t *dst_pte; pte_t *dst_pte;
unsigned long src_addr, dst_addr; unsigned long src_addr, dst_addr;
@@ -380,7 +483,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
*/ */
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) { if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
up_read(&ctx->map_changing_lock); up_read(&ctx->map_changing_lock);
mmap_read_unlock(dst_mm); uffd_mfill_unlock(dst_vma);
return -EINVAL; return -EINVAL;
} }
@@ -403,24 +506,28 @@ retry:
* retry, dst_vma will be set to NULL and we must lookup again. * retry, dst_vma will be set to NULL and we must lookup again.
*/ */
if (!dst_vma) { if (!dst_vma) {
dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
if (IS_ERR(dst_vma)) {
err = PTR_ERR(dst_vma);
goto out;
}
err = -ENOENT; err = -ENOENT;
dst_vma = find_dst_vma(dst_mm, dst_start, len); if (!is_vm_hugetlb_page(dst_vma))
if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) goto out_unlock_vma;
goto out_unlock;
err = -EINVAL; err = -EINVAL;
if (vma_hpagesize != vma_kernel_pagesize(dst_vma)) if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
goto out_unlock; goto out_unlock_vma;
vm_shared = dst_vma->vm_flags & VM_SHARED; /*
} * If memory mappings are changing because of non-cooperative
* operation (e.g. mremap) running in parallel, bail out and
/* * request the user to retry later
* If not shared, ensure the dst_vma has a anon_vma. */
*/ down_read(&ctx->map_changing_lock);
err = -ENOMEM; err = -EAGAIN;
if (!vm_shared) { if (atomic_read(&ctx->mmap_changing))
if (unlikely(anon_vma_prepare(dst_vma)))
goto out_unlock; goto out_unlock;
} }
@@ -465,7 +572,7 @@ retry:
if (unlikely(err == -ENOENT)) { if (unlikely(err == -ENOENT)) {
up_read(&ctx->map_changing_lock); up_read(&ctx->map_changing_lock);
mmap_read_unlock(dst_mm); uffd_mfill_unlock(dst_vma);
BUG_ON(!folio); BUG_ON(!folio);
err = copy_folio_from_user(folio, err = copy_folio_from_user(folio,
@@ -474,17 +581,6 @@ retry:
err = -EFAULT; err = -EFAULT;
goto out; goto out;
} }
mmap_read_lock(dst_mm);
down_read(&ctx->map_changing_lock);
/*
* If memory mappings are changing because of non-cooperative
* operation (e.g. mremap) running in parallel, bail out and
* request the user to retry later
*/
if (atomic_read(&ctx->mmap_changing)) {
err = -EAGAIN;
break;
}
dst_vma = NULL; dst_vma = NULL;
goto retry; goto retry;
@@ -505,7 +601,8 @@ retry:
out_unlock: out_unlock:
up_read(&ctx->map_changing_lock); up_read(&ctx->map_changing_lock);
mmap_read_unlock(dst_mm); out_unlock_vma:
uffd_mfill_unlock(dst_vma);
out: out:
if (folio) if (folio)
folio_put(folio); folio_put(folio);
@@ -597,7 +694,15 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
copied = 0; copied = 0;
folio = NULL; folio = NULL;
retry: retry:
mmap_read_lock(dst_mm); /*
* Make sure the vma is not shared, that the dst range is
* both valid and fully within a single existing vma.
*/
dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
if (IS_ERR(dst_vma)) {
err = PTR_ERR(dst_vma);
goto out;
}
/* /*
* If memory mappings are changing because of non-cooperative * If memory mappings are changing because of non-cooperative
@@ -609,15 +714,6 @@ retry:
if (atomic_read(&ctx->mmap_changing)) if (atomic_read(&ctx->mmap_changing))
goto out_unlock; goto out_unlock;
/*
* Make sure the vma is not shared, that the dst range is
* both valid and fully within a single existing vma.
*/
err = -ENOENT;
dst_vma = find_dst_vma(dst_mm, dst_start, len);
if (!dst_vma)
goto out_unlock;
err = -EINVAL; err = -EINVAL;
/* /*
* shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
@@ -647,16 +743,6 @@ retry:
uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
goto out_unlock; goto out_unlock;
/*
* Ensure the dst_vma has a anon_vma or this page
* would get a NULL anon_vma when moved in the
* dst_vma.
*/
err = -ENOMEM;
if (!(dst_vma->vm_flags & VM_SHARED) &&
unlikely(anon_vma_prepare(dst_vma)))
goto out_unlock;
while (src_addr < src_start + len) { while (src_addr < src_start + len) {
pmd_t dst_pmdval; pmd_t dst_pmdval;
@@ -699,7 +785,7 @@ retry:
void *kaddr; void *kaddr;
up_read(&ctx->map_changing_lock); up_read(&ctx->map_changing_lock);
mmap_read_unlock(dst_mm); uffd_mfill_unlock(dst_vma);
BUG_ON(!folio); BUG_ON(!folio);
kaddr = kmap_local_folio(folio, 0); kaddr = kmap_local_folio(folio, 0);
@@ -730,7 +816,7 @@ retry:
out_unlock: out_unlock:
up_read(&ctx->map_changing_lock); up_read(&ctx->map_changing_lock);
mmap_read_unlock(dst_mm); uffd_mfill_unlock(dst_vma);
out: out:
if (folio) if (folio)
folio_put(folio); folio_put(folio);
@@ -1267,27 +1353,136 @@ static int validate_move_areas(struct userfaultfd_ctx *ctx,
if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma)) if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
return -EINVAL; return -EINVAL;
/*
* Ensure the dst_vma has a anon_vma or this page
* would get a NULL anon_vma when moved in the
* dst_vma.
*/
if (unlikely(anon_vma_prepare(dst_vma)))
return -ENOMEM;
return 0; return 0;
} }
static __always_inline
int find_vmas_mm_locked(struct mm_struct *mm,
unsigned long dst_start,
unsigned long src_start,
struct vm_area_struct **dst_vmap,
struct vm_area_struct **src_vmap)
{
struct vm_area_struct *vma;
mmap_assert_locked(mm);
vma = find_vma_and_prepare_anon(mm, dst_start);
if (IS_ERR(vma))
return PTR_ERR(vma);
*dst_vmap = vma;
/* Skip finding src_vma if src_start is in dst_vma */
if (src_start >= vma->vm_start && src_start < vma->vm_end)
goto out_success;
vma = vma_lookup(mm, src_start);
if (!vma)
return -ENOENT;
out_success:
*src_vmap = vma;
return 0;
}
#ifdef CONFIG_PER_VMA_LOCK
static int uffd_move_lock(struct mm_struct *mm,
unsigned long dst_start,
unsigned long src_start,
struct vm_area_struct **dst_vmap,
struct vm_area_struct **src_vmap)
{
struct vm_area_struct *vma;
int err;
vma = lock_vma(mm, dst_start);
if (IS_ERR(vma))
return PTR_ERR(vma);
*dst_vmap = vma;
/*
* Skip finding src_vma if src_start is in dst_vma. This also ensures
* that we don't lock the same vma twice.
*/
if (src_start >= vma->vm_start && src_start < vma->vm_end) {
*src_vmap = vma;
return 0;
}
/*
* Using lock_vma() to get src_vma can lead to following deadlock:
*
* Thread1 Thread2
* ------- -------
* vma_start_read(dst_vma)
* mmap_write_lock(mm)
* vma_start_write(src_vma)
* vma_start_read(src_vma)
* mmap_read_lock(mm)
* vma_start_write(dst_vma)
*/
*src_vmap = lock_vma_under_rcu(mm, src_start);
if (likely(*src_vmap))
return 0;
/* Undo any locking and retry in mmap_lock critical section */
vma_end_read(*dst_vmap);
mmap_read_lock(mm);
err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
if (!err) {
/*
* See comment in lock_vma() as to why not using
* vma_start_read() here.
*/
down_read(&(*dst_vmap)->vm_lock->lock);
if (*dst_vmap != *src_vmap)
down_read(&(*src_vmap)->vm_lock->lock);
}
mmap_read_unlock(mm);
return err;
}
static void uffd_move_unlock(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma)
{
vma_end_read(src_vma);
if (src_vma != dst_vma)
vma_end_read(dst_vma);
}
#else
static int uffd_move_lock(struct mm_struct *mm,
unsigned long dst_start,
unsigned long src_start,
struct vm_area_struct **dst_vmap,
struct vm_area_struct **src_vmap)
{
int err;
mmap_read_lock(mm);
err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
if (err)
mmap_read_unlock(mm);
return err;
}
static void uffd_move_unlock(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma)
{
mmap_assert_locked(src_vma->vm_mm);
mmap_read_unlock(dst_vma->vm_mm);
}
#endif
/** /**
* move_pages - move arbitrary anonymous pages of an existing vma * move_pages - move arbitrary anonymous pages of an existing vma
* @ctx: pointer to the userfaultfd context * @ctx: pointer to the userfaultfd context
* @mm: the address space to move pages
* @dst_start: start of the destination virtual memory range * @dst_start: start of the destination virtual memory range
* @src_start: start of the source virtual memory range * @src_start: start of the source virtual memory range
* @len: length of the virtual memory range * @len: length of the virtual memory range
* @mode: flags from uffdio_move.mode * @mode: flags from uffdio_move.mode
* *
* Must be called with mmap_lock held for read. * It will either use the mmap_lock in read mode or per-vma locks
* *
* move_pages() remaps arbitrary anonymous pages atomically in zero * move_pages() remaps arbitrary anonymous pages atomically in zero
* copy. It only works on non shared anonymous pages because those can * copy. It only works on non shared anonymous pages because those can
@@ -1355,10 +1550,10 @@ static int validate_move_areas(struct userfaultfd_ctx *ctx,
* could be obtained. This is the only additional complexity added to * could be obtained. This is the only additional complexity added to
* the rmap code to provide this anonymous page remapping functionality. * the rmap code to provide this anonymous page remapping functionality.
*/ */
ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm, ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
unsigned long dst_start, unsigned long src_start, unsigned long src_start, unsigned long len, __u64 mode)
unsigned long len, __u64 mode)
{ {
struct mm_struct *mm = ctx->mm;
struct vm_area_struct *src_vma, *dst_vma; struct vm_area_struct *src_vma, *dst_vma;
unsigned long src_addr, dst_addr; unsigned long src_addr, dst_addr;
pmd_t *src_pmd, *dst_pmd; pmd_t *src_pmd, *dst_pmd;
@@ -1376,28 +1571,34 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
WARN_ON_ONCE(dst_start + len <= dst_start)) WARN_ON_ONCE(dst_start + len <= dst_start))
goto out; goto out;
err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma);
if (err)
goto out;
/* Re-check after taking map_changing_lock */
err = -EAGAIN;
down_read(&ctx->map_changing_lock);
if (likely(atomic_read(&ctx->mmap_changing)))
goto out_unlock;
/* /*
* Make sure the vma is not shared, that the src and dst remap * Make sure the vma is not shared, that the src and dst remap
* ranges are both valid and fully within a single existing * ranges are both valid and fully within a single existing
* vma. * vma.
*/ */
src_vma = find_vma(mm, src_start); err = -EINVAL;
if (!src_vma || (src_vma->vm_flags & VM_SHARED)) if (src_vma->vm_flags & VM_SHARED)
goto out; goto out_unlock;
if (src_start < src_vma->vm_start || if (src_start + len > src_vma->vm_end)
src_start + len > src_vma->vm_end) goto out_unlock;
goto out;
dst_vma = find_vma(mm, dst_start); if (dst_vma->vm_flags & VM_SHARED)
if (!dst_vma || (dst_vma->vm_flags & VM_SHARED)) goto out_unlock;
goto out; if (dst_start + len > dst_vma->vm_end)
if (dst_start < dst_vma->vm_start || goto out_unlock;
dst_start + len > dst_vma->vm_end)
goto out;
err = validate_move_areas(ctx, src_vma, dst_vma); err = validate_move_areas(ctx, src_vma, dst_vma);
if (err) if (err)
goto out; goto out_unlock;
for (src_addr = src_start, dst_addr = dst_start; for (src_addr = src_start, dst_addr = dst_start;
src_addr < src_start + len;) { src_addr < src_start + len;) {
@@ -1514,6 +1715,9 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
moved += step_size; moved += step_size;
} }
out_unlock:
up_read(&ctx->map_changing_lock);
uffd_move_unlock(dst_vma, src_vma);
out: out:
VM_WARN_ON(moved < 0); VM_WARN_ON(moved < 0);
VM_WARN_ON(err > 0); VM_WARN_ON(err > 0);