mm/mmap: use vma_prepare() and vma_complete() in vma_expand()

Use the new locking functions for vma_expand().  This reduces code
duplication.

At the same time change VM_BUG_ON() to VM_WARN_ON()

Link: https://lkml.kernel.org/r/20230120162650.984577-42-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Liam R. Howlett 2023-01-20 11:26:42 -05:00 committed by Andrew Morton
parent 440703e082
commit 9303d3e1c3
1 changed file with 72 additions and 116 deletions

188
mm/mmap.c
View File

@ -457,122 +457,6 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
return 0;
}
/*
* vma_expand - Expand an existing VMA
*
* @vmi: The vma iterator
* @vma: The vma to expand
* @start: The start of the vma
* @end: The exclusive end of the vma
* @pgoff: The page offset of vma
* @next: The vma following @vma, if it can be expanded over.
*
* Expand @vma to @start and @end. Can expand off the start and end. Will
* expand over @next if it's different from @vma and @end == @next->vm_end.
* Checking if the @vma can expand and merge with @next needs to be handled by
* the caller.
*
* Returns: 0 on success
*/
inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff,
struct vm_area_struct *next)
{
struct mm_struct *mm = vma->vm_mm;
struct address_space *mapping = NULL;
struct rb_root_cached *root = NULL;
struct anon_vma *anon_vma = vma->anon_vma;
struct file *file = vma->vm_file;
bool remove_next = false;
/*
 * Expanding over @next: @next will be removed below.  If @vma has no
 * anon_vma of its own, adopt @next's and clone the anon_vma chain now
 * so pages mapped through @next stay reachable after it is freed.
 */
if (next && (vma != next) && (end == next->vm_end)) {
remove_next = true;
if (next->anon_vma && !vma->anon_vma) {
int error;
anon_vma = next->anon_vma;
vma->anon_vma = anon_vma;
error = anon_vma_clone(vma, next);
if (error)
return error;
}
}
/* Not merging but overwriting any part of next is not handled. */
VM_BUG_ON(next && !remove_next && next != vma && end > next->vm_start);
/* Only handles expanding */
VM_BUG_ON(vma->vm_start < start || vma->vm_end > end);
/* Preallocate maple tree nodes so the vma_iter_store() below cannot fail. */
if (vma_iter_prealloc(vmi))
goto nomem;
vma_adjust_trans_huge(vma, start, end, 0);
/* File-backed: take the i_mmap rmap lock before touching the interval tree. */
if (file) {
mapping = file->f_mapping;
root = &mapping->i_mmap;
uprobe_munmap(vma, vma->vm_start, vma->vm_end);
i_mmap_lock_write(mapping);
}
/* Anon: lock and detach from the anon interval tree across the update. */
if (anon_vma) {
anon_vma_lock_write(anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
}
/* Remove @vma from the file rmap tree; reinserted with new bounds below. */
if (file) {
flush_dcache_mmap_lock(mapping);
vma_interval_tree_remove(vma, root);
}
/* VMA iterator points to previous, so set to start if necessary */
if (vma_iter_addr(vmi) != start)
vma_iter_set(vmi, start);
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
/* Write the expanded range back into the maple tree (cannot fail: prealloc'd). */
vma_iter_store(vmi, vma);
if (file) {
vma_interval_tree_insert(vma, root);
flush_dcache_mmap_unlock(mapping);
}
/* Expanding over the next vma */
if (remove_next && file) {
__remove_shared_vm_struct(next, file, mapping);
}
if (anon_vma) {
anon_vma_interval_tree_post_update_vma(vma);
anon_vma_unlock_write(anon_vma);
}
if (file) {
i_mmap_unlock_write(mapping);
uprobe_mmap(vma);
}
/* @next was absorbed: drop its references and free it. */
if (remove_next) {
if (file) {
uprobe_munmap(next, next->vm_start, next->vm_end);
fput(file);
}
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
mpol_put(vma_policy(next));
vm_area_free(next);
}
validate_mm(mm);
return 0;
nomem:
return -ENOMEM;
}
/*
* vma_prepare() - Helper function for handling locking VMAs prior to altering
* @vp: The initialized vma_prepare struct
@ -694,6 +578,78 @@ again:
uprobe_mmap(vp->insert);
}
/*
* vma_expand - Expand an existing VMA
*
* @vmi: The vma iterator
* @vma: The vma to expand
* @start: The start of the vma
* @end: The exclusive end of the vma
* @pgoff: The page offset of vma
* @next: The vma following @vma, if it can be expanded over.
*
* Expand @vma to @start and @end. Can expand off the start and end. Will
* expand over @next if it's different from @vma and @end == @next->vm_end.
* Checking if the @vma can expand and merge with @next needs to be handled by
* the caller.
*
* Returns: 0 on success
*/
inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff,
struct vm_area_struct *next)
{
struct vma_prepare vp;
/* Zeroed vp: no insert VMA, no removals yet; file/mapping filled in below. */
memset(&vp, 0, sizeof(vp));
vp.vma = vma;
vp.anon_vma = vma->anon_vma;
/*
 * Expanding over @next: mark it for removal (freed by vma_complete()).
 * If @vma has no anon_vma of its own, adopt @next's and clone the
 * anon_vma chain now so pages mapped through @next stay reachable.
 */
if (next && (vma != next) && (end == next->vm_end)) {
vp.remove = next;
if (next->anon_vma && !vma->anon_vma) {
int error;
vp.anon_vma = next->anon_vma;
vma->anon_vma = next->anon_vma;
error = anon_vma_clone(vma, next);
if (error)
return error;
}
}
/* Not merging but overwriting any part of next is not handled. */
VM_WARN_ON(next && !vp.remove &&
next != vma && end > next->vm_start);
/* Only handles expanding */
VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
/* Preallocate maple tree nodes so the vma_iter_store() below cannot fail. */
if (vma_iter_prealloc(vmi))
goto nomem;
vma_adjust_trans_huge(vma, start, end, 0);
vp.file = vma->vm_file;
if (vp.file)
vp.mapping = vp.file->f_mapping;
/* VMA iterator points to previous, so set to start if necessary */
if (vma_iter_addr(vmi) != start)
vma_iter_set(vmi, start);
/* Takes the rmap locks (i_mmap / anon_vma) around the bounds update. */
vma_prepare(&vp);
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
/* Note: the vma iterator must be pointing to the expanding VMA */
vma_iter_store(vmi, vma);
/* Reinserts into rmap trees, drops locks, and frees @next if vp.remove set. */
vma_complete(&vp, vmi, vma->vm_mm);
validate_mm(vma->vm_mm);
return 0;
nomem:
return -ENOMEM;
}
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.