mm: expand vma iterator interface

Add wrappers for the maple tree to the vma iterator.  This will provide
type safety at compile time.
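
For illustration only, not part of the patch: a minimal sketch of how a caller
might walk a process's VMAs through these wrappers instead of touching the
maple tree state directly.  The mm pointer and the mmap lock handling are
assumed to come from the caller.

	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		pr_debug("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
	mmap_read_unlock(mm);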

Link: https://lkml.kernel.org/r/20230120162650.984577-8-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

include/linux/mm.h

@@ -670,16 +670,16 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
- return mas_find(&vmi->mas, max);
+ return mas_find(&vmi->mas, max - 1);
}
static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
/*
- * Uses vma_find() to get the first VMA when the iterator starts.
+ * Uses mas_find() to get the first VMA when the iterator starts.
* Calling mas_next() could skip the first entry.
*/
- return vma_find(vmi, ULONG_MAX);
+ return mas_find(&vmi->mas, ULONG_MAX);
}
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
@@ -692,12 +692,50 @@ static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
return vmi->mas.index;
}
static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
return vmi->mas.last + 1;
}
static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
unsigned long count)
{
return mas_expected_entries(&vmi->mas, count);
}
/* Free any unused preallocations */
static inline void vma_iter_free(struct vma_iterator *vmi)
{
mas_destroy(&vmi->mas);
}
static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{
vmi->mas.index = vma->vm_start;
vmi->mas.last = vma->vm_end - 1;
mas_store(&vmi->mas, vma);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
return 0;
}
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
mas_pause(&vmi->mas);
}
static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
mas_set(&vmi->mas, addr);
}
#define for_each_vma(__vmi, __vma) \
while (((__vma) = vma_next(&(__vmi))) != NULL)
/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end) \
- while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
+ while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
#ifdef CONFIG_SHMEM
/*

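Side note, illustration only: with this change vma_find() takes an exclusive
limit and converts it to the maple tree's inclusive index itself, so
for_each_vma_range() is passed the usual [start, end) range unchanged.  A
hedged sketch, assuming start/end and the locked mm come from the caller:

	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, start);

	/* end is exclusive, like everywhere else in the MM code */
	for_each_vma_range(vmi, vma, end)
		pr_debug("overlap %lx-%lx\n", vma->vm_start, vma->vm_end);
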
include/linux/mm_types.h

@@ -849,9 +849,7 @@ struct vma_iterator {
static inline void vma_iter_init(struct vma_iterator *vmi,
struct mm_struct *mm, unsigned long addr)
{
- vmi->mas.tree = &mm->mm_mt;
- vmi->mas.index = addr;
- vmi->mas.node = MAS_START;
+ mas_init(&vmi->mas, &mm->mm_mt, addr);
}
struct mmu_gather;

mm/internal.h

@@ -877,4 +877,68 @@ static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
return !(vma->vm_flags & VM_SOFTDIRTY);
}
/*
* VMA Iterator functions shared between nommu and mmap
*/
static inline int vma_iter_prealloc(struct vma_iterator *vmi)
{
return mas_preallocate(&vmi->mas, GFP_KERNEL);
}
static inline void vma_iter_clear(struct vma_iterator *vmi,
unsigned long start, unsigned long end)
{
mas_set_range(&vmi->mas, start, end - 1);
mas_store_prealloc(&vmi->mas, NULL);
}
static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
return mas_walk(&vmi->mas);
}
/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start)) {
printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last);
mt_dump(vmi->mas.tree);
}
if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) {
printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last);
mt_dump(vmi->mas.tree);
}
#endif
if (vmi->mas.node != MAS_START &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
vmi->mas.index = vma->vm_start;
vmi->mas.last = vma->vm_end - 1;
mas_store_prealloc(&vmi->mas, vma);
}
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
struct vm_area_struct *vma, gfp_t gfp)
{
if (vmi->mas.node != MAS_START &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
vmi->mas.index = vma->vm_start;
vmi->mas.last = vma->vm_end - 1;
mas_store_gfp(&vmi->mas, vma, gfp);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
return 0;
}
#endif /* __MM_INTERNAL_H */
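
A rough sketch of the preallocate-then-store pattern these shared helpers are
meant for (illustration only; the vmi and vma below are assumed to be set up
by the caller and error paths are abbreviated):

	vma_iter_set(&vmi, vma->vm_start);
	if (vma_iter_prealloc(&vmi))
		return -ENOMEM;

	/* ... failures before the write call vma_iter_free(&vmi) instead ... */

	vma_iter_store(&vmi, vma);	/* uses the preallocated nodes, cannot fail */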

mm/mmap.c

@@ -144,6 +144,24 @@ static void remove_vma(struct vm_area_struct *vma)
vm_area_free(vma);
}
static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
unsigned long min)
{
return mas_prev(&vmi->mas, min);
}
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
unsigned long start, unsigned long end, gfp_t gfp)
{
vmi->mas.index = start;
vmi->mas.last = end - 1;
mas_store_gfp(&vmi->mas, NULL, gfp);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
return 0;
}
/*
* check_brk_limits() - Use platform specific check of range & verify mlock
* limits.