mm: vmalloc: fix lockdep warning

Lockdep reports a possible deadlock in the find_vmap_area_exceed_addr_lock()
function:

============================================
WARNING: possible recursive locking detected
6.9.0-rc1-00060-ged3ccc57b108-dirty #6140 Not tainted
--------------------------------------------
drgn/455 is trying to acquire lock:
ffff0000c00131d0 (&vn->busy.lock/1){+.+.}-{2:2}, at: find_vmap_area_exceed_addr_lock+0x64/0x124

but task is already holding lock:
ffff0000c0011878 (&vn->busy.lock/1){+.+.}-{2:2}, at: find_vmap_area_exceed_addr_lock+0x64/0x124

other info that might help us debug this:
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(&vn->busy.lock/1);
  lock(&vn->busy.lock/1);

 *** DEADLOCK ***

Indeed, this can happen if find_vmap_area_exceed_addr_lock() is called
concurrently, because it tries to acquire two node locks at once.  That was
done to prevent the lowest VA found in a previous step from being removed.
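
For example (hypothetical interleaving; node_A and node_B stand for two
different vmap nodes):

       CPU0                                 CPU1
       ----                                 ----
  lock(node_A->busy.lock);             lock(node_B->busy.lock);
    <kept held, the lowest VA            <kept held, the lowest VA
     found so far resides here>           found so far resides here>
  lock(node_B->busy.lock);             lock(node_A->busy.lock);

          *** each CPU waits for the lock the other one holds ***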

To address this, the lowest VA is now found first without keeping the lock
of the node where it resides held.  As a last step we check whether the VA
is still there, because it can go away; if it has been removed, the search
is repeated and proceeds with the next lowest VA.
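
Condensed, the reworked lookup in the patch below boils down to the
following sketch (simplified, with comments added here; not the verbatim
kernel code):

repeat:
	/* Pass 1: record the lowest va_start; never hold two node locks. */
	for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
		vn = &vmap_nodes[i];

		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
		if (*va && (!va_start_lowest || (*va)->va_start < va_start_lowest))
			va_start_lowest = (*va)->va_start;
		spin_unlock(&vn->busy.lock);
	}

	/* Pass 2: re-check under the owning node's lock; the VA may be gone. */
	if (va_start_lowest) {
		vn = addr_to_node(va_start_lowest);

		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area(va_start_lowest, &vn->busy.root);
		if (*va)
			return vn;	/* vn->busy.lock is kept held */

		spin_unlock(&vn->busy.lock);
		goto repeat;		/* rare: removed concurrently */
	}

	return NULL;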

[akpm@linux-foundation.org: fix comment typos, per Baoquan]
Link: https://lkml.kernel.org/r/20240328140330.4747-1-urezki@gmail.com
Fixes: 53becf32ae ("mm: vmalloc: support multiple nodes in vread_iter")
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Tested-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Omar Sandoval <osandov@fb.com>
Reported-by: Jens Axboe <axboe@kernel.dk>
Cc: Baoquan He <bhe@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 mm/vmalloc.c | 71 ++++++++++++++++----------
 1 file changed, 42 insertions(+), 29 deletions(-)

@@ -989,6 +989,27 @@ unsigned long vmalloc_nr_pages(void)
 	return atomic_long_read(&nr_vmalloc_pages);
 }
 
+static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
+{
+	struct rb_node *n = root->rb_node;
+
+	addr = (unsigned long)kasan_reset_tag((void *)addr);
+
+	while (n) {
+		struct vmap_area *va;
+
+		va = rb_entry(n, struct vmap_area, rb_node);
+
+		if (addr < va->va_start)
+			n = n->rb_left;
+		else if (addr >= va->va_end)
+			n = n->rb_right;
+		else
+			return va;
+	}
+
+	return NULL;
+}
+
 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
 static struct vmap_area *
 __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
@@ -1025,47 +1046,39 @@ __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
 static struct vmap_node *
 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
 {
-	struct vmap_node *vn, *va_node = NULL;
-	struct vmap_area *va_lowest;
+	unsigned long va_start_lowest;
+	struct vmap_node *vn;
 	int i;
 
-	for (i = 0; i < nr_vmap_nodes; i++) {
+repeat:
+	for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
 		vn = &vmap_nodes[i];
 
 		spin_lock(&vn->busy.lock);
-		va_lowest = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
-		if (va_lowest) {
-			if (!va_node || va_lowest->va_start < (*va)->va_start) {
-				if (va_node)
-					spin_unlock(&va_node->busy.lock);
+		*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
 
-				*va = va_lowest;
-				va_node = vn;
+		if (*va)
+			if (!va_start_lowest || (*va)->va_start < va_start_lowest)
+				va_start_lowest = (*va)->va_start;
 
-				continue;
-			}
-		}
 		spin_unlock(&vn->busy.lock);
 	}
 
-	return va_node;
-}
+	/*
+	 * Check if found VA exists, it might have gone away. In this case we
+	 * repeat the search because a VA has been removed concurrently and we
+	 * need to proceed to the next one, which is a rare case.
+	 */
+	if (va_start_lowest) {
+		vn = addr_to_node(va_start_lowest);
 
-static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
-{
-	struct rb_node *n = root->rb_node;
+		spin_lock(&vn->busy.lock);
+		*va = __find_vmap_area(va_start_lowest, &vn->busy.root);
 
-	addr = (unsigned long)kasan_reset_tag((void *)addr);
+		if (*va)
+			return vn;
 
-	while (n) {
-		struct vmap_area *va;
-
-		va = rb_entry(n, struct vmap_area, rb_node);
-
-		if (addr < va->va_start)
-			n = n->rb_left;
-		else if (addr >= va->va_end)
-			n = n->rb_right;
-		else
-			return va;
+		spin_unlock(&vn->busy.lock);
+		goto repeat;
 	}
 
 	return NULL;