radix-tree: Use local_lock for protection

The radix-tree and idr preload mechanisms use preempt_disable() to protect
the complete operation between xxx_preload() and xxx_preload_end().
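
For reference, a typical caller pairs the two like this (a minimal usage
sketch, not taken from this patch; 'my_idr', 'my_lock' and 'my_alloc_id'
are placeholder names):

	static DEFINE_SPINLOCK(my_lock);	/* hypothetical caller-side lock */
	static DEFINE_IDR(my_idr);

	static int my_alloc_id(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* preallocate; enter protected section */
		spin_lock(&my_lock);		/* regular spinlock taken inside the section */
		id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
		spin_unlock(&my_lock);
		idr_preload_end();		/* leave protected section */
		return id;
	}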

The code inside the preempt-disabled section acquires regular spinlocks,
which are converted to 'sleeping' spinlocks on a PREEMPT_RT kernel, and
eventually calls into a memory allocator. Neither is allowed with
preemption disabled under RT semantics.

Convert it to a local_lock, which allows RT kernels to substitute it with a
real per-CPU lock. On non-RT kernels this maps to preempt_disable() as
before, but it also provides lockdep coverage of the critical region.
No functional change.
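
For illustration, the conversion follows this pattern (a minimal sketch
with hypothetical 'foo' names; the actual hunks are in the diff below):

	struct foo_preload {
		local_lock_t lock;	/* replaces the bare preempt_disable() */
		unsigned int nr;
	};
	static DEFINE_PER_CPU(struct foo_preload, foo_preloads) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void foo_preload_end(void)
	{
		/* preempt_enable() on !RT; unlocks a per-CPU spinlock on RT */
		local_unlock(&foo_preloads.lock);
	}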

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200527201119.1692513-3-bigeasy@linutronix.de
Authored by Sebastian Andrzej Siewior on 2020-05-27 22:11:14 +02:00; committed by Ingo Molnar
commit cfa6705d89 (parent 91710728d1)
3 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/include/linux/idr.h b/include/linux/idr.h
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -171,7 +171,7 @@ static inline bool idr_is_empty(const struct idr *idr)
  */
 static inline void idr_preload_end(void)
 {
-	preempt_enable();
+	local_unlock(&radix_tree_preloads.lock);
 }
 
 /**

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -16,11 +16,20 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/xarray.h>
+#include <linux/local_lock.h>
 
 /* Keep unconverted code working */
 #define radix_tree_root		xarray
 #define radix_tree_node		xa_node
 
+struct radix_tree_preload {
+	local_lock_t lock;
+	unsigned nr;
+	/* nodes->parent points to next preallocated node */
+	struct radix_tree_node *nodes;
+};
+DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+
 /*
  * The bottom two bits of the slot determine how the remaining bits in the
  * slot are interpreted:
@@ -245,7 +254,7 @@ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
 
 static inline void radix_tree_preload_end(void)
 {
-	preempt_enable();
+	local_unlock(&radix_tree_preloads.lock);
 }
 
 void __rcu **idr_get_free(struct radix_tree_root *root,

diff --git a/lib/radix-tree.c b/lib/radix-tree.c
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -20,6 +20,7 @@
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
 #include <linux/percpu.h>
+#include <linux/local_lock.h>
 #include <linux/preempt.h>		/* in_interrupt() */
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
@@ -27,7 +28,6 @@
 #include <linux/string.h>
 #include <linux/xarray.h>
 
-
 /*
  * Radix tree node cache.
  */
@ -58,12 +58,10 @@ struct kmem_cache *radix_tree_node_cachep;
/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
unsigned nr;
/* nodes->parent points to next preallocated node */
struct radix_tree_node *nodes;
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
.lock = INIT_LOCAL_LOCK(lock),
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
@@ -332,14 +330,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
 	 */
 	gfp_mask &= ~__GFP_ACCOUNT;
 
-	preempt_disable();
+	local_lock(&radix_tree_preloads.lock);
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
-		preempt_enable();
+		local_unlock(&radix_tree_preloads.lock);
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
-		preempt_disable();
+		local_lock(&radix_tree_preloads.lock);
 		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < nr) {
 			node->parent = rtp->nodes;
@@ -381,7 +379,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 	if (gfpflags_allow_blocking(gfp_mask))
 		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 	/* Preloading doesn't help anything with this gfp mask, skip it */
-	preempt_disable();
+	local_lock(&radix_tree_preloads.lock);
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -1470,7 +1468,7 @@ EXPORT_SYMBOL(radix_tree_tagged);
 void idr_preload(gfp_t gfp_mask)
 {
 	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
-		preempt_disable();
+		local_lock(&radix_tree_preloads.lock);
 }
 EXPORT_SYMBOL(idr_preload);
 