rcu/kvfree: Add debug to check grace periods

This commit adds debugging checks to verify that the required RCU
grace period has elapsed for each kvfree_rcu_bulk_data structure that
arrives at the kvfree_rcu_bulk() function.  These checks make use
of that structure's ->gp_snap field, which has been upgraded from an
unsigned long to an rcu_gp_oldstate structure.  This upgrade reduces
the chances of false positives to nearly zero, even on 32-bit systems,
for which this structure carries 64 bits of state.

Cc: Ziwei Dai <ziwei.dai@unisoc.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Author: Paul E. McKenney <paulmck@kernel.org>
Date:   2023-04-03 16:49:14 -07:00
Commit: cdfa0f6fa6 (parent 7e3f926bf4)
1 file changed, 17 insertions(+), 16 deletions(-)
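The check added by this patch builds on the kernel's full-state polled grace-period API. As a rough, stand-alone orientation (not part of the patch; struct my_deferred_page, my_queue() and my_try_free() below are invented for illustration, and only the two *_synchronize_rcu_full() calls are real kernel interfaces), the pattern is to record a snapshot with get_state_synchronize_rcu_full() when an object is queued, and to free it only once poll_state_synchronize_rcu_full() reports that a full grace period has elapsed since that snapshot:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* Hypothetical object used only for this illustration. */
	struct my_deferred_page {
		struct rcu_gp_oldstate gp_snap;	/* Full grace-period snapshot. */
		void *ptr;			/* Object to be freed later. */
	};

	/* Snapshot the current RCU grace-period state when queueing the object. */
	static void my_queue(struct my_deferred_page *p, void *ptr)
	{
		p->ptr = ptr;
		get_state_synchronize_rcu_full(&p->gp_snap);
	}

	/* Free the object only if a full grace period has elapsed since my_queue(). */
	static bool my_try_free(struct my_deferred_page *p)
	{
		if (!poll_state_synchronize_rcu_full(&p->gp_snap))
			return false;	/* Grace period not yet complete. */

		kfree(p->ptr);
		return true;
	}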

--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c

@@ -2756,7 +2756,7 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 struct kvfree_rcu_bulk_data {
 	struct list_head list;
-	unsigned long gp_snap;
+	struct rcu_gp_oldstate gp_snap;
 	unsigned long nr_records;
 	void *records[];
 };
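As an annotation (not part of the patch): struct rcu_gp_oldstate is defined in include/linux/rcupdate.h roughly as below at the time of this commit. It records the normal and the expedited grace-period sequence numbers separately, which is why the commit log can say that the field now carries 64 bits of state even on 32-bit kernels:

	/* Approximate definition from include/linux/rcupdate.h of this era. */
	struct rcu_gp_oldstate {
		unsigned long rgos_norm;	/* Normal grace-period sequence number. */
		unsigned long rgos_exp;		/* Expedited grace-period sequence number. */
	};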
@@ -2921,23 +2921,24 @@ kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
 	int i;
 
 	debug_rcu_bhead_unqueue(bnode);
-
-	rcu_lock_acquire(&rcu_callback_map);
-	if (idx == 0) { // kmalloc() / kfree().
-		trace_rcu_invoke_kfree_bulk_callback(
-			rcu_state.name, bnode->nr_records,
-			bnode->records);
+	if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
+		rcu_lock_acquire(&rcu_callback_map);
+		if (idx == 0) { // kmalloc() / kfree().
+			trace_rcu_invoke_kfree_bulk_callback(
+				rcu_state.name, bnode->nr_records,
+				bnode->records);
 
-		kfree_bulk(bnode->nr_records, bnode->records);
-	} else { // vmalloc() / vfree().
-		for (i = 0; i < bnode->nr_records; i++) {
-			trace_rcu_invoke_kvfree_callback(
-				rcu_state.name, bnode->records[i], 0);
+			kfree_bulk(bnode->nr_records, bnode->records);
+		} else { // vmalloc() / vfree().
+			for (i = 0; i < bnode->nr_records; i++) {
+				trace_rcu_invoke_kvfree_callback(
+					rcu_state.name, bnode->records[i], 0);
 
-			vfree(bnode->records[i]);
+				vfree(bnode->records[i]);
+			}
 		}
+		rcu_lock_release(&rcu_callback_map);
 	}
-	rcu_lock_release(&rcu_callback_map);
 
 	raw_spin_lock_irqsave(&krcp->lock, flags);
 	if (put_cached_bnode(krcp, bnode))
@@ -3081,7 +3082,7 @@ kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
 		INIT_LIST_HEAD(&bulk_ready[i]);
 
 		list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
-			if (!poll_state_synchronize_rcu(bnode->gp_snap))
+			if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
 				break;
 
 			atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
@@ -3285,7 +3286,7 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 
 	// Finally insert and update the GP for this page.
 	bnode->records[bnode->nr_records++] = ptr;
-	bnode->gp_snap = get_state_synchronize_rcu();
+	get_state_synchronize_rcu_full(&bnode->gp_snap);
 	atomic_inc(&(*krcp)->bulk_count[idx]);
 
 	return true;
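For orientation, the prototypes behind the calls swapped in the hunks above are approximately the following (paraphrased from the kernel headers of this era, not shown in the patch itself). The plain unsigned long cookie packs the grace-period state into a single word, so on a long-running 32-bit kernel it can in principle alias an older state and make a not-yet-elapsed grace period look complete; the full-state variants keep enough state to make the new WARN_ON_ONCE() check in kvfree_rcu_bulk() trustworthy:

	/* Single-cookie polled grace-period API (the calls being replaced). */
	unsigned long get_state_synchronize_rcu(void);
	bool poll_state_synchronize_rcu(unsigned long oldstate);

	/* Full-state variants used by this patch. */
	void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
	bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);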