diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0628df155970..c34422d92aa9 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -169,7 +169,17 @@ module_param(gp_init_delay, int, 0644);
 #else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
 static const int gp_init_delay;
 #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
-#define PER_RCU_NODE_PERIOD 10	/* Number of grace periods between delays. */
+
+/*
+ * Number of grace periods between delays, normalized by the duration of
+ * the delay. The longer the delay, the more grace periods between each
+ * delay. The reason for this normalization is that it means that, for
+ * non-zero delays, the overall slowdown of grace periods is constant
+ * regardless of the duration of the delay. This arrangement balances
+ * the need for long delays to increase some race probabilities with the
+ * need for fast grace periods to increase other race probabilities.
+ */
+#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
 
 /*
  * Track the rcutorture test sequence number and the update version
@@ -1848,7 +1858,8 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 		if (gp_init_delay > 0 &&
-		    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
+		    !(rsp->gpnum %
+		      (rcu_num_nodes * PER_RCU_NODE_PERIOD * gp_init_delay)))
			schedule_timeout_uninterruptible(gp_init_delay);
 	}
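
For reference, here is a minimal standalone userspace sketch (not part of the patch) illustrating why multiplying the delay period by gp_init_delay keeps the average per-grace-period slowdown constant. The rcu_num_nodes value and the candidate gp_init_delay settings below are assumptions chosen purely for the demonstration.

/* Illustrative sketch only; rcu_num_nodes and the delay values are assumed. */
#include <stdio.h>

#define PER_RCU_NODE_PERIOD 3	/* Matches the patched value. */

int main(void)
{
	const int rcu_num_nodes = 4;		/* hypothetical example value */
	const int delays[] = { 1, 5, 10, 50 };	/* hypothetical gp_init_delay settings */

	for (unsigned int i = 0; i < sizeof(delays) / sizeof(delays[0]); i++) {
		int gp_init_delay = delays[i];

		/*
		 * Per the patched condition in rcu_gp_init(), a delay of
		 * gp_init_delay jiffies fires once every
		 * rcu_num_nodes * PER_RCU_NODE_PERIOD * gp_init_delay
		 * grace periods, so the average added delay per grace
		 * period works out to 1 / (rcu_num_nodes * PER_RCU_NODE_PERIOD)
		 * jiffies regardless of gp_init_delay.
		 */
		double period = (double)rcu_num_nodes * PER_RCU_NODE_PERIOD *
				gp_init_delay;
		double avg = gp_init_delay / period;

		printf("gp_init_delay=%2d -> avg delay/GP = %.4f jiffies\n",
		       gp_init_delay, avg);
	}
	return 0;
}

Running this prints the same average (here 1/12 of a jiffy) for every gp_init_delay, which is the constant-slowdown property the new comment in the patch describes.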