rcu: Remove obsolete __rcu_pending() statistics for debugfs

The debugfs interface displayed statistics on RCU-pending checks
but this interface has since been removed.  This commit therefore
removes the no-longer-used rcu_data structure's ->n_rcu_pending,
->n_rp_core_needs_qs, ->n_rp_report_qs, ->n_rp_cb_ready,
->n_rp_cpu_needs_gp, ->n_rp_gp_completed, ->n_rp_gp_started,
->n_rp_nocb_defer_wakeup, and ->n_rp_need_nothing fields along with
their updates.

If this information proves necessary in the future, the corresponding
event traces will be added.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
Paul E. McKenney 2018-01-10 12:36:00 -08:00
parent 62df63e048
commit 01c495f72a
2 changed files with 9 additions and 39 deletions

View file

@@ -3354,8 +3354,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{ {
struct rcu_node *rnp = rdp->mynode; struct rcu_node *rnp = rdp->mynode;
rdp->n_rcu_pending++;
/* Check for CPU stalls, if enabled. */ /* Check for CPU stalls, if enabled. */
check_cpu_stall(rsp, rdp); check_cpu_stall(rsp, rdp);
@@ -3364,48 +3362,31 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
return 0; return 0;
/* Is the RCU core waiting for a quiescent state from this CPU? */ /* Is the RCU core waiting for a quiescent state from this CPU? */
if (rcu_scheduler_fully_active && if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
rdp->n_rp_core_needs_qs++;
} else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
rdp->n_rp_report_qs++;
return 1; return 1;
}
/* Does this CPU have callbacks ready to invoke? */ /* Does this CPU have callbacks ready to invoke? */
if (rcu_segcblist_ready_cbs(&rdp->cblist)) { if (rcu_segcblist_ready_cbs(&rdp->cblist))
rdp->n_rp_cb_ready++;
return 1; return 1;
}
/* Has RCU gone idle with this CPU needing another grace period? */ /* Has RCU gone idle with this CPU needing another grace period? */
if (cpu_needs_another_gp(rsp, rdp)) { if (cpu_needs_another_gp(rsp, rdp))
rdp->n_rp_cpu_needs_gp++;
return 1; return 1;
}
/* Has another RCU grace period completed? */ /* Has another RCU grace period completed? */
if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
rdp->n_rp_gp_completed++;
return 1; return 1;
}
/* Has a new RCU grace period started? */ /* Has a new RCU grace period started? */
if (READ_ONCE(rnp->gpnum) != rdp->gpnum || if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */ unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
rdp->n_rp_gp_started++;
return 1; return 1;
}
/* Does this CPU need a deferred NOCB wakeup? */ /* Does this CPU need a deferred NOCB wakeup? */
if (rcu_nocb_need_deferred_wakeup(rdp)) { if (rcu_nocb_need_deferred_wakeup(rdp))
rdp->n_rp_nocb_defer_wakeup++;
return 1; return 1;
}
/* nothing to do */ /* nothing to do */
rdp->n_rp_need_nothing++;
return 0; return 0;
} }

View file

@@ -226,18 +226,7 @@ struct rcu_data {
/* Grace period that needs help */ /* Grace period that needs help */
/* from cond_resched(). */ /* from cond_resched(). */
/* 5) __rcu_pending() statistics. */ /* 5) _rcu_barrier(), OOM callbacks, and expediting. */
unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
unsigned long n_rp_core_needs_qs;
unsigned long n_rp_report_qs;
unsigned long n_rp_cb_ready;
unsigned long n_rp_cpu_needs_gp;
unsigned long n_rp_gp_completed;
unsigned long n_rp_gp_started;
unsigned long n_rp_nocb_defer_wakeup;
unsigned long n_rp_need_nothing;
/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
struct rcu_head barrier_head; struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ #ifdef CONFIG_RCU_FAST_NO_HZ
struct rcu_head oom_head; struct rcu_head oom_head;
@@ -248,7 +237,7 @@ struct rcu_data {
atomic_long_t exp_workdone3; /* # done by others #3. */ atomic_long_t exp_workdone3; /* # done by others #3. */
int exp_dynticks_snap; /* Double-check need for IPI. */ int exp_dynticks_snap; /* Double-check need for IPI. */
/* 7) Callback offloading. */ /* 6) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU #ifdef CONFIG_RCU_NOCB_CPU
struct rcu_head *nocb_head; /* CBs waiting for kthread. */ struct rcu_head *nocb_head; /* CBs waiting for kthread. */
struct rcu_head **nocb_tail; struct rcu_head **nocb_tail;
@@ -275,7 +264,7 @@ struct rcu_data {
/* Leader CPU takes GP-end wakeups. */ /* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
/* 8) RCU CPU stall data. */ /* 7) RCU CPU stall data. */
unsigned int softirq_snap; /* Snapshot of softirq activity. */ unsigned int softirq_snap; /* Snapshot of softirq activity. */
/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */ /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
struct irq_work rcu_iw; /* Check for non-irq activity. */ struct irq_work rcu_iw; /* Check for non-irq activity. */