-rw-r--r--  kernel/rcu/tree.c        | 1 -
-rw-r--r--  kernel/rcu/tree.h        | 2 --
-rw-r--r--  kernel/rcu/tree_plugin.h | 1 -
3 files changed, 0 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 66c73a214cff..8e0711954bbf 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2691,7 +2691,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	/* Update counts and requeue any remaining callbacks. */
 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
-	rdp->n_cbs_invoked += count;
 	rcu_segcblist_insert_count(&rdp->cblist, &rcl);
 
 	/* Reinstate batch limit if we have worked down the excess. */
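Note on the tree.c hunk above: the removed rdp->n_cbs_invoked appears to be a statistics-only tally (it is only ever incremented here), while the callback accounting rcu_do_batch() actually relies on is carried by the rcu_cblist/rcu_segcblist counts that rcu_segcblist_insert_count() folds back into the list. Below is a minimal user-space sketch of that pattern; the types and helpers (struct cblist, insert_count(), do_batch()) are simplified stand-ins for illustration, not the kernel's implementation.

/*
 * Minimal user-space sketch (not kernel code): the callback list carries its
 * own length, so the batch-invocation loop needs no separate "callbacks
 * invoked" counter alongside it.
 */
#include <stdio.h>

struct cb {
	struct cb *next;
	void (*func)(struct cb *);
};

struct cblist {
	struct cb *head;
	long len;			/* count lives with the list */
};

/* Fold the drained batch's (now negative) count back into the main list. */
static void insert_count(struct cblist *list, struct cblist *batch)
{
	list->len += batch->len;
	batch->len = 0;
}

static void do_batch(struct cblist *list)
{
	struct cblist batch = { .head = list->head, .len = 0 };
	struct cb *cb, *next;

	list->head = NULL;		/* detach the ready callbacks */
	for (cb = batch.head; cb; cb = next) {
		next = cb->next;
		cb->func(cb);		/* invoke the callback */
		batch.len--;		/* one more callback invoked */
	}
	/* No "n_cbs_invoked += count" needed: -batch.len already is count. */
	insert_count(list, &batch);
}

static void hello(struct cb *cb)
{
	printf("callback %p invoked\n", (void *)cb);
}

int main(void)
{
	struct cb cb2 = { .next = NULL, .func = hello };
	struct cb cb1 = { .next = &cb2, .func = hello };
	struct cblist list = { .head = &cb1, .len = 2 };

	do_batch(&list);
	printf("callbacks still queued: %ld\n", list.len);	/* prints 0 */
	return 0;
}

Because the list's own length is adjusted when the drained batch is folded back in, a separate per-CPU "invoked" counter adds no information that the list does not already carry.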
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 01abd1c4e5da..b258fac73524 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -211,8 +211,6 @@ struct rcu_data {
 					/* different grace periods. */
 	long		qlen_last_fqs_check;
 					/* qlen at last check for QS forcing */
-	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
-	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
 	unsigned long	n_force_qs_snap;
 					/* did other CPU force QS recently? */
 	long		blimit;		/* Upper limit on a processed batch */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0d552985b905..1c2d58a85511 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2243,7 +2243,6 @@ static int rcu_nocb_kthread(void *arg)
 		smp_mb__before_atomic(); /* _add after CB invocation. */
 		atomic_long_add(-c, &rdp->nocb_q_count);
 		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
-		rdp->n_nocbs_invoked += c;
 	}
 	return 0;
 }
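In the tree_plugin.h hunk, the no-CBs kthread's queue bookkeeping is done by the atomic decrements of nocb_q_count and nocb_q_count_lazy; the removed rdp->n_nocbs_invoked += c was, like n_cbs_invoked, only an invocation tally. A small user-space sketch of the decrement-after-invocation pattern using C11 atomics follows; the names (struct nocb_state, invoke_batch()) are illustrative stand-ins, not kernel code.

/*
 * Minimal user-space sketch (not kernel code): after a batch of c callbacks
 * has been invoked, the queued-callback count is decremented atomically.
 * The queue accounting is complete without a separate "invoked" counter.
 */
#include <stdatomic.h>
#include <stdio.h>

struct nocb_state {
	atomic_long queued;		/* callbacks currently queued */
};

/* Pretend a batch of 'c' previously dequeued callbacks was just invoked. */
static void invoke_batch(struct nocb_state *st, long c)
{
	/* ... the c callbacks would be invoked here ... */
	atomic_fetch_sub(&st->queued, c);	/* sub after CB invocation */
	/* The diff drops a separate "invoked += c" statistic at this point. */
}

int main(void)
{
	struct nocb_state st;

	atomic_init(&st.queued, 5);	/* five callbacks queued */
	invoke_batch(&st, 5);
	printf("queued after batch: %ld\n", atomic_load(&st.queued));
	return 0;
}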