Diffstat (limited to 'kernel/context_tracking.c')
kernel/context_tracking.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index a951bde0bbcb..ae94215aa132 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -29,7 +29,7 @@
DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
.nesting = 1,
- .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
+ .nmi_nesting = DYNTICK_IRQ_NONIDLE,
#endif
.state = ATOMIC_INIT(CT_RCU_WATCHING),
};
@@ -117,7 +117,7 @@ static noinstr void ct_kernel_enter_state(int offset)
* Enter an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*
- * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
+ * We crowbar the ->nmi_nesting field to zero to allow for
* the possibility of usermode upcalls having messed up our count
* of interrupt nesting level during the prior busy period.
*/
@@ -126,7 +126,7 @@ static void noinstr ct_kernel_exit(bool user, int offset)
struct context_tracking *ct = this_cpu_ptr(&context_tracking);

WARN_ON_ONCE(ct_dynticks_nmi_nesting() != DYNTICK_IRQ_NONIDLE);
- WRITE_ONCE(ct->dynticks_nmi_nesting, 0);
+ WRITE_ONCE(ct->nmi_nesting, 0);
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
ct_nesting() == 0);
if (ct_nesting() != 1) {
@@ -156,7 +156,7 @@ static void noinstr ct_kernel_exit(bool user, int offset)
* Exit an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*
- * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
+ * We crowbar the ->nmi_nesting field to DYNTICK_IRQ_NONIDLE to
* allow for the possibility of usermode upcalls messing up our count of
* interrupt nesting level during the busy period that is just now starting.
*/
@@ -186,7 +186,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
WRITE_ONCE(ct->nesting, 1);
WARN_ON_ONCE(ct_dynticks_nmi_nesting());
- WRITE_ONCE(ct->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
+ WRITE_ONCE(ct->nmi_nesting, DYNTICK_IRQ_NONIDLE);
instrumentation_end();
}

@@ -194,7 +194,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
* ct_nmi_exit - inform RCU of exit from NMI context
*
* If we are returning from the outermost NMI handler that interrupted an
- * RCU-idle period, update ct->state and ct->dynticks_nmi_nesting
+ * RCU-idle period, update ct->state and ct->nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle.
*
@@ -207,7 +207,7 @@ void noinstr ct_nmi_exit(void)

instrumentation_begin();
/*
- * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+ * Check for ->nmi_nesting underflow and bad ->dynticks.
* (We are exiting an NMI handler, so RCU better be paying attention
* to us!)
*/
@@ -221,7 +221,7 @@ void noinstr ct_nmi_exit(void)
if (ct_dynticks_nmi_nesting() != 1) {
trace_rcu_dyntick(TPS("--="), ct_dynticks_nmi_nesting(), ct_dynticks_nmi_nesting() - 2,
ct_rcu_watching());
- WRITE_ONCE(ct->dynticks_nmi_nesting, /* No store tearing. */
+ WRITE_ONCE(ct->nmi_nesting, /* No store tearing. */
ct_dynticks_nmi_nesting() - 2);
instrumentation_end();
return;
@@ -229,7 +229,7 @@ void noinstr ct_nmi_exit(void)

/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
trace_rcu_dyntick(TPS("Startirq"), ct_dynticks_nmi_nesting(), 0, ct_rcu_watching());
- WRITE_ONCE(ct->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+ WRITE_ONCE(ct->nmi_nesting, 0); /* Avoid store tearing. */

// instrumentation for the noinstr ct_kernel_exit_state()
instrument_atomic_write(&ct->state, sizeof(ct->state));
@@ -247,7 +247,7 @@ void noinstr ct_nmi_exit(void)
* ct_nmi_enter - inform RCU of entry to NMI context
*
* If the CPU was idle from RCU's viewpoint, update ct->state and
- * ct->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * ct->nmi_nesting to let the RCU grace-period handling know
* that the CPU is active. This implementation permits nested NMIs, as
* long as the nesting level does not overflow an int. (You will probably
* run out of stack space first.)
@@ -264,10 +264,10 @@ void noinstr ct_nmi_enter(void)
WARN_ON_ONCE(ct_dynticks_nmi_nesting() < 0);

/*
- * If idle from RCU viewpoint, atomically increment ->dynticks
- * to mark non-idle and increment ->dynticks_nmi_nesting by one.
- * Otherwise, increment ->dynticks_nmi_nesting by two. This means
- * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
+ * If idle from RCU viewpoint, atomically increment CT state
+ * to mark non-idle and increment ->nmi_nesting by one.
+ * Otherwise, increment ->nmi_nesting by two. This means
+ * if ->nmi_nesting is equal to one, we are guaranteed
* to be in the outermost NMI handler that interrupted an RCU-idle
* period (observation due to Andy Lutomirski).
*/
@@ -298,7 +298,7 @@ void noinstr ct_nmi_enter(void)
ct_dynticks_nmi_nesting(),
ct_dynticks_nmi_nesting() + incby, ct_rcu_watching());
instrumentation_end();
- WRITE_ONCE(ct->dynticks_nmi_nesting, /* Prevent store tearing. */
+ WRITE_ONCE(ct->nmi_nesting, /* Prevent store tearing. */
ct_dynticks_nmi_nesting() + incby);
barrier();
}
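The increment-by-one versus increment-by-two trick that the ct_nmi_enter() comment credits to Andy Lutomirski is easy to check in isolation. The following user-space sketch models only that counting argument; every identifier in it (sketch_*, watching, SKETCH_IRQ_NONIDLE) is a hypothetical stand-in, with a plain bool replacing the atomic CT state, so this is an illustration of the scheme rather than the kernel implementation.

/*
 * Hypothetical user-space sketch of the nesting scheme described in the
 * ct_nmi_enter()/ct_nmi_exit() comments above. All names are illustrative
 * stand-ins, not the kernel's actual identifiers.
 */
#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Plays the role of DYNTICK_IRQ_NONIDLE: a value +1/+2 steps never reach. */
#define SKETCH_IRQ_NONIDLE ((LONG_MAX / 2) + 1)

static long nmi_nesting;  /* models ct->nmi_nesting */
static bool watching;     /* models the CT state: RCU sees the CPU as non-idle */

static void sketch_eqs_enter(void)  /* cf. ct_kernel_exit(): crowbar to zero */
{
	nmi_nesting = 0;
	watching = false;
}

static void sketch_eqs_exit(void)   /* cf. ct_kernel_enter(): crowbar to NONIDLE */
{
	watching = true;
	nmi_nesting = SKETCH_IRQ_NONIDLE;
}

static void sketch_nmi_enter(void)
{
	long incby = 2;              /* nested NMI: step by two */

	if (!watching) {             /* NMI interrupted an RCU-idle period */
		watching = true;     /* mark the CPU non-idle ... */
		incby = 1;           /* ... and step by one, landing on 1 */
	}
	nmi_nesting += incby;
}

static void sketch_nmi_exit(void)
{
	assert(watching);            /* exiting an NMI: RCU must be watching */

	if (nmi_nesting != 1) {      /* not the outermost idle-interrupting NMI */
		nmi_nesting -= 2;
		return;
	}
	nmi_nesting = 0;             /* outermost such NMI: restore RCU-idleness */
	watching = false;
}

int main(void)
{
	sketch_eqs_enter();          /* CPU goes idle */
	sketch_nmi_enter();          /* 0 -> 1: outermost NMI, CPU marked non-idle */
	sketch_nmi_enter();          /* 1 -> 3: nested NMI */
	sketch_nmi_exit();           /* 3 -> 1 */
	assert(nmi_nesting == 1 && watching);
	sketch_nmi_exit();           /* 1 -> 0: back to RCU-idle */
	assert(nmi_nesting == 0 && !watching);
	sketch_eqs_exit();           /* CPU resumes normal kernel execution */
	assert(nmi_nesting == SKETCH_IRQ_NONIDLE);
	printf("nmi_nesting == 1 uniquely marks the outermost idle-interrupting NMI\n");
	return 0;
}

The assertions pass because, starting from idle, nmi_nesting takes odd values and reaches 1 exactly in the outermost handler, while NMIs that arrive with RCU already watching only ever move the counter by two and so can never land on 1.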