| author | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2020-11-05 21:32:38 -0500 |
|---|---|---|
| committer | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2020-11-06 08:33:23 -0500 |
| commit | da5afbeb1724609996ca7bb4fbce2cd104c95914 (patch) | |
| tree | 3b0401623f109b52d7c49a2e1c0f12a3eb96f9ae | |
| parent | 6e4eb9cb22fc8a893cb708ed42644de5ee7c3827 (diff) | |
ftrace: Optimize testing what context current is in
The preempt_count() is not a simple location in memory; it could be part of
per_cpu code or more. Each access to preempt_count(), or one of its accessor
functions (like in_interrupt()), takes several cycles. Reading
preempt_count() once and then testing the returned value to determine the
context is slightly faster than using in_nmi() and in_interrupt().
Link: https://lkml.kernel.org/r/20201028115612.780796355@goodmis.org
Link: https://lkml.kernel.org/r/20201106023546.558881845@goodmis.org
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
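
As a rough illustration of the reasoning above, here is a minimal user-space sketch, not the kernel code and not the real bit layout from <linux/preempt.h>: the old style calls a predicate helper per test, each of which re-reads the counter, while the new style reads the counter once and classifies the context with mask tests on the cached value. The EX_* masks, read_count(), and the fake counter are hypothetical stand-ins for preempt_count() and the kernel's masks.

```c
/*
 * Hedged sketch only: user-space stand-in for the optimization.
 * The EX_* values are a made-up layout, not the kernel's <linux/preempt.h>.
 */
#include <stdio.h>

#define EX_SOFTIRQ_OFFSET (1UL << 8)    /* "serving softirq" flag (hypothetical) */
#define EX_HARDIRQ_MASK   (0xfUL << 16) /* hardirq nesting bits (hypothetical) */
#define EX_NMI_MASK       (0xfUL << 20) /* NMI nesting bits (hypothetical) */

enum ctx { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned long fake_count;                 /* stands in for preempt_count() */
static unsigned long read_count(void) { return fake_count; }

/* Old style: every helper performs its own read of the counter. */
static int ex_in_nmi(void)  { return (read_count() & EX_NMI_MASK) != 0; }
static int ex_in_irq(void)  { return (read_count() & EX_HARDIRQ_MASK) != 0; }
static int ex_in_intr(void)
{
        return (read_count() &
                (EX_NMI_MASK | EX_HARDIRQ_MASK | EX_SOFTIRQ_OFFSET)) != 0;
}

static enum ctx context_old(void)
{
        if (ex_in_intr()) {              /* read #1 */
                if (ex_in_nmi())         /* read #2 */
                        return CTX_NMI;
                if (ex_in_irq())         /* read #3 */
                        return CTX_IRQ;
                return CTX_SOFTIRQ;
        }
        return CTX_NORMAL;
}

/* New style: one read, then cheap mask tests against the cached value. */
static enum ctx context_new(void)
{
        unsigned long pc = read_count(); /* single read */

        if (!(pc & (EX_NMI_MASK | EX_HARDIRQ_MASK | EX_SOFTIRQ_OFFSET)))
                return CTX_NORMAL;
        return pc & EX_NMI_MASK ? CTX_NMI :
               pc & EX_HARDIRQ_MASK ? CTX_IRQ : CTX_SOFTIRQ;
}

int main(void)
{
        fake_count = 1UL << 16;          /* pretend one hardirq level is active */
        printf("old=%d new=%d\n", context_old(), context_new());
        return 0;
}
```

The actual patch, shown in the diff below, applies the same pattern to preempt_count() using the kernel's NMI_MASK, HARDIRQ_MASK and SOFTIRQ_OFFSET.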
-rw-r--r-- | include/linux/trace_recursion.h | 33 |
1 file changed, 20 insertions, 13 deletions
```diff
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index f2a949dbfec7..ac3d73484cb2 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -117,22 +117,29 @@ enum {
 
 #define TRACE_CONTEXT_MASK      TRACE_LIST_MAX
 
+/*
+ * Used for setting context
+ *  NMI     = 0
+ *  IRQ     = 1
+ *  SOFTIRQ = 2
+ *  NORMAL  = 3
+ */
+enum {
+        TRACE_CTX_NMI,
+        TRACE_CTX_IRQ,
+        TRACE_CTX_SOFTIRQ,
+        TRACE_CTX_NORMAL,
+};
+
 static __always_inline int trace_get_context_bit(void)
 {
-        int bit;
-
-        if (in_interrupt()) {
-                if (in_nmi())
-                        bit = 0;
-
-                else if (in_irq())
-                        bit = 1;
-                else
-                        bit = 2;
-        } else
-                bit = 3;
+        unsigned long pc = preempt_count();
 
-        return bit;
+        if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+                return TRACE_CTX_NORMAL;
+        else
+                return pc & NMI_MASK ? TRACE_CTX_NMI :
+                        pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
 }
 
 static __always_inline int trace_test_and_set_recursion(int start, int max)
```