Diffstat (limited to 'kernel')
-rw-r--r--  kernel/context_tracking.c       | 14
-rw-r--r--  kernel/locking/lockdep.c        |  4
-rw-r--r--  kernel/panic.c                  |  4
-rw-r--r--  kernel/softirq.c                | 44
-rw-r--r--  kernel/time/timekeeping.c       |  2
-rw-r--r--  kernel/trace/trace_preemptirq.c | 10
6 files changed, 54 insertions, 24 deletions
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index ce430885c26c..36a98c48aedc 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(context_tracking_key);
DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);
-static bool context_tracking_recursion_enter(void)
+static noinstr bool context_tracking_recursion_enter(void)
{
int recursion;
@@ -45,7 +45,7 @@ static bool context_tracking_recursion_enter(void)
return false;
}
-static void context_tracking_recursion_exit(void)
+static __always_inline void context_tracking_recursion_exit(void)
{
__this_cpu_dec(context_tracking.recursion);
}
@@ -59,7 +59,7 @@ static void context_tracking_recursion_exit(void)
* instructions to execute won't use any RCU read side critical section
* because this function sets RCU in extended quiescent state.
*/
-void __context_tracking_enter(enum ctx_state state)
+void noinstr __context_tracking_enter(enum ctx_state state)
{
/* Kernel threads aren't supposed to go to userspace */
WARN_ON_ONCE(!current->mm);
@@ -77,8 +77,10 @@ void __context_tracking_enter(enum ctx_state state)
* on the tick.
*/
if (state == CONTEXT_USER) {
+ instrumentation_begin();
trace_user_enter(0);
vtime_user_enter(current);
+ instrumentation_end();
}
rcu_user_enter();
}
@@ -99,7 +101,6 @@ void __context_tracking_enter(enum ctx_state state)
}
context_tracking_recursion_exit();
}
-NOKPROBE_SYMBOL(__context_tracking_enter);
EXPORT_SYMBOL_GPL(__context_tracking_enter);
void context_tracking_enter(enum ctx_state state)
@@ -142,7 +143,7 @@ NOKPROBE_SYMBOL(context_tracking_user_enter);
* This call supports re-entrancy. This way it can be called from any exception
* handler without needing to know if we came from userspace or not.
*/
-void __context_tracking_exit(enum ctx_state state)
+void noinstr __context_tracking_exit(enum ctx_state state)
{
if (!context_tracking_recursion_enter())
return;
@@ -155,15 +156,16 @@ void __context_tracking_exit(enum ctx_state state)
*/
rcu_user_exit();
if (state == CONTEXT_USER) {
+ instrumentation_begin();
vtime_user_exit(current);
trace_user_exit(0);
+ instrumentation_end();
}
}
__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
}
context_tracking_recursion_exit();
}
-NOKPROBE_SYMBOL(__context_tracking_exit);
EXPORT_SYMBOL_GPL(__context_tracking_exit);
void context_tracking_exit(enum ctx_state state)
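
The change above follows the noinstr bracketing pattern: the whole function is taken out of instrumentation (which also makes the old NOKPROBE_SYMBOL annotations redundant), and only the spans where RCU is still watching are reopened with instrumentation_begin()/instrumentation_end(). A minimal sketch of that shape, with an illustrative function name that is not part of this patch (header placement of these helpers varies by kernel version):

#include <linux/compiler.h>	/* noinstr, instrumentation_begin/end (placement varies) */

/* Sketch only: mirrors the shape of __context_tracking_enter() above. */
static noinstr void example_enter_user_mode(void)
{
	/*
	 * noinstr region: no tracepoints, kprobes or sanitizer
	 * instrumentation may run here.
	 */
	instrumentation_begin();
	/*
	 * Instrumentable helpers (tracepoints, vtime accounting) are only
	 * legal inside this bracket, while RCU is still watching.
	 */
	instrumentation_end();

	/* The real __context_tracking_enter() calls rcu_user_enter() next. */
}
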
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 38cce34d03dc..29a8de4c50b9 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -393,7 +393,7 @@ void lockdep_init_task(struct task_struct *task)
task->lockdep_recursion = 0;
}
-static inline void lockdep_recursion_finish(void)
+static __always_inline void lockdep_recursion_finish(void)
{
if (WARN_ON_ONCE(--current->lockdep_recursion))
current->lockdep_recursion = 0;
@@ -801,7 +801,7 @@ static int count_matching_names(struct lock_class *new_class)
}
/* used from NMI context -- must be lockless */
-static inline struct lock_class *
+static __always_inline struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
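
Both lockdep helpers move from inline, which the compiler may ignore, to __always_inline, so they are guaranteed to be folded into callers that must not contain out-of-line instrumentable calls (look_up_lock_class() is explicitly used from NMI context). A toy comparison of the two attributes; the function names are made up for illustration:

#include <linux/compiler.h>

/* May still be emitted out of line, leaving a text symbol that kprobes
 * or the function tracer could attach to. */
static inline int hint_only(int x)
{
	return x + 1;
}

/* Guaranteed to be folded into the caller: no separate symbol, nothing
 * for instrumentation to hook, safe in noinstr or NMI paths. */
static __always_inline int always_folded(int x)
{
	return x + 1;
}
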
diff --git a/kernel/panic.c b/kernel/panic.c
index 85568bbfb12b..e2157ca387c8 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -680,10 +680,12 @@ device_initcall(register_warn_debugfs);
* Called when gcc's -fstack-protector feature is used, and
* gcc detects corruption of the on-stack canary value
*/
-__visible void __stack_chk_fail(void)
+__visible noinstr void __stack_chk_fail(void)
{
+ instrumentation_begin();
panic("stack-protector: Kernel stack is corrupted in: %pB",
__builtin_return_address(0));
+ instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index a47c6dd57452..c4201b7f42b1 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -339,12 +339,11 @@ asmlinkage __visible void do_softirq(void)
local_irq_restore(flags);
}
-/*
- * Enter an interrupt context.
+/**
+ * irq_enter_rcu - Enter an interrupt context with RCU watching
*/
-void irq_enter(void)
+void irq_enter_rcu(void)
{
- rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
* Prevent raise_softirq from needlessly waking up ksoftirqd
@@ -354,10 +353,18 @@ void irq_enter(void)
tick_irq_enter();
_local_bh_enable();
}
-
__irq_enter();
}
+/**
+ * irq_enter - Enter an interrupt context including RCU update
+ */
+void irq_enter(void)
+{
+ rcu_irq_enter();
+ irq_enter_rcu();
+}
+
static inline void invoke_softirq(void)
{
if (ksoftirqd_running(local_softirq_pending()))
@@ -397,10 +404,7 @@ static inline void tick_irq_exit(void)
#endif
}
-/*
- * Exit an interrupt context. Process softirqs if needed and possible:
- */
-void irq_exit(void)
+static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
local_irq_disable();
@@ -413,6 +417,28 @@ void irq_exit(void)
invoke_softirq();
tick_irq_exit();
+}
+
+/**
+ * irq_exit_rcu() - Exit an interrupt context without updating RCU
+ *
+ * Also processes softirqs if needed and possible.
+ */
+void irq_exit_rcu(void)
+{
+ __irq_exit_rcu();
+ /* must be last! */
+ lockdep_hardirq_exit();
+}
+
+/**
+ * irq_exit - Exit an interrupt context, update RCU and lockdep
+ *
+ * Also processes softirqs if needed and possible.
+ */
+void irq_exit(void)
+{
+ __irq_exit_rcu();
rcu_irq_exit();
/* must be last! */
lockdep_hardirq_exit();
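
Splitting irq_enter()/irq_exit() into irq_enter_rcu()/irq_exit_rcu() plus the RCU calls lets entry code that already performs rcu_irq_enter()/rcu_irq_exit() itself invoke only the non-RCU half. A hedged sketch of such a caller; the wrapper name is illustrative and not part of this patch, and header locations may differ by kernel version:

#include <linux/hardirq.h>	/* irq_enter_rcu(), irq_exit_rcu() */
#include <linux/rcupdate.h>	/* rcu_irq_enter(), rcu_irq_exit() */

/* Illustrative only: an entry path that does the RCU update itself and
 * uses the new _rcu variants for the remaining bookkeeping. */
static void example_handle_irq(void (*handler)(void))
{
	rcu_irq_enter();	/* RCU update done by the entry code */
	irq_enter_rcu();	/* preempt count, tick and lockdep setup */

	handler();

	irq_exit_rcu();		/* softirq processing, tick, lockdep exit */
	rcu_irq_exit();
}
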
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 9ebaab13339d..d20d489841c8 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -953,7 +953,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
* but without the sequence counter protect. This internal function
* is called just when timekeeping lock is already held.
*/
-time64_t __ktime_get_real_seconds(void)
+noinstr time64_t __ktime_get_real_seconds(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index fb0691b8a88d..f10073e62603 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -58,7 +58,7 @@ NOKPROBE_SYMBOL(trace_hardirqs_on);
* and lockdep uses a staged approach which splits the lockdep hardirq
* tracking into a RCU on and a RCU off section.
*/
-void trace_hardirqs_off_prepare(void)
+void trace_hardirqs_off_finish(void)
{
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
@@ -68,19 +68,19 @@ void trace_hardirqs_off_prepare(void)
}
}
-EXPORT_SYMBOL(trace_hardirqs_off_prepare);
-NOKPROBE_SYMBOL(trace_hardirqs_off_prepare);
+EXPORT_SYMBOL(trace_hardirqs_off_finish);
+NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
void trace_hardirqs_off(void)
{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
if (!in_nmi())
trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
}
-
- lockdep_hardirqs_off(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
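
The reorder in trace_hardirqs_off() makes lockdep_hardirqs_off() run before any tracer or tracepoint work, so lockdep's irq-state bookkeeping is current before instrumentable code executes; the rename from trace_hardirqs_off_prepare() to trace_hardirqs_off_finish() reflects the same staging, with the tracer side now running after lockdep's part rather than ahead of it. In the common interrupt-disable path that sequencing looks roughly like the sketch below (the wrapper name is illustrative):

#include <linux/irqflags.h>

/* Sketch of the ordering after this change. */
static void example_irq_off_traced(void)
{
	raw_local_irq_disable();	/* hardware interrupts off */
	trace_hardirqs_off();		/* lockdep state updated first,
					 * tracer callbacks afterwards */
}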