author     Linus Torvalds <torvalds@linux-foundation.org>  2020-10-15 15:51:28 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-10-15 15:51:28 -0700
commit     fefa636d815975b34afc45f50852a2810fb23ba9 (patch)
tree       5a48fc557bfc2237fbf6dae3a2c929b991315f87 /arch
parent     2d0f6b0aab9afbd6fdf3514cb4acc249d7aebf9c (diff)
parent     6107742d15832011cd0396d821f3225b52551f1f (diff)
Merge tag 'trace-v5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Updates for tracing and bootconfig:

  - Add support for "bool" type in synthetic events

  - Add per instance tracing for bootconfig

  - Support perf-style return probe ("SYMBOL%return") in kprobes and
    uprobes

  - Allow for kprobes to be enabled earlier in boot up

  - Added tracepoint helper function to allow testing if tracepoints
    are enabled in headers

  - Synthetic events can now have dynamic strings (variable length)

  - Various fixes and cleanups"

* tag 'trace-v5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (58 commits)
  tracing: support "bool" type in synthetic trace events
  selftests/ftrace: Add test case for synthetic event syntax errors
  tracing: Handle synthetic event array field type checking correctly
  selftests/ftrace: Change synthetic event name for inter-event-combined test
  tracing: Add synthetic event error logging
  tracing: Check that the synthetic event and field names are legal
  tracing: Move is_good_name() from trace_probe.h to trace.h
  tracing: Don't show dynamic string internals in synthetic event description
  tracing: Fix some typos in comments
  tracing/boot: Add ftrace.instance.*.alloc_snapshot option
  tracing: Fix race in trace_open and buffer resize call
  tracing: Check return value of __create_val_fields() before using its result
  tracing: Fix synthetic print fmt check for use of __get_str()
  tracing: Remove a pointless assignment
  ftrace: ftrace_global_list is renamed to ftrace_ops_list
  ftrace: Format variable declarations of ftrace_allocate_records
  ftrace: Simplify the calculation of page number for ftrace_page->records
  ftrace: Simplify the dyn_ftrace->flags macro
  ftrace: Simplify the hash calculation
  ftrace: Use fls() to get the bits for dup_hash()
  ...
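The "tracepoint helper function ... in headers" item is what the msr.h hunk below adopts. As a minimal sketch of that pattern (the event name "my_event", the out-of-line do_trace_my_event() helper and frob_hardware() are hypothetical placeholders, not identifiers from this merge):

#include <linux/tracepoint-defs.h>

#ifdef CONFIG_TRACEPOINTS
DECLARE_TRACEPOINT(my_event);                           /* placeholder event */
extern void do_trace_my_event(int arg);                 /* out-of-line tracer */
#else
static inline void do_trace_my_event(int arg) {}
#endif

static inline void frob_hardware(int arg)
{
	/* ... fast-path work that must stay header-inline ... */

	/*
	 * tracepoint_enabled() is a static-key test: it compiles to a
	 * patched NOP while the tracepoint is disabled, so the fast
	 * path is not slowed down.
	 */
	if (tracepoint_enabled(my_event))
		do_trace_my_event(arg);
}

The point of the pattern is that a header like msr.h only needs the lightweight <linux/tracepoint-defs.h>, avoiding the include loops that pulling in the full <linux/tracepoint.h> tends to cause.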
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/msr.h | 20
 1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 86f20d520a07..0b4920a7238e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -60,22 +60,20 @@ struct saved_msrs {
#define EAX_EDX_RET(val, low, high) "=A" (val)
#endif
-#ifdef CONFIG_TRACEPOINTS
/*
* Be very careful with includes. This header is prone to include loops.
*/
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>
-extern struct tracepoint __tracepoint_read_msr;
-extern struct tracepoint __tracepoint_write_msr;
-extern struct tracepoint __tracepoint_rdpmc;
-#define msr_tracepoint_active(t) static_key_false(&(t).key)
+#ifdef CONFIG_TRACEPOINTS
+DECLARE_TRACEPOINT(read_msr);
+DECLARE_TRACEPOINT(write_msr);
+DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
-#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
@@ -128,7 +126,7 @@ static inline unsigned long long native_read_msr(unsigned int msr)
val = __rdmsr(msr);
- if (msr_tracepoint_active(__tracepoint_read_msr))
+ if (tracepoint_enabled(read_msr))
do_trace_read_msr(msr, val, 0);
return val;
@@ -150,7 +148,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
_ASM_EXTABLE(2b, 3b)
: [err] "=r" (*err), EAX_EDX_RET(val, low, high)
: "c" (msr), [fault] "i" (-EIO));
- if (msr_tracepoint_active(__tracepoint_read_msr))
+ if (tracepoint_enabled(read_msr))
do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
return EAX_EDX_VAL(val, low, high);
}
@@ -161,7 +159,7 @@ native_write_msr(unsigned int msr, u32 low, u32 high)
{
__wrmsr(msr, low, high);
- if (msr_tracepoint_active(__tracepoint_write_msr))
+ if (tracepoint_enabled(write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
@@ -181,7 +179,7 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high)
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-EIO)
: "memory");
- if (msr_tracepoint_active(__tracepoint_write_msr))
+ if (tracepoint_enabled(write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
return err;
}
@@ -248,7 +246,7 @@ static inline unsigned long long native_read_pmc(int counter)
DECLARE_ARGS(val, low, high);
asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
- if (msr_tracepoint_active(__tracepoint_rdpmc))
+ if (tracepoint_enabled(rdpmc))
do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
return EAX_EDX_VAL(val, low, high);
}
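For reference, the two helpers used in the new version of this header come from <linux/tracepoint-defs.h>. Roughly, and paraphrased rather than quoted from the tree, they expand to:

/* Declare the tracepoint struct without pulling in <linux/tracepoint.h> */
#define DECLARE_TRACEPOINT(tp) \
	extern struct tracepoint __tracepoint_##tp

#ifdef CONFIG_TRACEPOINTS
/* True only while the tracepoint's static key has been enabled */
# define tracepoint_enabled(tp) \
	static_key_false(&(__tracepoint_##tp).key)
#else
# define tracepoint_enabled(tp) false
#endif

This is why the old msr_tracepoint_active() macro and the open-coded extern struct tracepoint declarations could be dropped from msr.h.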