author     Kan Liang <kan.liang@linux.intel.com>    2024-06-26 07:35:38 -0700
committer  Peter Zijlstra <peterz@infradead.org>    2024-07-04 16:00:39 +0200
commit     e8fb5d6e765838e913253ef7c9b6fd8ec76c8d53 (patch)
tree       4e1e0ad4b0b080baf0a449cf4ac86f95cf044ce0 /arch/x86/events
parent     608f6976c309793ceea37292c54b057dab091944 (diff)
perf/x86: Add config_mask to represent EVENTSEL bitmask
Different vendors may support different fields in the EVENTSEL MSR. For example,
Intel introduces the new umask2 and eq fields in the EVENTSEL MSR starting with
Perfmon version 6. However, a fixed mask, X86_RAW_EVENT_MASK, is currently used to
filter attr.config.

Introduce a new config_mask to record the EVENTSEL bitmask that is actually
supported. Only apply it to the existing code for now. No functional change.

Co-developed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lkml.kernel.org/r/20240626143545.480761-7-kan.liang@linux.intel.com
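Stripped of kernel plumbing, the effect of the change is that attr.config is masked
with a per-PMU config_mask instead of the fixed X86_RAW_EVENT_MASK. Below is a
minimal standalone sketch of that filtering; FAKE_RAW_EVENT_MASK, struct fake_pmu,
and the bit positions are made up for illustration and are not kernel definitions.

/*
 * Standalone sketch (not kernel code): how attr.config filtering changes.
 * FAKE_RAW_EVENT_MASK and the bit positions below are made-up values.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_RAW_EVENT_MASK     0x000000000008ffffULL   /* stand-in fixed mask */

struct fake_pmu {
        uint64_t config_mask;   /* EVENTSEL bits this PMU actually supports */
};

/* Old behaviour: every PMU filtered attr.config with the same fixed mask. */
static uint64_t old_get_event_config(uint64_t attr_config)
{
        return attr_config & FAKE_RAW_EVENT_MASK;
}

/* New behaviour: each PMU filters attr.config with its own config_mask. */
static uint64_t new_get_event_config(const struct fake_pmu *pmu, uint64_t attr_config)
{
        return attr_config & pmu->config_mask;
}

int main(void)
{
        /* A PMU that additionally supports a hypothetical high EVENTSEL bit. */
        struct fake_pmu pmu = { .config_mask = FAKE_RAW_EVENT_MASK | (1ULL << 40) };
        uint64_t cfg = (1ULL << 40) | 0xc0;     /* event 0xc0 plus the high bit */

        printf("fixed mask:   %#llx\n", (unsigned long long)old_get_event_config(cfg));
        printf("per-PMU mask: %#llx\n", (unsigned long long)new_get_event_config(&pmu, cfg));
        return 0;
}

With the fixed mask the hypothetical high bit is silently dropped; with a per-PMU
config_mask a PMU that supports the bit can let it through.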
Diffstat (limited to 'arch/x86/events')
-rw-r--r--   arch/x86/events/core.c         5
-rw-r--r--   arch/x86/events/intel/core.c   1
-rw-r--r--   arch/x86/events/perf_event.h   7
3 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 848dbe9cbd0e..8ea1c988e19b 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -624,7 +624,7 @@ int x86_pmu_hw_config(struct perf_event *event)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

        if (event->attr.type == event->pmu->type)
-               event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+               event->hw.config |= x86_pmu_get_event_config(event);

        if (event->attr.sample_period && x86_pmu.limit_period) {
                s64 left = event->attr.sample_period;
@@ -2098,6 +2098,9 @@ static int __init init_hw_perf_events(void)
        if (!x86_pmu.intel_ctrl)
                x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;

+       if (!x86_pmu.config_mask)
+               x86_pmu.config_mask = X86_RAW_EVENT_MASK;
+
        perf_events_lapic_init();
        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 6a6f1f4b9205..6e42ba0e6b72 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6144,6 +6144,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
                pmu->cntr_mask64 = x86_pmu.cntr_mask64;
                pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
                pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
+               pmu->config_mask = X86_RAW_EVENT_MASK;
                pmu->unconstrained = (struct event_constraint)
                                     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
                                                        0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 493bc9f70ffe..55468ea89d23 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -695,6 +695,7 @@ struct x86_hybrid_pmu {
        union perf_capabilities         intel_cap;
        u64                             intel_ctrl;
        u64                             pebs_events_mask;
+       u64                             config_mask;
        union {
                        u64             cntr_mask64;
                        unsigned long   cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -790,6 +791,7 @@ struct x86_pmu {
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
+       u64             config_mask;
        union {
                        u64             cntr_mask64;
                        unsigned long   cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -1241,6 +1243,11 @@ static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
        return fls64(hybrid(pmu, fixed_cntr_mask64));
}

+static inline u64 x86_pmu_get_event_config(struct perf_event *event)
+{
+       return event->attr.config & hybrid(event->pmu, config_mask);
+}
+
extern struct event_constraint emptyconstraint;
extern struct event_constraint unconstrained;
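As a side note, hybrid(event->pmu, config_mask) in the new helper resolves to the
hybrid PMU's own config_mask on hybrid systems and falls back to the global
x86_pmu.config_mask otherwise. A simplified standalone mock of that lookup follows;
all mock_* names are invented for illustration and are not the kernel's hybrid()
macro.

/* Simplified mock (not the kernel's hybrid() macro) of per-PMU mask lookup. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_hybrid_pmu { uint64_t config_mask; };

static uint64_t mock_global_config_mask = 0xffffULL;   /* made-up global mask */

/* On a hybrid system use the event's PMU mask, otherwise the global one. */
static uint64_t mock_config_mask(bool is_hybrid, const struct mock_hybrid_pmu *pmu)
{
        return (is_hybrid && pmu) ? pmu->config_mask : mock_global_config_mask;
}

static uint64_t mock_get_event_config(uint64_t attr_config, bool is_hybrid,
                                      const struct mock_hybrid_pmu *pmu)
{
        return attr_config & mock_config_mask(is_hybrid, pmu);
}

int main(void)
{
        struct mock_hybrid_pmu e_core = { .config_mask = 0x1ffffULL };
        uint64_t cfg = 0x100c3ULL;

        printf("non-hybrid: %#llx\n", (unsigned long long)mock_get_event_config(cfg, false, NULL));
        printf("hybrid:     %#llx\n", (unsigned long long)mock_get_event_config(cfg, true, &e_core));
        return 0;
}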