author     Sean Christopherson <seanjc@google.com>  2024-01-09 15:02:24 -0800
committer  Sean Christopherson <seanjc@google.com>  2024-01-30 15:28:02 -0800
commit     61bb2ad795a72a7a75add3ffc257660fd6c7cfea
tree       c7ef7c4528016fcc84db54d06657a39d4983db26  /arch/x86/kvm/vmx/pmu_intel.c
parent     db9e008a0f37bd31ac3d77d650812612b7c873da
KVM: x86/pmu: Setup fixed counters' eventsel during PMU initialization
Set the eventsel for all fixed counters during PMU initialization; the
eventsel is hardcoded and is consumed if and only if the counter is
supported, i.e. there is no reason to redo the setup every time the PMU
is refreshed.

Configuring all KVM-supported fixed counters also eliminates a potential
pitfall if/when KVM supports discontiguous fixed counters, in which case
configuring only nr_arch_fixed_counters will be insufficient (ignoring the
fact that KVM will need many other changes to support discontiguous fixed
counters).
Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20240109230250.424295-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
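For readers outside the kernel tree, here is a minimal standalone sketch of
the encoding the patch uses (userspace C with hypothetical names; the kernel's
u8/u64 are replaced with uint8_t/uint64_t). Each fixed counter's hardcoded
event is packed into an eventsel value with the unit mask in bits 15:8 and the
event select in bits 7:0, mirroring the IA32_PERFEVTSELx layout:

/*
 * Standalone sketch, not kernel code: mirrors how the patch packs a fixed
 * counter's hardcoded event and unit mask into a single eventsel value.
 */
#include <stdint.h>
#include <stdio.h>

struct fixed_event {
	uint8_t event;
	uint8_t unit_mask;
};

static const struct fixed_event fixed_pmc_events[] = {
	{ 0xc0, 0x00 },	/* [0] Instructions Retired / PERF_COUNT_HW_INSTRUCTIONS */
	{ 0x3c, 0x00 },	/* [1] CPU Cycles / PERF_COUNT_HW_CPU_CYCLES */
	{ 0x00, 0x03 },	/* [2] Reference Cycles / PERF_COUNT_HW_REF_CPU_CYCLES */
};

static uint64_t get_fixed_pmc_eventsel(int index)
{
	/* Unit mask lands in bits 15:8, event select in bits 7:0. */
	return ((uint64_t)fixed_pmc_events[index].unit_mask << 8) |
	       fixed_pmc_events[index].event;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("fixed counter %d -> eventsel 0x%04llx\n", i,
		       (unsigned long long)get_fixed_pmc_eventsel(i));
	return 0;
}

Running this prints 0x00c0, 0x003c, and 0x0300, the three values the new
helper in the diff below returns for indices 0-2.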
Diffstat (limited to 'arch/x86/kvm/vmx/pmu_intel.c')
 arch/x86/kvm/vmx/pmu_intel.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index f3c44ddc09f8..98e92b9ece09 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -407,27 +407,21 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  * Note, reference cycles is counted using a perf-defined "psuedo-encoding",
  * as there is no architectural general purpose encoding for reference cycles.
  */
-static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
+static u64 intel_get_fixed_pmc_eventsel(int index)
 {
 	const struct {
-		u8 eventsel;
+		u8 event;
 		u8 unit_mask;
 	} fixed_pmc_events[] = {
 		[0] = { 0xc0, 0x00 }, /* Instruction Retired / PERF_COUNT_HW_INSTRUCTIONS. */
 		[1] = { 0x3c, 0x00 }, /* CPU Cycles/ PERF_COUNT_HW_CPU_CYCLES. */
 		[2] = { 0x00, 0x03 }, /* Reference Cycles / PERF_COUNT_HW_REF_CPU_CYCLES*/
 	};
-	int i;

 	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED);

-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-		int index = array_index_nospec(i, KVM_PMC_MAX_FIXED);
-		struct kvm_pmc *pmc = &pmu->fixed_counters[index];
-
-		pmc->eventsel = (fixed_pmc_events[index].unit_mask << 8) |
-				fixed_pmc_events[index].eventsel;
-	}
+	return (fixed_pmc_events[index].unit_mask << 8) |
+	       fixed_pmc_events[index].event;
 }

 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
@@ -493,7 +487,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			  kvm_pmu_cap.bit_width_fixed);
 		pmu->counter_bitmask[KVM_PMC_FIXED] =
 			((u64)1 << edx.split.bit_width_fixed) - 1;
-		setup_fixed_pmc_eventsel(pmu);
 	}

 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
@@ -571,6 +564,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 		pmu->fixed_counters[i].vcpu = vcpu;
 		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
 		pmu->fixed_counters[i].current_config = 0;
+		pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
 	}

 	lbr_desc->records.nr = 0;
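To illustrate the discontiguous-counter pitfall the changelog calls out, here
is a hypothetical standalone sketch (not kernel code; MAX_FIXED and the
support bitmap are invented stand-ins). If the supported fixed counters were
ever, say, 0 and 2 but not 1, a setup loop bounded by a count like
nr_arch_fixed_counters would touch indices 0 and 1 and never configure
counter 2, whereas initializing all KVM_PMC_MAX_FIXED entries up front, as
the patch does, covers every possible counter:

#include <stdio.h>

#define MAX_FIXED 3	/* stand-in for KVM_PMC_MAX_FIXED */

int main(void)
{
	/* Hypothetical discontiguous support bitmap: counters 0 and 2 only. */
	unsigned int supported = 0x5;
	/* Analogous to nr_arch_fixed_counters (GCC/Clang builtin popcount). */
	int nr = __builtin_popcount(supported);
	int i;

	/* Count-bounded setup: configures indices 0 and 1, misses counter 2. */
	for (i = 0; i < nr; i++)
		printf("count-bounded setup configures index %d (supported: %s)\n",
		       i, (supported & (1u << i)) ? "yes" : "no");

	/* Init-time setup over all possible counters never misses one. */
	for (i = 0; i < MAX_FIXED; i++)
		printf("init-time setup configures index %d\n", i);

	return 0;
}

The init-time assignment is also cheap: the eventsel is a constant per index,
and unsupported counters simply never consume it, so there is nothing to
recompute on PMU refresh.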