author:    Sean Christopherson <seanjc@google.com>  2023-11-09 18:28:52 -0800
committer: Sean Christopherson <seanjc@google.com>  2024-02-01 09:35:48 -0800
commit:    e5a65d4f723ab9997deab798c539e6bfd71f8440
tree:      c8ad383b1b4c18509af87f9aea4e66133a372067 /arch/x86/kvm
parent:    004a0aa56edea3effc97bc90a620da1bcde5c63c
KVM: x86/pmu: Add macros to iterate over all PMCs given a bitmap
Add and use kvm_for_each_pmc() to dedup a variety of open coded for-loops
that iterate over valid PMCs given a bitmap (and because seeing checkpatch
whine about bad macro style is always amusing).

No functional change intended.

Link: https://lore.kernel.org/r/20231110022857.1273836-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
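The pattern being added is a filtering iterator macro: a trailing "else"
makes the statement after the macro invocation the loop body, while a hidden
"if"/"continue" skips bitmap slots that have no backing object. Below is a
minimal, standalone C sketch of the idiom; the names (toy_for_each_valid(),
slot_to_obj(), MAX_SLOTS) are hypothetical and merely stand in for KVM's
for_each_set_bit()/kvm_pmc_idx_to_pmc() pair.

#include <stdio.h>

#define MAX_SLOTS 8

/* Toy lookup: even slots have a backing object, odd slots return NULL. */
static const char *slot_to_obj(int i)
{
        static const char * const objs[MAX_SLOTS] = {
                "obj0", NULL, "obj2", NULL, "obj4", NULL, "obj6", NULL,
        };
        return objs[i];
}

/*
 * Visit every set bit in @mask whose slot has a backing object.  The
 * "continue; else" tail makes the statement following the macro the
 * else-branch, so invalid slots continue the enclosing for-loop.
 */
#define toy_for_each_valid(obj, i, mask)                        \
        for ((i) = 0; (i) < MAX_SLOTS; (i)++)                   \
                if (!((mask) & (1u << (i))) ||                  \
                    !((obj) = slot_to_obj(i)))                  \
                        continue;                               \
                else

int main(void)
{
        const char *obj;
        int i;

        /* Bits 0-4 set; slots 1 and 3 fail the lookup and are skipped. */
        toy_for_each_valid(obj, i, 0x1f)
                printf("slot %d -> %s\n", i, obj);

        return 0;
}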
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/pmu.c            26
-rw-r--r--  arch/x86/kvm/pmu.h             6
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c   7
3 files changed, 15 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 81a0c6719863..7b5563ff259c 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -493,6 +493,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 {
         DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+        struct kvm_pmc *pmc;
         int bit;

         bitmap_copy(bitmap, pmu->reprogram_pmi, X86_PMC_IDX_MAX);
@@ -505,12 +506,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
         BUILD_BUG_ON(sizeof(bitmap) != sizeof(atomic64_t));
         atomic64_andnot(*(s64 *)bitmap, &pmu->__reprogram_pmi);

-        for_each_set_bit(bit, bitmap, X86_PMC_IDX_MAX) {
-                struct kvm_pmc *pmc = kvm_pmc_idx_to_pmc(pmu, bit);
-
-                if (unlikely(!pmc))
-                        continue;
-
+        kvm_for_each_pmc(pmu, pmc, bit, bitmap) {
                 /*
                  * If reprogramming fails, e.g. due to contention, re-set the
                  * reprogram bit, i.e. opportunistically try again on the
@@ -730,11 +726,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
         bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);

-        for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
-                pmc = kvm_pmc_idx_to_pmc(pmu, i);
-                if (!pmc)
-                        continue;
-
+        kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
                 pmc_stop_counter(pmc);
                 pmc->counter = 0;
                 pmc->emulated_counter = 0;
@@ -806,10 +798,8 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
         bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
                       pmu->pmc_in_use, X86_PMC_IDX_MAX);

-        for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
-                pmc = kvm_pmc_idx_to_pmc(pmu, i);
-
-                if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
+        kvm_for_each_pmc(pmu, pmc, i, bitmask) {
+                if (pmc->perf_event && !pmc_speculative_in_use(pmc))
                         pmc_stop_counter(pmc);
         }
@@ -861,10 +851,8 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
         struct kvm_pmc *pmc;
         int i;

-        for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
-                pmc = kvm_pmc_idx_to_pmc(pmu, i);
-
-                if (!pmc || !pmc_event_is_allowed(pmc))
+        kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
+                if (!pmc_event_is_allowed(pmc))
                         continue;

                 /* Ignore checks for edge detect, pin control, invert and CMASK bits */
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 56e8e665e1af..fd18bc0b281c 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -83,6 +83,12 @@ static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
         return NULL;
 }

+#define kvm_for_each_pmc(pmu, pmc, i, bitmap)                   \
+        for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX)            \
+                if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i)))        \
+                        continue;                               \
+                else                                            \
+
 static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
 {
         struct kvm_pmu *pmu = pmc_to_pmu(pmc);
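A note on the macro's shape: the trailing "else \" is deliberate (and is the
"bad macro style" that checkpatch whines about). The statement following a
kvm_for_each_pmc() invocation becomes the else-branch, so a NULL PMC takes
the "continue" path and iteration moves straight to the next set bit.
Sketched conceptually (an illustrative expansion, not a literal preprocessor
dump):

        kvm_for_each_pmc(pmu, pmc, bit, bitmap) {
                /* body: runs only for non-NULL PMCs */
        }

        /* ... expands along the lines of ... */

        for_each_set_bit(bit, bitmap, X86_PMC_IDX_MAX)
                if (!(pmc = kvm_pmc_idx_to_pmc(pmu, bit)))
                        continue;
                else {
                        /* body: runs only for non-NULL PMCs */
                }

A "continue" written in the caller's body still targets the underlying
for_each_set_bit() loop, which is why the converted call sites below can
keep their early-continue filtering unchanged.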
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 845a964f22a6..73b9943ceb17 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -704,11 +704,8 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
         struct kvm_pmc *pmc = NULL;
         int bit, hw_idx;

-        for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
-                         X86_PMC_IDX_MAX) {
-                pmc = kvm_pmc_idx_to_pmc(pmu, bit);
-
-                if (!pmc || !pmc_speculative_in_use(pmc) ||
+        kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
+                if (!pmc_speculative_in_use(pmc) ||
                     !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
                         continue;