author     Marc Zyngier <maz@kernel.org>  2021-08-20 12:23:53 +0100
committer  Marc Zyngier <maz@kernel.org>  2021-08-20 12:23:53 +0100
commit     7c7b363d62a51eea987ba63fc916ff8d6f89df0e (patch)
tree       babe5d9c3ab0cffe0bace0336f5b8301c7863384
parent     deb151a58210d8dfb1a7cc1f14fec35b520c38a6 (diff)
parent     411d63d8c64c2f3b0c497fe4658f13b3bca951e2 (diff)
Merge branch kvm-arm64/pkvm-fixed-features-prologue into kvmarm-master/next
* kvm-arm64/pkvm-fixed-features-prologue:
  : Rework a bunch of common infrastructure as a prologue
  : to Fuad Tabba's protected VM fixed feature series.
  KVM: arm64: Upgrade trace_kvm_arm_set_dreg32() to 64bit
  KVM: arm64: Add config register bit definitions
  KVM: arm64: Add feature register flag definitions
  KVM: arm64: Track value of cptr_el2 in struct kvm_vcpu_arch
  KVM: arm64: Keep mdcr_el2's value as set by __init_el2_debug
  KVM: arm64: Restore mdcr_el2 from vcpu
  KVM: arm64: Refactor sys_regs.h,c for nVHE reuse
  KVM: arm64: Fix names of config register fields
  KVM: arm64: MDCR_EL2 is a 64-bit register
  KVM: arm64: Remove trailing whitespace in comment
  KVM: arm64: placeholder to check if VM is protected

Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--  arch/arm64/include/asm/cpufeature.h       4
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h         54
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h          2
-rw-r--r--  arch/arm64/include/asm/kvm_host.h        13
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h          2
-rw-r--r--  arch/arm64/include/asm/sysreg.h          16
-rw-r--r--  arch/arm64/kernel/cpufeature.c            8
-rw-r--r--  arch/arm64/kvm/arm.c                      1
-rw-r--r--  arch/arm64/kvm/debug.c                    2
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h   6
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/debug-sr.c        2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c         13
-rw-r--r--  arch/arm64/kvm/hyp/vhe/debug-sr.c         2
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c          12
-rw-r--r--  arch/arm64/kvm/hyp/vhe/sysreg-sr.c        2
-rw-r--r--  arch/arm64/kvm/sys_regs.c                64
-rw-r--r--  arch/arm64/kvm/sys_regs.h                31
-rw-r--r--  arch/arm64/kvm/trace_handle_exit.h       10
18 files changed, 140 insertions, 104 deletions
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c4dabba9cfdf..20e0517eb669 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -602,14 +602,14 @@ static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
- return val == ID_AA64PFR0_EL1_32BIT_64BIT;
+ return val == ID_AA64PFR0_ELx_32BIT_64BIT;
}
static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
- return val == ID_AA64PFR0_EL0_32BIT_64BIT;
+ return val == ID_AA64PFR0_ELx_32BIT_64BIT;
}
static inline bool id_aa64pfr0_sve(u64 pfr0)
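The rename above folds the duplicate per-EL constants into a single ID_AA64PFR0_ELx_* pair, since the EL0 and EL1 fields of ID_AA64PFR0_EL1 share the same encoding. A minimal standalone sketch of the same check, assuming the architectural field layout (EL0 field at bits [3:0]) and the constant values from sysreg.h:

#include <stdint.h>
#include <stdbool.h>

#define ID_AA64PFR0_EL0_SHIFT        0    /* EL0 field lives in bits [3:0] */
#define ID_AA64PFR0_ELx_32BIT_64BIT  0x2  /* EL supports AArch64 and AArch32 */

static bool pfr0_32bit_el0(uint64_t pfr0)
{
	/* 4-bit unsigned feature field extraction */
	uint32_t val = (pfr0 >> ID_AA64PFR0_EL0_SHIFT) & 0xf;
	return val == ID_AA64PFR0_ELx_32BIT_64BIT;
}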
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index d436831dd706..327120c0089f 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -12,8 +12,13 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+
+#define HCR_TID5 (UL(1) << 58)
+#define HCR_DCT (UL(1) << 57)
#define HCR_ATA_SHIFT 56
#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
+#define HCR_AMVOFFEN (UL(1) << 51)
+#define HCR_FIEN (UL(1) << 47)
#define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
@@ -32,9 +37,9 @@
#define HCR_TVM (UL(1) << 26)
#define HCR_TTLB (UL(1) << 25)
#define HCR_TPU (UL(1) << 24)
-#define HCR_TPC (UL(1) << 23)
+#define HCR_TPC (UL(1) << 23) /* HCR_TPCP if FEAT_DPB */
#define HCR_TSW (UL(1) << 22)
-#define HCR_TAC (UL(1) << 21)
+#define HCR_TACR (UL(1) << 21)
#define HCR_TIDCP (UL(1) << 20)
#define HCR_TSC (UL(1) << 19)
#define HCR_TID3 (UL(1) << 18)
@@ -56,12 +61,13 @@
#define HCR_PTW (UL(1) << 2)
#define HCR_SWIO (UL(1) << 1)
#define HCR_VM (UL(1) << 0)
+#define HCR_RES0 ((UL(1) << 48) | (UL(1) << 39))
/*
* The bits we set in HCR:
* TLOR: Trap LORegion register accesses
* RW: 64bit by default, can be overridden for 32bit VMs
- * TAC: Trap ACTLR
+ * TACR: Trap ACTLR
* TSC: Trap SMC
* TSW: Trap cache operations by set/way
* TWE: Trap WFE
@@ -76,7 +82,7 @@
* PTW: Take a stage2 fault if a stage1 walk steps in device memory
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
- HCR_BSU_IS | HCR_FB | HCR_TAC | \
+ HCR_BSU_IS | HCR_FB | HCR_TACR | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
HCR_FMO | HCR_IMO | HCR_PTW )
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
@@ -275,24 +281,40 @@
#define CPTR_EL2_TTA (1 << 20)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ (1 << 8)
-#define CPTR_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 */
-#define CPTR_EL2_DEFAULT CPTR_EL2_RES1
+#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
+#define CPTR_EL2_DEFAULT CPTR_NVHE_EL2_RES1
+#define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \
+ GENMASK(29, 21) | \
+ GENMASK(19, 14) | \
+ BIT(11))
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_E2TB_MASK (UL(0x3))
#define MDCR_EL2_E2TB_SHIFT (UL(24))
-#define MDCR_EL2_TTRF (1 << 19)
-#define MDCR_EL2_TPMS (1 << 14)
+#define MDCR_EL2_HPMFZS (UL(1) << 36)
+#define MDCR_EL2_HPMFZO (UL(1) << 29)
+#define MDCR_EL2_MTPME (UL(1) << 28)
+#define MDCR_EL2_TDCC (UL(1) << 27)
+#define MDCR_EL2_HCCD (UL(1) << 23)
+#define MDCR_EL2_TTRF (UL(1) << 19)
+#define MDCR_EL2_HPMD (UL(1) << 17)
+#define MDCR_EL2_TPMS (UL(1) << 14)
#define MDCR_EL2_E2PB_MASK (UL(0x3))
#define MDCR_EL2_E2PB_SHIFT (UL(12))
-#define MDCR_EL2_TDRA (1 << 11)
-#define MDCR_EL2_TDOSA (1 << 10)
-#define MDCR_EL2_TDA (1 << 9)
-#define MDCR_EL2_TDE (1 << 8)
-#define MDCR_EL2_HPME (1 << 7)
-#define MDCR_EL2_TPM (1 << 6)
-#define MDCR_EL2_TPMCR (1 << 5)
-#define MDCR_EL2_HPMN_MASK (0x1F)
+#define MDCR_EL2_TDRA (UL(1) << 11)
+#define MDCR_EL2_TDOSA (UL(1) << 10)
+#define MDCR_EL2_TDA (UL(1) << 9)
+#define MDCR_EL2_TDE (UL(1) << 8)
+#define MDCR_EL2_HPME (UL(1) << 7)
+#define MDCR_EL2_TPM (UL(1) << 6)
+#define MDCR_EL2_TPMCR (UL(1) << 5)
+#define MDCR_EL2_HPMN_MASK (UL(0x1F))
+#define MDCR_EL2_RES0 (GENMASK(63, 37) | \
+ GENMASK(35, 30) | \
+ GENMASK(25, 24) | \
+ GENMASK(22, 20) | \
+ BIT(18) | \
+ GENMASK(16, 15))
/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT ESR_ELx_FSC_FAULT
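The new HCR_RES0, CPTR_NVHE_EL2_RES0 and MDCR_EL2_RES0 masks give later patches (the protected-VM fixed feature series this merge prepares for) something to validate trap register values against. A hedged sketch of such a check; this helper is hypothetical and not part of this merge:

static inline u64 mdcr_el2_sanitise(u64 val)
{
	/* software must never set bits that are RES0 in MDCR_EL2 */
	WARN_ON_ONCE(val & MDCR_EL2_RES0);
	return val & ~MDCR_EL2_RES0;
}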
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index aed2aa61766a..e86045ac43ba 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -209,7 +209,7 @@ extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
-extern u32 __kvm_get_mdcr_el2(void);
+extern u64 __kvm_get_mdcr_el2(void);
#define __KVM_EXTABLE(from, to) \
" .pushsection __kvm_ex_table, \"a\"\n" \
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bdab1754c71f..f8be56d5342b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -286,9 +286,13 @@ struct kvm_vcpu_arch {
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
- /* HYP configuration */
+ /* Values of trap registers for the guest. */
u64 hcr_el2;
- u32 mdcr_el2;
+ u64 mdcr_el2;
+ u64 cptr_el2;
+
+ /* Values of trap registers for the host before guest entry. */
+ u64 mdcr_el2_host;
/* Exception Information */
struct kvm_vcpu_fault_info fault;
@@ -771,6 +775,11 @@ void kvm_arch_free_vm(struct kvm *kvm);
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
+static inline bool kvm_vm_is_protected(struct kvm *kvm)
+{
+ return false;
+}
+
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
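kvm_vm_is_protected() is a deliberate placeholder that always returns false; the point is to give callers a stable predicate before protected-VM support lands. A hypothetical caller, for illustration only (the function name and policy below are assumptions, not part of this merge):

static int try_modify_feature_reg(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvm_vm_is_protected(vcpu->kvm))
		return -EPERM;	/* protected VMs will have fixed features */
	/* ... perform the write ... */
	return 0;
}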
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 9d60b3006efc..657d0c94cf82 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -95,7 +95,7 @@ void __sve_restore_state(void *sve_pffr, u32 *fpsr);
#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
-void deactivate_traps_vhe_put(void);
+void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
#endif
u64 __guest_enter(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 2084b22613e2..7640fa27be94 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -784,14 +784,13 @@
#define ID_AA64PFR0_AMU 0x1
#define ID_AA64PFR0_SVE 0x1
#define ID_AA64PFR0_RAS_V1 0x1
+#define ID_AA64PFR0_RAS_V1P1 0x2
#define ID_AA64PFR0_FP_NI 0xf
#define ID_AA64PFR0_FP_SUPPORTED 0x0
#define ID_AA64PFR0_ASIMD_NI 0xf
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
-#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1
-#define ID_AA64PFR0_EL1_32BIT_64BIT 0x2
-#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
-#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
+#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1
+#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
/* id_aa64pfr1 */
#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
@@ -847,6 +846,9 @@
#define ID_AA64MMFR0_ASID_SHIFT 4
#define ID_AA64MMFR0_PARANGE_SHIFT 0
+#define ID_AA64MMFR0_ASID_8 0x0
+#define ID_AA64MMFR0_ASID_16 0x2
+
#define ID_AA64MMFR0_TGRAN4_NI 0xf
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7
@@ -911,6 +913,7 @@
#define ID_AA64MMFR2_CNP_SHIFT 0
/* id_aa64dfr0 */
+#define ID_AA64DFR0_MTPMU_SHIFT 48
#define ID_AA64DFR0_TRBE_SHIFT 44
#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
@@ -1167,6 +1170,11 @@
#define ICH_VTR_A3V_SHIFT 21
#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
+#define ARM64_FEATURE_FIELD_BITS 4
+
+/* Create a mask for the feature bits of the specified feature. */
+#define ARM64_FEATURE_MASK(x) (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))
+
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
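ARM64_FEATURE_MASK() pastes _SHIFT onto its argument, so it only works with register fields that follow the *_SHIFT naming convention in this header. A worked expansion, using ID_AA64PFR0_SVE_SHIFT (32, i.e. the SVE field occupies bits [35:32]) together with the FIELD_GET() helper from <linux/bitfield.h>:

/*
 * ARM64_FEATURE_MASK(ID_AA64PFR0_SVE)
 *   -> GENMASK_ULL(ID_AA64PFR0_SVE_SHIFT + 4 - 1, ID_AA64PFR0_SVE_SHIFT)
 *   -> GENMASK_ULL(35, 32)
 *   -> 0x0000000f00000000ULL
 */
u64 sve = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), pfr0);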
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0ead8bfedf20..5b59fe5e26e4 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -239,8 +239,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
ARM64_FTR_END,
};
@@ -1956,7 +1956,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_EL0_SHIFT,
- .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+ .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
},
#ifdef CONFIG_KVM
{
@@ -1967,7 +1967,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_EL1_SHIFT,
- .min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT,
+ .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
},
{
.desc = "Protected KVM",
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9ebb99e22ab2..fe102cd2e518 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1122,6 +1122,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
}
vcpu_reset_hcr(vcpu);
+ vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
/*
* Handle the "start in power-off" case.
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index d5e79d7ee6e9..db9361338b2a 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -21,7 +21,7 @@
DBG_MDSCR_KDE | \
DBG_MDSCR_MDE)
-static DEFINE_PER_CPU(u32, mdcr_el2);
+static DEFINE_PER_CPU(u64, mdcr_el2);
/**
* save/restore_guest_debug_regs
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index e4a2f295a394..a0e78a6027be 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -92,11 +92,15 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(0, pmselr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
}
+
+ vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
-static inline void __deactivate_traps_common(void)
+static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
+ write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);
+
write_sysreg(0, hstr_el2);
if (kvm_arm_support_pmu_v3())
write_sysreg(0, pmuserenr_el0);
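Taken together, the two hunks above make mdcr_el2 round-trip through the vcpu instead of being reconstructed from individual MDCR_EL2_* bits on every exit (the reconstruction code is deleted from the nVHE and VHE switch paths below). In outline:

/* guest entry, __activate_traps_common(): */
vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

/* guest exit, __deactivate_traps_common(): */
write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);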
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 7d3f25868cae..df361d839902 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -109,7 +109,7 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu)
__debug_switch_to_host_common(vcpu);
}
-u32 __kvm_get_mdcr_el2(void)
+u64 __kvm_get_mdcr_el2(void)
{
return read_sysreg(mdcr_el2);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 3e7ad32b3f0d..a34b01cc8ab9 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -41,7 +41,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
___activate_traps(vcpu);
__activate_traps_common(vcpu);
- val = CPTR_EL2_DEFAULT;
+ val = vcpu->arch.cptr_el2;
val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
if (!update_fp_enabled(vcpu)) {
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
@@ -69,12 +69,10 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
extern char __kvm_hyp_host_vector[];
- u64 mdcr_el2, cptr;
+ u64 cptr;
___deactivate_traps(vcpu);
- mdcr_el2 = read_sysreg(mdcr_el2);
-
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
@@ -92,13 +90,8 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
isb();
}
- __deactivate_traps_common();
-
- mdcr_el2 &= MDCR_EL2_HPMN_MASK;
- mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
- mdcr_el2 |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
+ __deactivate_traps_common(vcpu);
- write_sysreg(mdcr_el2, mdcr_el2);
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
cptr = CPTR_EL2_DEFAULT;
diff --git a/arch/arm64/kvm/hyp/vhe/debug-sr.c b/arch/arm64/kvm/hyp/vhe/debug-sr.c
index f1e2e5a00933..289689b2682d 100644
--- a/arch/arm64/kvm/hyp/vhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/debug-sr.c
@@ -20,7 +20,7 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu)
__debug_switch_to_host_common(vcpu);
}
-u32 __kvm_get_mdcr_el2(void)
+u64 __kvm_get_mdcr_el2(void)
{
return read_sysreg(mdcr_el2);
}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 709f6438283e..ded2c66675f0 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -91,17 +91,9 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
__activate_traps_common(vcpu);
}
-void deactivate_traps_vhe_put(void)
+void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
{
- u64 mdcr_el2 = read_sysreg(mdcr_el2);
-
- mdcr_el2 &= MDCR_EL2_HPMN_MASK |
- MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
- MDCR_EL2_TPMS;
-
- write_sysreg(mdcr_el2, mdcr_el2);
-
- __deactivate_traps_common();
+ __deactivate_traps_common(vcpu);
}
/* Switch to the guest for VHE systems running in EL2 */
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 2a0b8c88d74f..007a12dd4351 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -101,7 +101,7 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *host_ctxt;
host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
- deactivate_traps_vhe_put();
+ deactivate_traps_vhe_put(vcpu);
__sysreg_save_el1_state(guest_ctxt);
__sysreg_save_user_state(guest_ctxt);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a1f5101f49a3..1d46e185f31e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -44,10 +44,6 @@
* 64bit interface.
*/
-#define reg_to_encoding(x) \
- sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
- (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
static bool read_from_write_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
@@ -318,14 +314,14 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
/*
* We want to avoid world-switching all the DBG registers all the
* time:
- *
+ *
* - If we've touched any debug register, it is likely that we're
* going to touch more of them. It then makes sense to disable the
* traps and start doing the save/restore dance
* - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
* then mandatory to save/restore the registers, as the guest
* depends on them.
- *
+ *
* For this, we use a DIRTY bit, indicating the guest has modified the
* debug registers, used as follow:
*
@@ -1063,8 +1059,6 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
return true;
}
-#define FEATURE(x) (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))
-
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r, bool raz)
@@ -1075,40 +1069,40 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
switch (id) {
case SYS_ID_AA64PFR0_EL1:
if (!vcpu_has_sve(vcpu))
- val &= ~FEATURE(ID_AA64PFR0_SVE);
- val &= ~FEATURE(ID_AA64PFR0_AMU);
- val &= ~FEATURE(ID_AA64PFR0_CSV2);
- val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
- val &= ~FEATURE(ID_AA64PFR0_CSV3);
- val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
break;
case SYS_ID_AA64PFR1_EL1:
- val &= ~FEATURE(ID_AA64PFR1_MTE);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
if (kvm_has_mte(vcpu->kvm)) {
u64 pfr, mte;
pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
- val |= FIELD_PREP(FEATURE(ID_AA64PFR1_MTE), mte);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), mte);
}
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
- val &= ~(FEATURE(ID_AA64ISAR1_APA) |
- FEATURE(ID_AA64ISAR1_API) |
- FEATURE(ID_AA64ISAR1_GPA) |
- FEATURE(ID_AA64ISAR1_GPI));
+ val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
break;
case SYS_ID_AA64DFR0_EL1:
/* Limit debug to ARMv8.0 */
- val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
- val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
/* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
ID_AA64DFR0_PMUVER_SHIFT,
kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
/* Hide SPE from guests */
- val &= ~FEATURE(ID_AA64DFR0_PMSVER);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
break;
case SYS_ID_DFR0_EL1:
/* Limit guests to PMUv3 for ARMv8.4 */
@@ -2162,23 +2156,6 @@ static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
return 0;
}
-static int match_sys_reg(const void *key, const void *elt)
-{
- const unsigned long pval = (unsigned long)key;
- const struct sys_reg_desc *r = elt;
-
- return pval - reg_to_encoding(r);
-}
-
-static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
- const struct sys_reg_desc table[],
- unsigned int num)
-{
- unsigned long pval = reg_to_encoding(params);
-
- return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
-}
-
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
kvm_inject_undefined(vcpu);
@@ -2421,13 +2398,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
trace_kvm_handle_sys_reg(esr);
- params.Op0 = (esr >> 20) & 3;
- params.Op1 = (esr >> 14) & 0x7;
- params.CRn = (esr >> 10) & 0xf;
- params.CRm = (esr >> 1) & 0xf;
- params.Op2 = (esr >> 17) & 0x7;
+ params = esr_sys64_to_params(esr);
params.regval = vcpu_get_reg(vcpu, Rt);
- params.is_write = !(esr & 1);
ret = emulate_sys_reg(vcpu, &params);
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 9d0621417c2a..cc0cc95a0280 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -11,6 +11,12 @@
#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
#define __ARM64_KVM_SYS_REGS_LOCAL_H__
+#include <linux/bsearch.h>
+
+#define reg_to_encoding(x) \
+ sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
+ (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
struct sys_reg_params {
u8 Op0;
u8 Op1;
@@ -21,6 +27,14 @@ struct sys_reg_params {
bool is_write;
};
+#define esr_sys64_to_params(esr) \
+ ((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3, \
+ .Op1 = ((esr) >> 14) & 0x7, \
+ .CRn = ((esr) >> 10) & 0xf, \
+ .CRm = ((esr) >> 1) & 0xf, \
+ .Op2 = ((esr) >> 17) & 0x7, \
+ .is_write = !((esr) & 1) })
+
struct sys_reg_desc {
/* Sysreg string for debug */
const char *name;
@@ -152,6 +166,23 @@ static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
return i1->Op2 - i2->Op2;
}
+static inline int match_sys_reg(const void *key, const void *elt)
+{
+ const unsigned long pval = (unsigned long)key;
+ const struct sys_reg_desc *r = elt;
+
+ return pval - reg_to_encoding(r);
+}
+
+static inline const struct sys_reg_desc *
+find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
+ unsigned int num)
+{
+ unsigned long pval = reg_to_encoding(params);
+
+ return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
+}
+
const struct sys_reg_desc *find_reg_by_id(u64 id,
struct sys_reg_params *params,
const struct sys_reg_desc table[],
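esr_sys64_to_params() encodes the ISS layout for trapped MSR/MRS accesses: Op0 in bits [21:20], Op2 in [19:17], Op1 in [16:14], CRn in [13:10], CRm in [4:1], and the direction flag in bit 0 (clear for a write). A worked decode, built directly from the macro above:

/* An MSR to SCTLR_EL1 (Op0=3, Op1=0, CRn=1, CRm=0, Op2=0), bit 0 clear: */
u64 esr = (3UL << 20) | (0UL << 17) | (0UL << 14) | (1UL << 10) | (0UL << 1);
struct sys_reg_params p = esr_sys64_to_params(esr);
/* p.Op0 == 3, p.Op1 == 0, p.CRn == 1, p.CRm == 0, p.Op2 == 0,
 * p.is_write == true */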
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 8d78acc4fba7..064a58c19f48 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -78,13 +78,17 @@ TRACE_EVENT(kvm_arm_clear_debug,
TP_printk("flags: 0x%08x", __entry->guest_debug)
);
+/*
+ * The dreg32 name is a leftover from a distant past. This will really
+ * output a 64bit value...
+ */
TRACE_EVENT(kvm_arm_set_dreg32,
- TP_PROTO(const char *name, __u32 value),
+ TP_PROTO(const char *name, __u64 value),
TP_ARGS(name, value),
TP_STRUCT__entry(
__field(const char *, name)
- __field(__u32, value)
+ __field(__u64, value)
),
TP_fast_assign(
@@ -92,7 +96,7 @@ TRACE_EVENT(kvm_arm_set_dreg32,
__entry->value = value;
),
- TP_printk("%s: 0x%08x", __entry->name, __entry->value)
+ TP_printk("%s: 0x%llx", __entry->name, __entry->value)
);
TRACE_DEFINE_SIZEOF(__u64);
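With the tracepoint widened, existing callers that pass the now-64-bit mdcr_el2 print the full value; for instance (call site as it appears in arch/arm64/kvm/debug.c at this point in the tree, to the best of this reading):

trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);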