Diffstat (limited to 'tools/testing/selftests/kvm/include/x86_64/processor.h')
 tools/testing/selftests/kvm/include/x86_64/processor.h | 124
 1 file changed, 108 insertions(+), 16 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 90387ddcb2a9..aa434c8f19c5 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -48,6 +48,35 @@ extern bool host_cpu_is_amd;
 #define X86_CR4_SMAP		(1ul << 21)
 #define X86_CR4_PKE		(1ul << 22)
 
+struct xstate_header {
+	u64				xstate_bv;
+	u64				xcomp_bv;
+	u64				reserved[6];
+} __attribute__((packed));
+
+struct xstate {
+	u8				i387[512];
+	struct xstate_header		header;
+	u8				extended_state_area[0];
+} __attribute__ ((packed, aligned (64)));
+
+#define XFEATURE_MASK_FP		BIT_ULL(0)
+#define XFEATURE_MASK_SSE		BIT_ULL(1)
+#define XFEATURE_MASK_YMM		BIT_ULL(2)
+#define XFEATURE_MASK_BNDREGS		BIT_ULL(3)
+#define XFEATURE_MASK_BNDCSR		BIT_ULL(4)
+#define XFEATURE_MASK_OPMASK		BIT_ULL(5)
+#define XFEATURE_MASK_ZMM_Hi256		BIT_ULL(6)
+#define XFEATURE_MASK_Hi16_ZMM		BIT_ULL(7)
+#define XFEATURE_MASK_XTILE_CFG		BIT_ULL(17)
+#define XFEATURE_MASK_XTILE_DATA	BIT_ULL(18)
+
+#define XFEATURE_MASK_AVX512		(XFEATURE_MASK_OPMASK | \
+					 XFEATURE_MASK_ZMM_Hi256 | \
+					 XFEATURE_MASK_Hi16_ZMM)
+#define XFEATURE_MASK_XTILE		(XFEATURE_MASK_XTILE_DATA | \
+					 XFEATURE_MASK_XTILE_CFG)
+
 /* Note, these are ordered alphabetically to match kvm_cpuid_entry2.  Eww. */
 enum cpuid_output_regs {
 	KVM_CPUID_EAX,
@@ -131,6 +160,7 @@ struct kvm_x86_cpu_feature {
 #define X86_FEATURE_XTILEDATA		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
 #define X86_FEATURE_XSAVES		KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
 #define X86_FEATURE_XFD			KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)
+#define X86_FEATURE_XTILEDATA_XFD	KVM_X86_CPU_FEATURE(0xD, 18, ECX, 2)
 
 /*
  * Extended Leafs, a.k.a. AMD defined
@@ -211,10 +241,14 @@ struct kvm_x86_cpu_property {
 #define X86_PROPERTY_PMU_NR_GP_COUNTERS		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
 #define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
 
+#define X86_PROPERTY_SUPPORTED_XCR0_LO		KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
 #define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0	KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
 #define X86_PROPERTY_XSTATE_MAX_SIZE		KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
+#define X86_PROPERTY_SUPPORTED_XCR0_HI		KVM_X86_CPU_PROPERTY(0xd, 0, EDX, 0, 31)
+
 #define X86_PROPERTY_XSTATE_TILE_SIZE		KVM_X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
 #define X86_PROPERTY_XSTATE_TILE_OFFSET		KVM_X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)
 
+#define X86_PROPERTY_AMX_MAX_PALETTE_TABLES	KVM_X86_CPU_PROPERTY(0x1d, 0, EAX, 0, 31)
 #define X86_PROPERTY_AMX_TOTAL_TILE_BYTES	KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
 #define X86_PROPERTY_AMX_BYTES_PER_TILE		KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
 #define X86_PROPERTY_AMX_BYTES_PER_ROW		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
@@ -496,6 +530,24 @@ static inline void set_cr4(uint64_t val)
 	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
 }
 
+static inline u64 xgetbv(u32 index)
+{
+	u32 eax, edx;
+
+	__asm__ __volatile__("xgetbv;"
+			     : "=a" (eax), "=d" (edx)
+			     : "c" (index));
+	return eax | ((u64)edx << 32);
+}
+
+static inline void xsetbv(u32 index, u64 value)
+{
+	u32 eax = value;
+	u32 edx = value >> 32;
+
+	__asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
+}
+
 static inline struct desc_ptr get_gdt(void)
 {
 	struct desc_ptr gdt;
@@ -632,6 +684,15 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
 	       !this_cpu_has(feature.anti_feature);
 }
 
+static __always_inline uint64_t this_cpu_supported_xcr0(void)
+{
+	if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
+		return 0;
+
+	return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
+	       ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+}
+
 typedef u32 __attribute__((vector_size(16))) sse128_t;
 #define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
 #define sse128_lo(x)	({ __sse128_u t; t.vec = x; t.as_u64[0]; })
@@ -928,14 +989,45 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
 
 uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
 int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
 
-static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index,
-				uint64_t msr_value)
-{
-	int r = _vcpu_set_msr(vcpu, msr_index, msr_value);
+/*
+ * Assert on an MSR access(es) and pretty print the MSR name when possible.
+ * Note, the caller provides the stringified name so that the name of macro is
+ * printed, not the value the macro resolves to (due to macro expansion).
+ */
+#define TEST_ASSERT_MSR(cond, fmt, msr, str, args...)				\
+do {										\
+	if (__builtin_constant_p(msr)) {					\
+		TEST_ASSERT(cond, fmt, str, args);				\
+	} else if (!(cond)) {							\
+		char buf[16];							\
+										\
+		snprintf(buf, sizeof(buf), "MSR 0x%x", msr);			\
+		TEST_ASSERT(cond, fmt, buf, args);				\
+	}									\
+} while (0)
 
-	TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
+/*
+ * Returns true if KVM should return the last written value when reading an MSR
+ * from userspace, e.g. the MSR isn't a command MSR, doesn't emulate state that
+ * is changing, etc.  This is NOT an exhaustive list!  The intent is to filter
+ * out MSRs that are not durable _and_ that a selftest wants to write.
+ */
+static inline bool is_durable_msr(uint32_t msr)
+{
+	return msr != MSR_IA32_TSC;
 }
 
+#define vcpu_set_msr(vcpu, msr, val)							\
+do {											\
+	uint64_t r, v = val;								\
+											\
+	TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1,				\
+			"KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v);	\
+	if (!is_durable_msr(msr))							\
+		break;									\
+	r = vcpu_get_msr(vcpu, msr);							\
+	TEST_ASSERT_MSR(r == v, "Set %s to '0x%lx', got back '0x%lx'", msr, #msr, v, r);\
+} while (0)
 
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
 bool vm_is_unrestricted_guest(struct kvm_vm *vm);
@@ -1055,6 +1147,14 @@ static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
 	return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
 }
 
+static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
+{
+	u32 eax = value;
+	u32 edx = value >> 32;
+
+	return kvm_asm_safe("xsetbv", "a" (eax), "d" (edx), "c" (index));
+}
+
 bool kvm_is_tdp_enabled(void);
 
 uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
@@ -1066,10 +1166,10 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
 void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
 
-void __vm_xsave_require_permission(int bit, const char *name);
+void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
 
-#define vm_xsave_require_permission(perm)	\
-	__vm_xsave_require_permission(perm, #perm)
+#define vm_xsave_require_permission(xfeature)	\
+	__vm_xsave_require_permission(xfeature, #xfeature)
 
 enum pg_level {
 	PG_LEVEL_NONE,
@@ -1106,14 +1206,6 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 #define X86_CR0_CD          (1UL<<30) /* Cache Disable */
 #define X86_CR0_PG          (1UL<<31) /* Paging */
 
-#define XSTATE_XTILE_CFG_BIT		17
-#define XSTATE_XTILE_DATA_BIT		18
-
-#define XSTATE_XTILE_CFG_MASK		(1ULL << XSTATE_XTILE_CFG_BIT)
-#define XSTATE_XTILE_DATA_MASK		(1ULL << XSTATE_XTILE_DATA_BIT)
-#define XFEATURE_XTILE_MASK		(XSTATE_XTILE_CFG_MASK | \
-					 XSTATE_XTILE_DATA_MASK)
-
 #define PFERR_PRESENT_BIT 0
 #define PFERR_WRITE_BIT 1
 #define PFERR_USER_BIT 2
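
For reference, a minimal guest-side sketch of how the helpers added above compose. This is illustrative only, not part of the patch: guest_code() is a placeholder name, and GUEST_ASSERT()/GUEST_DONE() plus X86_CR4_OSXSAVE are assumed to come from the existing selftest headers rather than from this diff.

/*
 * Illustrative sketch, not part of this patch.  Enable CR4.OSXSAVE, set
 * XCR0 to the full supported mask reported by CPUID.0xD, and verify the
 * write sticks, using this_cpu_supported_xcr0(), xsetbv_safe(), and
 * xgetbv() from the hunks above.
 */
static void guest_code(void)
{
	uint64_t supported_xcr0 = this_cpu_supported_xcr0();
	uint8_t vector;

	/* XGETBV/XSETBV #UD unless CR4.OSXSAVE is enabled. */
	set_cr4(get_cr4() | X86_CR4_OSXSAVE);

	/* Setting every supported XCR0 bit should not fault. */
	vector = xsetbv_safe(0, supported_xcr0);
	GUEST_ASSERT(!vector);

	/* XCR0 should read back exactly what was written. */
	GUEST_ASSERT(xgetbv(0) == supported_xcr0);

	GUEST_DONE();
}

On the host side, note the design of the reworked vcpu_set_msr(): every write through the macro is read back and compared when the MSR is "durable", so a plain vcpu_set_msr() call now asserts both that KVM_SET_MSRS succeeded and that the written value is what a subsequent read returns, while is_durable_msr() exempts MSRs such as MSR_IA32_TSC whose value is constantly changing.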