// SPDX-License-Identifier: GPL-2.0
/*
 * XCR0 cpuid test
 *
 * Copyright (C) 2022, Google LLC.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

/*
 * Assert that architectural dependency rules are satisfied, e.g. that AVX is
 * supported if and only if SSE is supported.
 */
#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies)	   \
do {										   \
	uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
										   \
	GUEST_ASSERT_3((__supported & (xfeatures)) != (xfeatures) ||		   \
		       __supported == ((xfeatures) | (dependencies)),		   \
		       __supported, (xfeatures), (dependencies));		   \
} while (0)

/*
 * Assert that KVM reports a sane, usable as-is XCR0. Architecturally, a CPU
 * isn't strictly required to _support_ all XFeatures related to a feature, but
 * at the same time XSETBV will #GP if bundled XFeatures aren't enabled and
 * disabled coherently. E.g. a CPU can technically enumerate support for
 * XTILE_CFG but not XTILE_DATA, but attempting to enable XTILE_CFG without
 * XTILE_DATA will #GP.
 */
#define ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0, xfeatures)		\
do {									\
	uint64_t __supported = (supported_xcr0) & (xfeatures);		\
									\
	GUEST_ASSERT_2(!__supported || __supported == (xfeatures),	\
		       __supported, (xfeatures));			\
} while (0)

static void guest_code(void)
{
	uint64_t xcr0_reset;
	uint64_t supported_xcr0;
	int i, vector;
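
	/* XGETBV and XSETBV #UD if CR4.OSXSAVE=0, so enable XSAVE first. */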
	set_cr4(get_cr4() | X86_CR4_OSXSAVE);

	xcr0_reset = xgetbv(0);
	supported_xcr0 = this_cpu_supported_xcr0();
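
	/* XCR0 is architecturally defined to reset to 0x1, i.e. x87/FP only. */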
	GUEST_ASSERT(xcr0_reset == XFEATURE_MASK_FP);

	/* Check AVX */
	ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
				     XFEATURE_MASK_YMM,
				     XFEATURE_MASK_SSE);

	/* Check MPX */
	ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
				    XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	/* Check AVX-512 */
	ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
				     XFEATURE_MASK_AVX512,
				     XFEATURE_MASK_SSE | XFEATURE_MASK_YMM);
	ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
				    XFEATURE_MASK_AVX512);

	/* Check AMX */
	ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
				    XFEATURE_MASK_XTILE);
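
	/* XSETBV with the exact supported mask must succeed. */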
	vector = xsetbv_safe(0, supported_xcr0);
	GUEST_ASSERT_2(!vector, supported_xcr0, vector);
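
	/*
	 * Setting any unsupported xfeature bit, even alongside all supported
	 * bits, must #GP.
	 */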
	for (i = 0; i < 64; i++) {
		if (supported_xcr0 & BIT_ULL(i))
			continue;

		vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
		GUEST_ASSERT_3(vector == GP_VECTOR, supported_xcr0, vector, BIT_ULL(i));
	}

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;
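
	/*
	 * Set up exception handling so the guest's xsetbv_safe() calls can
	 * intercept the expected #GPs instead of killing the VM.
	 */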
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	while (1) {
		vcpu_run(vcpu);

		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Unexpected exit reason: %u (%s)",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_3(uc, "0x%lx 0x%lx 0x%lx");
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	kvm_vm_free(vm);

	return 0;
}