author    Linus Torvalds <torvalds@linux-foundation.org>    2020-12-20 10:44:05 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-12-20 10:44:05 -0800
commit    6a447b0e3151893f6d4a889956553c06d2e775c6 (patch)
tree      0f0c149c03dd8c2e9a5fbe01d6de528b2724893e /tools
parent    f4a2f7866faaf89ea1595b136e01fcb336b46aab (diff)
parent    d45f89f7437d0f2c8275b4434096164db106384d (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "Much x86 work was pushed out to 5.12, but ARM more than made up for it.

  ARM:
   - PSCI relay at EL2 when "protected KVM" is enabled
   - New exception injection code
   - Simplification of AArch32 system register handling
   - Fix PMU accesses when no PMU is enabled
   - Expose CSV3 on non-Meltdown hosts
   - Cache hierarchy discovery fixes
   - PV steal-time cleanups
   - Allow function pointers at EL2
   - Various host EL2 entry cleanups
   - Simplification of the EL2 vector allocation

  s390:
   - memcg accounting for s390-specific parts of kvm and gmap
   - selftest for diag318
   - new kvm_stat for when async_pf falls back to sync

  x86:
   - Tracepoints for the new pagetable code from 5.10
   - Catch VFIO and KVM irqfd events before userspace
   - Reporting dirty pages to userspace with a ring buffer
   - SEV-ES host support
   - Nested VMX support for wait-for-SIPI activity state
   - New feature flag (AVX512 FP16)
   - New system ioctl to report Hyper-V-compatible paravirtualization features

  Generic:
   - Selftest improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (171 commits)
  KVM: SVM: fix 32-bit compilation
  KVM: SVM: Add AP_JUMP_TABLE support in prep for AP booting
  KVM: SVM: Provide support to launch and run an SEV-ES guest
  KVM: SVM: Provide an updated VMRUN invocation for SEV-ES guests
  KVM: SVM: Provide support for SEV-ES vCPU loading
  KVM: SVM: Provide support for SEV-ES vCPU creation/loading
  KVM: SVM: Update ASID allocation to support SEV-ES guests
  KVM: SVM: Set the encryption mask for the SVM host save area
  KVM: SVM: Add NMI support for an SEV-ES guest
  KVM: SVM: Guest FPU state save/restore not needed for SEV-ES guest
  KVM: SVM: Do not report support for SMM for an SEV-ES guest
  KVM: x86: Update __get_sregs() / __set_sregs() to support SEV-ES
  KVM: SVM: Add support for CR8 write traps for an SEV-ES guest
  KVM: SVM: Add support for CR4 write traps for an SEV-ES guest
  KVM: SVM: Add support for CR0 write traps for an SEV-ES guest
  KVM: SVM: Add support for EFER write traps for an SEV-ES guest
  KVM: SVM: Support string IO operations for an SEV-ES guest
  KVM: SVM: Support MMIO for an SEV-ES guest
  KVM: SVM: Create trace events for VMGEXIT MSR protocol processing
  KVM: SVM: Create trace events for VMGEXIT processing
  ...
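As background for the "ring buffer" item above: with KVM_CAP_DIRTY_LOG_RING enabled, userspace mmap()s a per-vcpu array of struct kvm_dirty_gfn entries from the vcpu fd (at page offset KVM_DIRTY_LOG_PAGE_OFFSET, as the selftest library code in this diff does) and harvests it instead of calling KVM_GET_DIRTY_LOG. A minimal consumption sketch in C, mirroring the dirty_ring_collect_one() logic added to dirty_log_test.c below; error handling and memory-ordering details are omitted:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Harvest dirtied GFNs from one vcpu's ring. 'ring' and 'nents' come
     * from the mmap() of the vcpu fd; 'fetch_index' persists across calls.
     * Returns the number of pages collected.
     */
    static uint32_t harvest_dirty_ring(struct kvm_dirty_gfn *ring,
                                       uint32_t nents,
                                       uint32_t *fetch_index, int vm_fd)
    {
            uint32_t count = 0;

            for (;;) {
                    struct kvm_dirty_gfn *e = &ring[*fetch_index % nents];

                    if (e->flags != KVM_DIRTY_GFN_F_DIRTY)
                            break;          /* no more published entries */
                    /* e->slot and e->offset identify the dirty page here */
                    e->flags = KVM_DIRTY_GFN_F_RESET;   /* mark collected */
                    (*fetch_index)++;
                    count++;
            }
            /* Ask KVM to recycle every entry we marked for reset */
            ioctl(vm_fd, KVM_RESET_DIRTY_RINGS);
            return count;
    }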
Diffstat (limited to 'tools')
-rw-r--r--  tools/testing/selftests/kvm/.gitignore                            |   6
-rw-r--r--  tools/testing/selftests/kvm/Makefile                              |   5
-rw-r--r--  tools/testing/selftests/kvm/aarch64/get-reg-list.c                |  39
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_perf_test.c                 |  55
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_test.c                      | 344
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h                    |  38
-rw-r--r--  tools/testing/selftests/kvm/include/perf_test_util.h              |   4
-rw-r--r--  tools/testing/selftests/kvm/include/s390x/diag318_test_handler.h  |  13
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/processor.h            |  17
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/vmx.h                  |   4
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/processor.c               |  17
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c                        | 158
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util_internal.h               |   4
-rw-r--r--  tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c      |  82
-rw-r--r--  tools/testing/selftests/kvm/lib/s390x/processor.c                 |  22
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c                |  32
-rw-r--r--  tools/testing/selftests/kvm/s390x/sync_regs_test.c                |  16
-rw-r--r--  tools/testing/selftests/kvm/set_memory_region_test.c              |   2
-rw-r--r--  tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c          |   1
-rw-r--r--  tools/testing/selftests/kvm/x86_64/debug_regs.c                   |   1
-rw-r--r--  tools/testing/selftests/kvm/x86_64/evmcs_test.c                   |   2
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c                 |  87
-rw-r--r--  tools/testing/selftests/kvm/x86_64/kvm_pv_test.c                  |   4
-rw-r--r--  tools/testing/selftests/kvm/x86_64/set_sregs_test.c               |  92
-rw-r--r--  tools/testing/selftests/kvm/x86_64/smm_test.c                     |   2
-rw-r--r--  tools/testing/selftests/kvm/x86_64/state_test.c                   |   1
-rw-r--r--  tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c              |   1
-rw-r--r--  tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c                |   1
-rw-r--r--  tools/testing/selftests/kvm/x86_64/user_msr_test.c                | 248
-rw-r--r--  tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c      | 770
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c         |   1
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c  |   1
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c           |   1
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c    |  15
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c    |  21
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c          |   1
36 files changed, 1643 insertions, 465 deletions
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 7a2c242b7152..ce8f4ad39684 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -14,17 +14,17 @@
/x86_64/set_sregs_test
/x86_64/smm_test
/x86_64/state_test
-/x86_64/user_msr_test
-/x86_64/vmx_preemption_timer_test
/x86_64/svm_vmcall_test
/x86_64/sync_regs_test
+/x86_64/tsc_msrs_test
+/x86_64/userspace_msr_exit_test
/x86_64/vmx_apic_access_test
/x86_64/vmx_close_while_nested_test
/x86_64/vmx_dirty_log_test
+/x86_64/vmx_preemption_timer_test
/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
/x86_64/xss_msr_test
-/clear_dirty_log_test
/demand_paging_test
/dirty_log_test
/dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 3d14ef77755e..c7ca4faba272 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -36,7 +36,7 @@ endif
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
-LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
+LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
@@ -50,6 +50,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
@@ -58,7 +59,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
-TEST_GEN_PROGS_x86_64 += x86_64/user_msr_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
@@ -70,6 +70,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list-sve
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
+TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index 33218a395d9f..486932164cf2 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -42,12 +42,16 @@
#define for_each_reg(i) \
for ((i) = 0; (i) < reg_list->n; ++(i))
+#define for_each_reg_filtered(i) \
+ for_each_reg(i) \
+ if (!filter_reg(reg_list->reg[i]))
+
#define for_each_missing_reg(i) \
for ((i) = 0; (i) < blessed_n; ++(i)) \
if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))
#define for_each_new_reg(i) \
- for ((i) = 0; (i) < reg_list->n; ++(i)) \
+ for_each_reg_filtered(i) \
if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
@@ -57,6 +61,18 @@ static __u64 base_regs[], vregs[], sve_regs[], rejects_set[];
static __u64 base_regs_n, vregs_n, sve_regs_n, rejects_set_n;
static __u64 *blessed_reg, blessed_n;
+static bool filter_reg(__u64 reg)
+{
+ /*
+ * DEMUX register presence depends on the host's CLIDR_EL1.
+ * This means there's no set of them that we can bless.
+ */
+ if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
+ return true;
+
+ return false;
+}
+
static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
{
int i;
@@ -325,7 +341,7 @@ int main(int ac, char **av)
struct kvm_vcpu_init init = { .target = -1, };
int new_regs = 0, missing_regs = 0, i;
int failed_get = 0, failed_set = 0, failed_reject = 0;
- bool print_list = false, fixup_core_regs = false;
+ bool print_list = false, print_filtered = false, fixup_core_regs = false;
struct kvm_vm *vm;
__u64 *vec_regs;
@@ -336,8 +352,10 @@ int main(int ac, char **av)
fixup_core_regs = true;
else if (strcmp(av[i], "--list") == 0)
print_list = true;
+ else if (strcmp(av[i], "--list-filtered") == 0)
+ print_filtered = true;
else
- fprintf(stderr, "Ignoring unknown option: %s\n", av[i]);
+ TEST_FAIL("Unknown option: %s\n", av[i]);
}
vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
@@ -350,10 +368,14 @@ int main(int ac, char **av)
if (fixup_core_regs)
core_reg_fixup();
- if (print_list) {
+ if (print_list || print_filtered) {
putchar('\n');
- for_each_reg(i)
- print_reg(reg_list->reg[i]);
+ for_each_reg(i) {
+ __u64 id = reg_list->reg[i];
+ if ((print_list && !filter_reg(id)) ||
+ (print_filtered && filter_reg(id)))
+ print_reg(id);
+ }
putchar('\n');
return 0;
}
@@ -458,6 +480,8 @@ int main(int ac, char **av)
/*
* The current blessed list was primed with the output of kernel version
* v4.15 with --core-reg-fixup and then later updated with new registers.
+ *
+ * The blessed list is up to date with kernel version v5.10-rc5
*/
static __u64 base_regs[] = {
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
@@ -736,9 +760,6 @@ static __u64 base_regs[] = {
ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */
ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */
ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */
- KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 0,
- KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 1,
- KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 2,
};
static __u64 base_regs_n = ARRAY_SIZE(base_regs);
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 85c9b8f73142..9c6a7be31e03 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -27,6 +27,7 @@
#define TEST_HOST_LOOP_N 2UL
/* Host variables */
+static u64 dirty_log_manual_caps;
static bool host_quit;
static uint64_t iteration;
static uint64_t vcpu_last_completed_iteration[MAX_VCPUS];
@@ -88,10 +89,6 @@ static void *vcpu_worker(void *data)
return NULL;
}
-#ifdef USE_CLEAR_DIRTY_LOG
-static u64 dirty_log_manual_caps;
-#endif
-
static void run_test(enum vm_guest_mode mode, unsigned long iterations,
uint64_t phys_offset, int wr_fract)
{
@@ -106,10 +103,8 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
struct timespec get_dirty_log_total = (struct timespec){0};
struct timespec vcpu_dirty_total = (struct timespec){0};
struct timespec avg;
-#ifdef USE_CLEAR_DIRTY_LOG
struct kvm_enable_cap cap = {};
struct timespec clear_dirty_log_total = (struct timespec){0};
-#endif
vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
@@ -120,11 +115,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
bmap = bitmap_alloc(host_num_pages);
-#ifdef USE_CLEAR_DIRTY_LOG
- cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
- cap.args[0] = dirty_log_manual_caps;
- vm_enable_cap(vm, &cap);
-#endif
+ if (dirty_log_manual_caps) {
+ cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
+ cap.args[0] = dirty_log_manual_caps;
+ vm_enable_cap(vm, &cap);
+ }
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
@@ -190,17 +185,17 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
pr_info("Iteration %lu get dirty log time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
-#ifdef USE_CLEAR_DIRTY_LOG
- clock_gettime(CLOCK_MONOTONIC, &start);
- kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
- host_num_pages);
+ if (dirty_log_manual_caps) {
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
+ host_num_pages);
- ts_diff = timespec_diff_now(start);
- clear_dirty_log_total = timespec_add(clear_dirty_log_total,
- ts_diff);
- pr_info("Iteration %lu clear dirty log time: %ld.%.9lds\n",
- iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
-#endif
+ ts_diff = timespec_diff_now(start);
+ clear_dirty_log_total = timespec_add(clear_dirty_log_total,
+ ts_diff);
+ pr_info("Iteration %lu clear dirty log time: %ld.%.9lds\n",
+ iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
+ }
}
/* Tell the vcpu thread to quit */
@@ -220,12 +215,12 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
iterations, get_dirty_log_total.tv_sec,
get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
-#ifdef USE_CLEAR_DIRTY_LOG
- avg = timespec_div(clear_dirty_log_total, iterations);
- pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
- iterations, clear_dirty_log_total.tv_sec,
- clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
-#endif
+ if (dirty_log_manual_caps) {
+ avg = timespec_div(clear_dirty_log_total, iterations);
+ pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
+ iterations, clear_dirty_log_total.tv_sec,
+ clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
+ }
free(bmap);
free(vcpu_threads);
@@ -284,16 +279,10 @@ int main(int argc, char *argv[])
int opt, i;
int wr_fract = 1;
-#ifdef USE_CLEAR_DIRTY_LOG
dirty_log_manual_caps =
kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
- if (!dirty_log_manual_caps) {
- print_skip("KVM_CLEAR_DIRTY_LOG not available");
- exit(KSFT_SKIP);
- }
dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
-#endif
#ifdef __x86_64__
guest_mode_init(VM_MODE_PXXV48_4K, true, true);
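The conversion above is worth calling out: instead of compiling separate binaries with and without USE_CLEAR_DIRTY_LOG, the test now probes KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 at runtime and only exercises KVM_CLEAR_DIRTY_LOG when the capability is present. A hedged sketch of the same probe using raw ioctls rather than the selftest wrappers (kvm_check_cap() and vm_enable_cap() in the harness boil down to these two calls):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Returns the supported manual-protect flags, or 0 if unsupported. */
    static long probe_manual_dirty_log(int kvm_fd, int vm_fd)
    {
            long caps = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
                              KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);

            if (caps <= 0)
                    return 0;   /* fall back to plain KVM_GET_DIRTY_LOG */

            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
                    .args[0] = caps & (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                                       KVM_DIRTY_LOG_INITIALLY_SET),
            };

            ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
            return caps;
    }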
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 54da9cc20db4..471baecb7772 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -12,8 +12,13 @@
#include <unistd.h>
#include <time.h>
#include <pthread.h>
+#include <semaphore.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <asm/barrier.h>
#include "test_util.h"
#include "kvm_util.h"
@@ -57,6 +62,10 @@
# define test_and_clear_bit_le test_and_clear_bit
#endif
+#define TEST_DIRTY_RING_COUNT 65536
+
+#define SIG_IPI SIGUSR1
+
/*
* Guest/Host shared variables. Ensure addr_gva2hva() and/or
* sync_global_to/from_guest() are used when accessing from
@@ -128,6 +137,31 @@ static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;
+/* Whether dirty ring reset is requested, or finished */
+static sem_t dirty_ring_vcpu_stop;
+static sem_t dirty_ring_vcpu_cont;
+/*
+ * This is updated by the vcpu thread to tell the host whether it's a
+ * ring-full event. It should only be read until a sem_wait() of
+ * dirty_ring_vcpu_stop and before vcpu continues to run.
+ */
+static bool dirty_ring_vcpu_ring_full;
+/*
+ * This is only used for verifying the dirty pages. Dirty ring has a very
+ * tricky case when the ring just got full, kvm will do userspace exit due to
+ * ring full. When that happens, the very last PFN is set but actually the
+ * data is not changed (the guest WRITE is not really applied yet), because
+ * we found that the dirty ring is full, refused to continue the vcpu, and
+ * recorded the dirty gfn with the old contents.
+ *
+ * For this specific case, it's safe to skip checking this pfn for this
+ * bit, because it's a redundant bit, and when the write happens later the bit
+ * will be set again. We use this variable to always keep track of the latest
+ * dirty gfn we've collected, so that if a mismatch of data found later in the
+ * verifying process, we let it pass.
+ */
+static uint64_t dirty_ring_last_page;
+
enum log_mode_t {
/* Only use KVM_GET_DIRTY_LOG for logging */
LOG_MODE_DIRTY_LOG = 0,
@@ -135,6 +169,9 @@ enum log_mode_t {
/* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
LOG_MODE_CLEAR_LOG = 1,
+ /* Use dirty ring for logging */
+ LOG_MODE_DIRTY_RING = 2,
+
LOG_MODE_NUM,
/* Run all supported modes */
@@ -145,6 +182,26 @@ enum log_mode_t {
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for current run */
static enum log_mode_t host_log_mode;
+static pthread_t vcpu_thread;
+static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
+
+static void vcpu_kick(void)
+{
+ pthread_kill(vcpu_thread, SIG_IPI);
+}
+
+/*
+ * In our test we do signal tricks, so use a version of sem_wait() that
+ * retries when interrupted by a signal.
+ */
+static void sem_wait_until(sem_t *sem)
+{
+ int ret;
+
+ do
+ ret = sem_wait(sem);
+ while (ret == -1 && errno == EINTR);
+}
static bool clear_log_supported(void)
{
@@ -178,6 +235,152 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
}
+static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+
+ TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
+ "vcpu run failed: errno=%d", err);
+
+ TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
+ "Invalid guest sync status: exit_reason=%s\n",
+ exit_reason_str(run->exit_reason));
+}
+
+static bool dirty_ring_supported(void)
+{
+ return kvm_check_cap(KVM_CAP_DIRTY_LOG_RING);
+}
+
+static void dirty_ring_create_vm_done(struct kvm_vm *vm)
+{
+ /*
+ * Switch to dirty ring mode after VM creation but before any
+ * of the vcpu creation.
+ */
+ vm_enable_dirty_ring(vm, test_dirty_ring_count *
+ sizeof(struct kvm_dirty_gfn));
+}
+
+static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
+{
+ return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
+}
+
+static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
+{
+ gfn->flags = KVM_DIRTY_GFN_F_RESET;
+}
+
+static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
+ int slot, void *bitmap,
+ uint32_t num_pages, uint32_t *fetch_index)
+{
+ struct kvm_dirty_gfn *cur;
+ uint32_t count = 0;
+
+ while (true) {
+ cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
+ if (!dirty_gfn_is_dirtied(cur))
+ break;
+ TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
+ "%u != %u", cur->slot, slot);
+ TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
+ "0x%llx >= 0x%x", cur->offset, num_pages);
+ //pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
+ set_bit_le(cur->offset, bitmap);
+ dirty_ring_last_page = cur->offset;
+ dirty_gfn_set_collected(cur);
+ (*fetch_index)++;
+ count++;
+ }
+
+ return count;
+}
+
+static void dirty_ring_wait_vcpu(void)
+{
+ /* This makes sure that the hardware PML cache is flushed */
+ vcpu_kick();
+ sem_wait_until(&dirty_ring_vcpu_stop);
+}
+
+static void dirty_ring_continue_vcpu(void)
+{
+ pr_info("Notifying vcpu to continue\n");
+ sem_post(&dirty_ring_vcpu_cont);
+}
+
+static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
+ void *bitmap, uint32_t num_pages)
+{
+ /* We only have one vcpu */
+ static uint32_t fetch_index = 0;
+ uint32_t count = 0, cleared;
+ bool continued_vcpu = false;
+
+ dirty_ring_wait_vcpu();
+
+ if (!dirty_ring_vcpu_ring_full) {
+ /*
+ * This is not a ring-full event, it's safe to allow
+ * vcpu to continue
+ */
+ dirty_ring_continue_vcpu();
+ continued_vcpu = true;
+ }
+
+ /* Only have one vcpu */
+ count = dirty_ring_collect_one(vcpu_map_dirty_ring(vm, VCPU_ID),
+ slot, bitmap, num_pages, &fetch_index);
+
+ cleared = kvm_vm_reset_dirty_ring(vm);
+
+ /* Cleared pages should be the same as collected */
+ TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
+ "with collected (%u)", cleared, count);
+
+ if (!continued_vcpu) {
+ TEST_ASSERT(dirty_ring_vcpu_ring_full,
+ "Didn't continue vcpu even without ring full");
+ dirty_ring_continue_vcpu();
+ }
+
+ pr_info("Iteration %ld collected %u pages\n", iteration, count);
+}
+
+static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+
+ /* A ucall-sync or ring-full event is allowed */
+ if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
+ /* We should allow this to continue */
+ ;
+ } else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
+ (ret == -1 && err == EINTR)) {
+ /* Update the flag first before pause */
+ WRITE_ONCE(dirty_ring_vcpu_ring_full,
+ run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
+ sem_post(&dirty_ring_vcpu_stop);
+ pr_info("vcpu stops because %s...\n",
+ dirty_ring_vcpu_ring_full ?
+ "dirty ring is full" : "vcpu is kicked out");
+ sem_wait_until(&dirty_ring_vcpu_cont);
+ pr_info("vcpu continues now.\n");
+ } else {
+ TEST_ASSERT(false, "Invalid guest sync status: "
+ "exit_reason=%s\n",
+ exit_reason_str(run->exit_reason));
+ }
+}
+
+static void dirty_ring_before_vcpu_join(void)
+{
+ /* Kick another round of vcpu just to make sure it will quit */
+ sem_post(&dirty_ring_vcpu_cont);
+}
+
struct log_mode {
const char *name;
/* Return true if this mode is supported, otherwise false */
@@ -187,16 +390,29 @@ struct log_mode {
/* Hook to collect the dirty pages into the bitmap provided */
void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
void *bitmap, uint32_t num_pages);
+ /* Hook to call after each vcpu run */
+ void (*after_vcpu_run)(struct kvm_vm *vm, int ret, int err);
+ void (*before_vcpu_join) (void);
} log_modes[LOG_MODE_NUM] = {
{
.name = "dirty-log",
.collect_dirty_pages = dirty_log_collect_dirty_pages,
+ .after_vcpu_run = default_after_vcpu_run,
},
{
.name = "clear-log",
.supported = clear_log_supported,
.create_vm_done = clear_log_create_vm_done,
.collect_dirty_pages = clear_log_collect_dirty_pages,
+ .after_vcpu_run = default_after_vcpu_run,
+ },
+ {
+ .name = "dirty-ring",
+ .supported = dirty_ring_supported,
+ .create_vm_done = dirty_ring_create_vm_done,
+ .collect_dirty_pages = dirty_ring_collect_dirty_pages,
+ .before_vcpu_join = dirty_ring_before_vcpu_join,
+ .after_vcpu_run = dirty_ring_after_vcpu_run,
},
};
@@ -247,6 +463,22 @@ static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
}
+static void log_mode_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+{
+ struct log_mode *mode = &log_modes[host_log_mode];
+
+ if (mode->after_vcpu_run)
+ mode->after_vcpu_run(vm, ret, err);
+}
+
+static void log_mode_before_vcpu_join(void)
+{
+ struct log_mode *mode = &log_modes[host_log_mode];
+
+ if (mode->before_vcpu_join)
+ mode->before_vcpu_join();
+}
+
static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
uint64_t i;
@@ -257,29 +489,44 @@ static void generate_random_array(uint64_t *guest_array, uint64_t size)
static void *vcpu_worker(void *data)
{
- int ret;
+ int ret, vcpu_fd;
struct kvm_vm *vm = data;
uint64_t *guest_array;
uint64_t pages_count = 0;
- struct kvm_run *run;
+ struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
+ + sizeof(sigset_t));
+ sigset_t *sigset = (sigset_t *) &sigmask->sigset;
+
+ vcpu_fd = vcpu_get_fd(vm, VCPU_ID);
+
+ /*
+ * SIG_IPI is unblocked atomically while in KVM_RUN. It causes the
+ * ioctl to return with -EINTR, but it is still pending and we need
+ * to accept it with the sigwait.
+ */
+ sigmask->len = 8;
+ pthread_sigmask(0, NULL, sigset);
+ vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
+ sigaddset(sigset, SIG_IPI);
+ pthread_sigmask(SIG_BLOCK, sigset, NULL);
- run = vcpu_state(vm, VCPU_ID);
+ sigemptyset(sigset);
+ sigaddset(sigset, SIG_IPI);
guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
- generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
while (!READ_ONCE(host_quit)) {
+ /* Clear any existing kick signals */
+ generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
+ pages_count += TEST_PAGES_PER_LOOP;
/* Let the guest dirty the random pages */
- ret = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
- pages_count += TEST_PAGES_PER_LOOP;
- generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
- } else {
- TEST_FAIL("Invalid guest sync status: "
- "exit_reason=%s\n",
- exit_reason_str(run->exit_reason));
+ ret = ioctl(vcpu_fd, KVM_RUN, NULL);
+ if (ret == -1 && errno == EINTR) {
+ int sig = -1;
+ sigwait(sigset, &sig);
+ assert(sig == SIG_IPI);
}
+ log_mode_after_vcpu_run(vm, ret, errno);
}
pr_info("Dirtied %"PRIu64" pages\n", pages_count);
@@ -292,6 +539,7 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
uint64_t step = vm_num_host_pages(mode, 1);
uint64_t page;
uint64_t *value_ptr;
+ uint64_t min_iter = 0;
for (page = 0; page < host_num_pages; page += step) {
value_ptr = host_test_mem + page * host_page_size;
@@ -306,14 +554,64 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
}
if (test_and_clear_bit_le(page, bmap)) {
+ bool matched;
+
host_dirty_count++;
+
/*
* If the bit is set, the value written onto
* the corresponding page should be either the
* previous iteration number or the current one.
*/
- TEST_ASSERT(*value_ptr == iteration ||
- *value_ptr == iteration - 1,
+ matched = (*value_ptr == iteration ||
+ *value_ptr == iteration - 1);
+
+ if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
+ if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
+ /*
+ * Short answer: this case is special
+ * only for dirty ring test where the
+ * page is the last page before a kvm
+ * dirty ring full in iteration N-2.
+ *
+ * Long answer: Assuming ring size R,
+ * one possible condition is:
+ *
+ * main thr vcpu thr
+ * -------- --------
+ * iter=1
+ * write 1 to page 0~(R-1)
+ * full, vmexit
+ * collect 0~(R-1)
+ * kick vcpu
+ * write 1 to (R-1)~(2R-2)
+ * full, vmexit
+ * iter=2
+ * collect (R-1)~(2R-2)
+ * kick vcpu
+ * write 1 to (2R-2)
+ * (NOTE!!! "1" cached in cpu reg)
+ * write 2 to (2R-1)~(3R-3)
+ * full, vmexit
+ * iter=3
+ * collect (2R-2)~(3R-3)
+ * (here if we read value on page
+ * "2R-2" is 1, while iter=3!!!)
+ *
+ * This however can only happen once per iteration.
+ */
+ min_iter = iteration - 1;
+ continue;
+ } else if (page == dirty_ring_last_page) {
+ /*
+ * Please refer to comments in
+ * dirty_ring_last_page.
+ */
+ continue;
+ }
+ }
+
+ TEST_ASSERT(matched,
"Set page %"PRIu64" value %"PRIu64
" incorrect (iteration=%"PRIu64")",
page, *value_ptr, iteration);
@@ -378,7 +676,6 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
static void run_test(enum vm_guest_mode mode, unsigned long iterations,
unsigned long interval, uint64_t phys_offset)
{
- pthread_t vcpu_thread;
struct kvm_vm *vm;
unsigned long *bmap;
@@ -443,9 +740,6 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
-#ifdef __x86_64__
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-#endif
ucall_init(vm, NULL);
/* Export the shared variables to the guest */
@@ -476,6 +770,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
/* Tell the vcpu thread to quit */
host_quit = true;
+ log_mode_before_vcpu_join();
pthread_join(vcpu_thread, NULL);
pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
@@ -506,6 +801,9 @@ static void help(char *name)
printf("usage: %s [-h] [-i iterations] [-I interval] "
"[-p offset] [-m mode]\n", name);
puts("");
+ printf(" -c: specify dirty ring size, in number of entries\n");
+ printf(" (only useful for dirty-ring test; default: %"PRIu32")\n",
+ TEST_DIRTY_RING_COUNT);
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
@@ -536,6 +834,9 @@ int main(int argc, char *argv[])
unsigned int mode;
int opt, i, j;
+ sem_init(&dirty_ring_vcpu_stop, 0, 0);
+ sem_init(&dirty_ring_vcpu_cont, 0, 0);
+
#ifdef __x86_64__
guest_mode_init(VM_MODE_PXXV48_4K, true, true);
#endif
@@ -558,8 +859,11 @@ int main(int argc, char *argv[])
guest_mode_init(VM_MODE_P40V48_4K, true, true);
#endif
- while ((opt = getopt(argc, argv, "hi:I:p:m:M:")) != -1) {
+ while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
switch (opt) {
+ case 'c':
+ test_dirty_ring_count = strtol(optarg, NULL, 10);
+ break;
case 'i':
iterations = strtol(optarg, NULL, 10);
break;
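The vcpu_worker() rework above depends on a subtlety of KVM_SET_SIGNAL_MASK: the mask passed in is installed only for the duration of KVM_RUN, so a signal the thread otherwise blocks can interrupt the guest with -EINTR while never invoking a handler; the still-pending signal is then drained with sigwait(). A condensed sketch of just that kick protocol, assuming SIGUSR1 as the kick signal (the test's SIG_IPI) and an already-created vcpu fd:

    #include <alloca.h>
    #include <errno.h>
    #include <pthread.h>
    #include <signal.h>
    #include <stddef.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Run the guest once; returns the KVM_RUN result, consuming any kick. */
    static int run_vcpu_kickable(int vcpu_fd)
    {
            struct kvm_signal_mask *kmask;
            sigset_t run_set, kick_set;
            int ret;

            /* Mask in effect *inside* KVM_RUN: current mask, SIGUSR1 unblocked */
            pthread_sigmask(0, NULL, &run_set);
            sigdelset(&run_set, SIGUSR1);
            kmask = alloca(offsetof(struct kvm_signal_mask, sigset) +
                           sizeof(run_set));
            kmask->len = 8;         /* size of the kernel-side sigset */
            memcpy(kmask->sigset, &run_set, sizeof(run_set));
            ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, kmask);

            /* Outside KVM_RUN the kick signal stays blocked and pending */
            sigemptyset(&kick_set);
            sigaddset(&kick_set, SIGUSR1);
            pthread_sigmask(SIG_BLOCK, &kick_set, NULL);

            ret = ioctl(vcpu_fd, KVM_RUN, NULL);
            if (ret == -1 && errno == EINTR) {
                    int sig;

                    sigwait(&kick_set, &sig);   /* drain the pending kick */
            }
            return ret;
    }

A kicker thread then simply calls pthread_kill(vcpu_thread, SIGUSR1), as vcpu_kick() in the test does.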
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 7d29aa786959..dfa9d369e8fc 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -45,13 +45,28 @@ enum vm_guest_mode {
};
#if defined(__aarch64__)
-#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
+
+#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
+#define MIN_PAGE_SHIFT 12U
+#define ptes_per_page(page_size) ((page_size) / 8)
+
#elif defined(__x86_64__)
-#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K
-#else
-#define VM_MODE_DEFAULT VM_MODE_P52V48_4K
+
+#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K
+#define MIN_PAGE_SHIFT 12U
+#define ptes_per_page(page_size) ((page_size) / 8)
+
+#elif defined(__s390x__)
+
+#define VM_MODE_DEFAULT VM_MODE_P52V48_4K
+#define MIN_PAGE_SHIFT 12U
+#define ptes_per_page(page_size) ((page_size) / 16)
+
#endif
+#define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT)
+#define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE)
+
#define vm_guest_mode_string(m) vm_guest_mode_string[m]
extern const char * const vm_guest_mode_string[];
@@ -74,6 +89,7 @@ void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
uint64_t first_page, uint32_t num_pages);
+uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
size_t len);
@@ -114,6 +130,8 @@ void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
void *arg);
void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
+void kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
+int _kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
@@ -146,6 +164,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
+int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_guest_debug *debug);
@@ -199,6 +218,7 @@ void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_nested_state *state, bool ignore_error);
#endif
+void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid);
const char *exit_reason_str(unsigned int exit_reason);
@@ -246,6 +266,16 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
void *guest_code);
+/* Same as vm_create_default, but can be used for more than one vcpu */
+struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[]);
+
+/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
+struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+ uint64_t extra_mem_pages, uint32_t num_percpu_pages,
+ void *guest_code, uint32_t vcpuids[]);
+
/*
* Adds a vCPU with reasonable defaults (e.g. a stack)
*
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
index 2618052057b1..239421e4f6b8 100644
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -179,10 +179,6 @@ static void add_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
vm_vcpu_add_default(vm, vcpu_id, guest_code);
-#ifdef __x86_64__
- vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
-#endif
-
vcpu_args->vcpu_id = vcpu_id;
vcpu_args->gva = guest_test_virt_mem +
(vcpu_id * vcpu_memory_bytes);
diff --git a/tools/testing/selftests/kvm/include/s390x/diag318_test_handler.h b/tools/testing/selftests/kvm/include/s390x/diag318_test_handler.h
new file mode 100644
index 000000000000..b0ed71302722
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/s390x/diag318_test_handler.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Test handler for the s390x DIAGNOSE 0x0318 instruction.
+ *
+ * Copyright (C) 2020, IBM
+ */
+
+#ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER
+#define SELFTEST_KVM_DIAG318_TEST_HANDLER
+
+uint64_t get_diag318_info(void);
+
+#endif
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 8e61340b3911..90cd5984751b 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -27,6 +27,7 @@
#define X86_CR4_OSFXSR (1ul << 9)
#define X86_CR4_OSXMMEXCPT (1ul << 10)
#define X86_CR4_UMIP (1ul << 11)
+#define X86_CR4_LA57 (1ul << 12)
#define X86_CR4_VMXE (1ul << 13)
#define X86_CR4_SMXE (1ul << 14)
#define X86_CR4_FSGSBASE (1ul << 16)
@@ -36,6 +37,22 @@
#define X86_CR4_SMAP (1ul << 21)
#define X86_CR4_PKE (1ul << 22)
+/* CPUID.1.ECX */
+#define CPUID_VMX (1ul << 5)
+#define CPUID_SMX (1ul << 6)
+#define CPUID_PCID (1ul << 17)
+#define CPUID_XSAVE (1ul << 26)
+
+/* CPUID.7.EBX */
+#define CPUID_FSGSBASE (1ul << 0)
+#define CPUID_SMEP (1ul << 7)
+#define CPUID_SMAP (1ul << 20)
+
+/* CPUID.7.ECX */
+#define CPUID_UMIP (1ul << 2)
+#define CPUID_PKU (1ul << 3)
+#define CPUID_LA57 (1ul << 16)
+
#define UNEXPECTED_VECTOR_PORT 0xfff0u
/* General Registers in 64-Bit Mode */
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index e78d7e26ba61..65eb1079a161 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -11,10 +11,6 @@
#include <stdint.h>
#include "processor.h"
-#define CPUID_VMX_BIT 5
-
-#define CPUID_VMX (1 << 5)
-
/*
* Definitions of Primary Processor-Based VM-Execution Controls.
*/
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index d6c32c328e9a..cee92d477dc0 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -5,8 +5,6 @@
* Copyright (C) 2018, Red Hat, Inc.
*/
-#define _GNU_SOURCE /* for program_invocation_name */
-
#include <linux/compiler.h>
#include "kvm_util.h"
@@ -219,21 +217,6 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
}
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
- void *guest_code)
-{
- uint64_t ptrs_per_4k_pte = 512;
- uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
- struct kvm_vm *vm;
-
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
-
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
- vm_vcpu_add_default(vm, vcpuid, guest_code);
-
- return vm;
-}
-
void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 126c6727a6b0..88ef7067f1e6 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -5,6 +5,7 @@
* Copyright (C) 2018, Google LLC.
*/
+#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
@@ -114,6 +115,16 @@ int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
return r;
}
+void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
+{
+ struct kvm_enable_cap cap = { 0 };
+
+ cap.cap = KVM_CAP_DIRTY_LOG_RING;
+ cap.args[0] = ring_size;
+ vm_enable_cap(vm, &cap);
+ vm->dirty_ring_size = ring_size;
+}
+
static void vm_open(struct kvm_vm *vm, int perm)
{
vm->kvm_fd = open(KVM_DEV_PATH, perm);
@@ -271,6 +282,63 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
+struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+ uint64_t extra_mem_pages, uint32_t num_percpu_pages,
+ void *guest_code, uint32_t vcpuids[])
+{
+ /* The maximum page table size for a memory region will be when the
+ * smallest pages are used. Considering each page contains x page
+ * table descriptors, the total extra size for page tables (for extra
+ * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
+ * than N/x*2.
+ */
+ uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+ uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+ uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
+ struct kvm_vm *vm;
+ int i;
+
+ TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
+ "nr_vcpus = %d too large for host, max-vcpus = %d",
+ nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
+
+ pages = vm_adjust_num_guest_pages(mode, pages);
+ vm = vm_create(mode, pages, O_RDWR);
+
+ kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+
+#ifdef __x86_64__
+ vm_create_irqchip(vm);
+#endif
+
+ for (i = 0; i < nr_vcpus; ++i) {
+ uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
+
+ vm_vcpu_add_default(vm, vcpuid, guest_code);
+
+#ifdef __x86_64__
+ vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
+#endif
+ }
+
+ return vm;
+}
+
+struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[])
+{
+ return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
+ num_percpu_pages, guest_code, vcpuids);
+}
+
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
+ void *guest_code)
+{
+ return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
+ (uint32_t []){ vcpuid });
+}
+
/*
* VM Restart
*
@@ -328,6 +396,11 @@ void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
__func__, strerror(-ret));
}
+uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
+{
+ return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS);
+}
+
/*
* Userspace Memory Region Find
*
@@ -432,10 +505,17 @@ struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
*
* Removes a vCPU from a VM and frees its resources.
*/
-static void vm_vcpu_rm(struct vcpu *vcpu)
+static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
{
int ret;
+ if (vcpu->dirty_gfns) {
+ ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
+ TEST_ASSERT(ret == 0, "munmap of VCPU dirty ring failed, "
+ "rc: %i errno: %i", ret, errno);
+ vcpu->dirty_gfns = NULL;
+ }
+
ret = munmap(vcpu->state, sizeof(*vcpu->state));
TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
"errno: %i", ret, errno);
@@ -453,7 +533,7 @@ void kvm_vm_release(struct kvm_vm *vmp)
int ret;
list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
- vm_vcpu_rm(vcpu);
+ vm_vcpu_rm(vmp, vcpu);
ret = close(vmp->fd);
TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
@@ -1233,6 +1313,15 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
return rc;
}
+int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ return vcpu->fd;
+}
+
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
@@ -1561,6 +1650,42 @@ int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
return ret;
}
+void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct vcpu *vcpu;
+ uint32_t size = vm->dirty_ring_size;
+
+ TEST_ASSERT(size > 0, "Should enable dirty ring first");
+
+ vcpu = vcpu_find(vm, vcpuid);
+
+ TEST_ASSERT(vcpu, "Cannot find vcpu %u", vcpuid);
+
+ if (!vcpu->dirty_gfns) {
+ void *addr;
+
+ addr = mmap(NULL, size, PROT_READ,
+ MAP_PRIVATE, vcpu->fd,
+ vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
+
+ addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
+ MAP_PRIVATE, vcpu->fd,
+ vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
+
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, vcpu->fd,
+ vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
+
+ vcpu->dirty_gfns = addr;
+ vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
+ }
+
+ return vcpu->dirty_gfns;
+}
+
/*
* VM Ioctl
*
@@ -1583,6 +1708,32 @@ void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
}
/*
+ * KVM system ioctl
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * cmd - Ioctl number
+ * arg - Argument to pass to the ioctl
+ *
+ * Return: None
+ *
+ * Issues an arbitrary ioctl on a KVM fd.
+ */
+void kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
+{
+ int ret;
+
+ ret = ioctl(vm->kvm_fd, cmd, arg);
+ TEST_ASSERT(ret == 0, "KVM ioctl %lu failed, rc: %i errno: %i (%s)",
+ cmd, ret, errno, strerror(errno));
+}
+
+int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
+{
+ return ioctl(vm->kvm_fd, cmd, arg);
+}
+
+/*
* VM Dump
*
* Input Args:
@@ -1654,6 +1805,9 @@ static struct exit_reason {
{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
{KVM_EXIT_OSI, "OSI"},
{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
+ {KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
+ {KVM_EXIT_X86_RDMSR, "RDMSR"},
+ {KVM_EXIT_X86_WRMSR, "WRMSR"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
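The new kvm_ioctl()/_kvm_ioctl() helpers above round out the three file-descriptor levels the KVM API uses; which fd an ioctl targets determines its scope. A minimal orientation sketch, using only standard KVM uapi calls:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            /* System fd: capability checks and system-scoped ioctls such as
             * KVM_GET_SUPPORTED_HV_CPUID under KVM_CAP_SYS_HYPERV_CPUID */
            int kvm_fd = open("/dev/kvm", O_RDWR);

            /* VM fd: memory slots, dirty log ioctls, KVM_RESET_DIRTY_RINGS */
            int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

            /* vcpu fd: KVM_RUN, plus the mmap window that holds kvm_run and,
             * when enabled, the dirty ring at KVM_DIRTY_LOG_PAGE_OFFSET */
            int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);

            return vcpu_fd >= 0 ? 0 : 1;
    }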
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index f07d383d03a1..34465dc562d8 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -28,6 +28,9 @@ struct vcpu {
uint32_t id;
int fd;
struct kvm_run *state;
+ struct kvm_dirty_gfn *dirty_gfns;
+ uint32_t fetch_index;
+ uint32_t dirty_gfns_count;
};
struct kvm_vm {
@@ -52,6 +55,7 @@ struct kvm_vm {
vm_vaddr_t tss;
vm_vaddr_t idt;
vm_vaddr_t handlers;
+ uint32_t dirty_ring_size;
};
struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
diff --git a/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c
new file mode 100644
index 000000000000..86b9e611ad87
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test handler for the s390x DIAGNOSE 0x0318 instruction.
+ *
+ * Copyright (C) 2020, IBM
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+
+#define VCPU_ID 6
+
+#define ICPT_INSTRUCTION 0x04
+#define IPA0_DIAG 0x8300
+
+static void guest_code(void)
+{
+ uint64_t diag318_info = 0x12345678;
+
+ asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info));
+}
+
+/*
+ * The DIAGNOSE 0x0318 instruction call must be handled via userspace. As such,
+ * we create an ad-hoc VM here to handle the instruction then extract the
+ * necessary data. It is up to the caller to decide what to do with that data.
+ */
+static uint64_t diag318_handler(void)
+{
+ struct kvm_vm *vm;
+ struct kvm_run *run;
+ uint64_t reg;
+ uint64_t diag318_info;
+
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vcpu_run(vm, VCPU_ID);
+ run = vcpu_state(vm, VCPU_ID);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
+ "DIAGNOSE 0x0318 instruction was not intercepted");
+ TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION,
+ "Unexpected intercept code: 0x%x", run->s390_sieic.icptcode);
+ TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG,
+ "Unexpected IPA0 code: 0x%x", (run->s390_sieic.ipa & 0xff00));
+
+ reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ diag318_info = run->s.regs.gprs[reg];
+
+ TEST_ASSERT(diag318_info != 0, "DIAGNOSE 0x0318 info not set");
+
+ kvm_vm_free(vm);
+
+ return diag318_info;
+}
+
+uint64_t get_diag318_info(void)
+{
+ static uint64_t diag318_info;
+ static bool printed_skip;
+
+ /*
+ * If KVM does not support diag318, then return 0 to
+ * ensure tests do not break.
+ */
+ if (!kvm_check_cap(KVM_CAP_S390_DIAG318)) {
+ if (!printed_skip) {
+ fprintf(stdout, "KVM_CAP_S390_DIAG318 not supported. "
+ "Skipping diag318 test.\n");
+ printed_skip = true;
+ }
+ return 0;
+ }
+
+ /*
+ * If a test has previously requested the diag318 info,
+ * then don't bother spinning up a temporary VM again.
+ */
+ if (!diag318_info)
+ diag318_info = diag318_handler();
+
+ return diag318_info;
+}
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index 7349bb2e1a24..0152f356c099 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -5,8 +5,6 @@
* Copyright (C) 2019, Red Hat, Inc.
*/
-#define _GNU_SOURCE /* for program_invocation_name */
-
#include "processor.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
@@ -160,26 +158,6 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
virt_dump_region(stream, vm, indent, vm->pgd);
}
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
- void *guest_code)
-{
- /*
- * The additional amount of pages required for the page tables is:
- * 1 * n / 256 + 4 * (n / 256) / 2048 + 4 * (n / 256) / 2048^2 + ...
- * which is definitely smaller than (n / 256) * 2.
- */
- uint64_t extra_pg_pages = extra_mem_pages / 256 * 2;
- struct kvm_vm *vm;
-
- vm = vm_create(VM_MODE_DEFAULT,
- DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
-
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
- vm_vcpu_add_default(vm, vcpuid, guest_code);
-
- return vm;
-}
-
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index d10c5c05bdf0..95e1a757c629 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -5,8 +5,6 @@
* Copyright (C) 2018, Google LLC.
*/
-#define _GNU_SOURCE /* for program_invocation_name */
-
#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
@@ -731,36 +729,6 @@ void vcpu_set_cpuid(struct kvm_vm *vm,
}
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
- void *guest_code)
-{
- struct kvm_vm *vm;
- /*
- * For x86 the maximum page table size for a memory region
- * will be when only 4K pages are used. In that case the
- * total extra size for page tables (for extra N pages) will
- * be: N/512+N/512^2+N/512^3+... which is definitely smaller
- * than N/512*2.
- */
- uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
-
- /* Create VM */
- vm = vm_create(VM_MODE_DEFAULT,
- DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
- O_RDWR);
-
- /* Setup guest code */
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
-
- /* Setup IRQ Chip */
- vm_create_irqchip(vm);
-
- /* Add the first vCPU. */
- vm_vcpu_add_default(vm, vcpuid, guest_code);
-
- return vm;
-}
-
/*
* VCPU Get MSR
*
diff --git a/tools/testing/selftests/kvm/s390x/sync_regs_test.c b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
index 5731ccf34917..caf7b8859a94 100644
--- a/tools/testing/selftests/kvm/s390x/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
@@ -20,6 +20,7 @@
#include "test_util.h"
#include "kvm_util.h"
+#include "diag318_test_handler.h"
#define VCPU_ID 5
@@ -70,7 +71,7 @@ static void compare_sregs(struct kvm_sregs *left, struct kvm_sync_regs *right)
#undef REG_COMPARE
-#define TEST_SYNC_FIELDS (KVM_SYNC_GPRS|KVM_SYNC_ACRS|KVM_SYNC_CRS)
+#define TEST_SYNC_FIELDS (KVM_SYNC_GPRS|KVM_SYNC_ACRS|KVM_SYNC_CRS|KVM_SYNC_DIAG318)
#define INVALID_SYNC_FIELD 0x80000000
int main(int argc, char *argv[])
@@ -152,6 +153,12 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = KVM_SYNC_GPRS | KVM_SYNC_ACRS;
+
+ if (get_diag318_info() > 0) {
+ run->s.regs.diag318 = get_diag318_info();
+ run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
+ }
+
rv = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
@@ -164,6 +171,9 @@ int main(int argc, char *argv[])
TEST_ASSERT(run->s.regs.acrs[0] == 1 << 11,
"acr0 sync regs value incorrect 0x%x.",
run->s.regs.acrs[0]);
+ TEST_ASSERT(run->s.regs.diag318 == get_diag318_info(),
+ "diag318 sync regs value incorrect 0x%llx.",
+ run->s.regs.diag318);
vcpu_regs_get(vm, VCPU_ID, &regs);
compare_regs(&regs, &run->s.regs);
@@ -177,6 +187,7 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = 0;
run->s.regs.gprs[11] = 0xDEADBEEF;
+ run->s.regs.diag318 = 0x4B1D;
rv = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
@@ -186,6 +197,9 @@ int main(int argc, char *argv[])
TEST_ASSERT(run->s.regs.gprs[11] != 0xDEADBEEF,
"r11 sync regs value incorrect 0x%llx.",
run->s.regs.gprs[11]);
+ TEST_ASSERT(run->s.regs.diag318 != 0x4B1D,
+ "diag318 sync regs value incorrect 0x%llx.",
+ run->s.regs.diag318);
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 6f441dd9f33c..f127ed31dba7 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -121,8 +121,6 @@ static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
MEM_REGION_GPA, MEM_REGION_SLOT,
MEM_REGION_SIZE / getpagesize(), 0);
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index 140e91901582..f40fd097cb35 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -81,7 +81,6 @@ int main(int argc, char *argv[])
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
run = vcpu_state(vm, VCPU_ID);
while (1) {
diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c
index 2fc6b3af81a1..6097a8283377 100644
--- a/tools/testing/selftests/kvm/x86_64/debug_regs.c
+++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c
@@ -85,7 +85,6 @@ int main(void)
}
vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
run = vcpu_state(vm, VCPU_ID);
/* Test software BPs - int3 */
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index 757928199f19..37b8a78f6b74 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -92,8 +92,6 @@ int main(int argc, char *argv[])
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
if (!nested_vmx_supported() ||
!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 745b708c2d3b..88a595b7fbdd 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -46,19 +46,19 @@ static bool smt_possible(void)
}
static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
- bool evmcs_enabled)
+ bool evmcs_expected)
{
int i;
int nent = 9;
u32 test_val;
- if (evmcs_enabled)
+ if (evmcs_expected)
nent += 1; /* 0x4000000A */
TEST_ASSERT(hv_cpuid_entries->nent == nent,
"KVM_GET_SUPPORTED_HV_CPUID should return %d entries"
" with evmcs=%d (returned %d)",
- nent, evmcs_enabled, hv_cpuid_entries->nent);
+ nent, evmcs_expected, hv_cpuid_entries->nent);
for (i = 0; i < hv_cpuid_entries->nent; i++) {
struct kvm_cpuid_entry2 *entry = &hv_cpuid_entries->entries[i];
@@ -68,7 +68,7 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
"function %x is our of supported range",
entry->function);
- TEST_ASSERT(evmcs_enabled || (entry->function != 0x4000000A),
+ TEST_ASSERT(evmcs_expected || (entry->function != 0x4000000A),
"0x4000000A leaf should not be reported");
TEST_ASSERT(entry->index == 0,
@@ -87,7 +87,7 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
TEST_ASSERT(entry->eax == test_val,
"Wrong max leaf report in 0x40000000.EAX: %x"
" (evmcs=%d)",
- entry->eax, evmcs_enabled
+ entry->eax, evmcs_expected
);
break;
case 0x40000004:
@@ -110,20 +110,23 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
}
-void test_hv_cpuid_e2big(struct kvm_vm *vm)
+void test_hv_cpuid_e2big(struct kvm_vm *vm, bool system)
{
static struct kvm_cpuid2 cpuid = {.nent = 0};
int ret;
- ret = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
+ if (!system)
+ ret = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
+ else
+ ret = _kvm_ioctl(vm, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
TEST_ASSERT(ret == -1 && errno == E2BIG,
- "KVM_GET_SUPPORTED_HV_CPUID didn't fail with -E2BIG when"
- " it should have: %d %d", ret, errno);
+ "%s KVM_GET_SUPPORTED_HV_CPUID didn't fail with -E2BIG when"
+ " it should have: %d %d", system ? "KVM" : "vCPU", ret, errno);
}
-struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm)
+struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm, bool system)
{
int nent = 20; /* should be enough */
static struct kvm_cpuid2 *cpuid;
@@ -137,7 +140,10 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm)
cpuid->nent = nent;
- vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+ if (!system)
+ vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+ else
+ kvm_ioctl(vm, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
return cpuid;
}
@@ -146,45 +152,50 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm)
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
- int rv, stage;
struct kvm_cpuid2 *hv_cpuid_entries;
- bool evmcs_enabled;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- rv = kvm_check_cap(KVM_CAP_HYPERV_CPUID);
- if (!rv) {
+ if (!kvm_check_cap(KVM_CAP_HYPERV_CPUID)) {
print_skip("KVM_CAP_HYPERV_CPUID not supported");
exit(KSFT_SKIP);
}
- for (stage = 0; stage < 3; stage++) {
- evmcs_enabled = false;
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- switch (stage) {
- case 0:
- test_hv_cpuid_e2big(vm);
- continue;
- case 1:
- break;
- case 2:
- if (!nested_vmx_supported() ||
- !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
- print_skip("Enlightened VMCS is unsupported");
- continue;
- }
- vcpu_enable_evmcs(vm, VCPU_ID);
- evmcs_enabled = true;
- break;
- }
+ /* Test vCPU ioctl version */
+ test_hv_cpuid_e2big(vm, false);
+
+ hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm, false);
+ test_hv_cpuid(hv_cpuid_entries, false);
+ free(hv_cpuid_entries);
- hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
- test_hv_cpuid(hv_cpuid_entries, evmcs_enabled);
- free(hv_cpuid_entries);
- kvm_vm_free(vm);
+ if (!nested_vmx_supported() ||
+ !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
+ print_skip("Enlightened VMCS is unsupported");
+ goto do_sys;
}
+ vcpu_enable_evmcs(vm, VCPU_ID);
+ hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm, false);
+ test_hv_cpuid(hv_cpuid_entries, true);
+ free(hv_cpuid_entries);
+
+do_sys:
+ /* Test system ioctl version */
+ if (!kvm_check_cap(KVM_CAP_SYS_HYPERV_CPUID)) {
+ print_skip("KVM_CAP_SYS_HYPERV_CPUID not supported");
+ goto out;
+ }
+
+ test_hv_cpuid_e2big(vm, true);
+
+ hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm, true);
+ test_hv_cpuid(hv_cpuid_entries, nested_vmx_supported());
+ free(hv_cpuid_entries);
+
+out:
+ kvm_vm_free(vm);
return 0;
}
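
The e2big check above pins down the sizing contract for KVM_GET_SUPPORTED_HV_CPUID: a zero-sized kvm_cpuid2 must fail with E2BIG, for both the vCPU ioctl and the new system ioctl. A minimal sketch of how a caller can size its buffer under that contract, outside the selftest harness (vcpu_fd is assumed to be an open vCPU fd; allocation-failure handling is elided):

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_cpuid2 *get_hv_cpuid(int vcpu_fd)
{
	struct kvm_cpuid2 *cpuid = NULL;
	int nent = 1;

	for (;;) {
		size_t size = sizeof(*cpuid) +
			      nent * sizeof(struct kvm_cpuid_entry2);

		cpuid = realloc(cpuid, size);
		memset(cpuid, 0, size);
		cpuid->nent = nent;

		if (!ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid))
			return cpuid;	/* success; nent holds the count */
		if (errno != E2BIG) {	/* anything else is a real error */
			free(cpuid);
			return NULL;
		}
		nent *= 2;		/* too small: grow and retry */
	}
}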
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
index b10a27485bad..732b244d6956 100644
--- a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
+++ b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
@@ -211,8 +211,8 @@ int main(void)
struct kvm_vm *vm;
if (!kvm_check_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) {
- pr_info("will skip kvm paravirt restriction tests.\n");
- return 0;
+ print_skip("KVM_CAP_ENFORCE_PV_FEATURE_CPUID not supported");
+ exit(KSFT_SKIP);
}
vm = vm_create_default(VCPU_ID, 0, guest_main);
diff --git a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
index 9f7656184f31..318be0bf77ab 100644
--- a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
@@ -24,16 +24,106 @@
#define VCPU_ID 5
+static void test_cr4_feature_bit(struct kvm_vm *vm, struct kvm_sregs *orig,
+ uint64_t feature_bit)
+{
+ struct kvm_sregs sregs;
+ int rc;
+
+ /* Skip the sub-test, the feature is supported. */
+ if (orig->cr4 & feature_bit)
+ return;
+
+ memcpy(&sregs, orig, sizeof(sregs));
+ sregs.cr4 |= feature_bit;
+
+ rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ TEST_ASSERT(rc, "KVM allowed unsupported CR4 bit (0x%lx)", feature_bit);
+
+ /* Sanity check that KVM didn't change anything. */
+ vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ TEST_ASSERT(!memcmp(&sregs, orig, sizeof(sregs)), "KVM modified sregs");
+}
+
+static uint64_t calc_cr4_feature_bits(struct kvm_vm *vm)
+{
+ struct kvm_cpuid_entry2 *cpuid_1, *cpuid_7;
+ uint64_t cr4;
+
+ cpuid_1 = kvm_get_supported_cpuid_entry(1);
+ cpuid_7 = kvm_get_supported_cpuid_entry(7);
+
+ cr4 = X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE |
+ X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE | X86_CR4_PGE |
+ X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT;
+ if (cpuid_7->ecx & CPUID_UMIP)
+ cr4 |= X86_CR4_UMIP;
+ if (cpuid_7->ecx & CPUID_LA57)
+ cr4 |= X86_CR4_LA57;
+ if (cpuid_1->ecx & CPUID_VMX)
+ cr4 |= X86_CR4_VMXE;
+ if (cpuid_1->ecx & CPUID_SMX)
+ cr4 |= X86_CR4_SMXE;
+ if (cpuid_7->ebx & CPUID_FSGSBASE)
+ cr4 |= X86_CR4_FSGSBASE;
+ if (cpuid_1->ecx & CPUID_PCID)
+ cr4 |= X86_CR4_PCIDE;
+ if (cpuid_1->ecx & CPUID_XSAVE)
+ cr4 |= X86_CR4_OSXSAVE;
+ if (cpuid_7->ebx & CPUID_SMEP)
+ cr4 |= X86_CR4_SMEP;
+ if (cpuid_7->ebx & CPUID_SMAP)
+ cr4 |= X86_CR4_SMAP;
+ if (cpuid_7->ecx & CPUID_PKU)
+ cr4 |= X86_CR4_PKE;
+
+ return cr4;
+}
+
int main(int argc, char *argv[])
{
struct kvm_sregs sregs;
struct kvm_vm *vm;
+ uint64_t cr4;
int rc;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- /* Create VM */
+ /*
+ * Create a dummy VM, specifically to avoid doing KVM_SET_CPUID2, and
+ * use it to verify all supported CR4 bits can be set prior to defining
+ * the vCPU model, i.e. without doing KVM_SET_CPUID2.
+ */
+ vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+ vm_vcpu_add(vm, VCPU_ID);
+
+ vcpu_sregs_get(vm, VCPU_ID, &sregs);
+
+ sregs.cr4 |= calc_cr4_feature_bits(vm);
+ cr4 = sregs.cr4;
+
+ rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ TEST_ASSERT(!rc, "Failed to set supported CR4 bits (0x%lx)", cr4);
+
+ vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ TEST_ASSERT(sregs.cr4 == cr4, "sregs.CR4 (0x%llx) != CR4 (0x%lx)",
+ sregs.cr4, cr4);
+
+ /* Verify all unsupported features are rejected by KVM. */
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_UMIP);
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_LA57);
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_VMXE);
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_SMXE);
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_FSGSBASE);
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_PCIDE);
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_OSXSAVE);
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_SMEP);
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_SMAP);
+ test_cr4_feature_bit(vm, &sregs, X86_CR4_PKE);
+ kvm_vm_free(vm);
+
+ /* Create a "real" VM and verify APIC_BASE can be set. */
vm = vm_create_default(VCPU_ID, 0, NULL);
vcpu_sregs_get(vm, VCPU_ID, &sregs);
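
The probes above lean on the selftest convention, also visible in the hyperv changes, that an underscore-prefixed wrapper (_vcpu_sregs_set, _vcpu_run) returns the ioctl result instead of asserting, so a rejection can be checked rather than fatal. The same CR4 probe expressed with raw ioctls, as a sketch under assumptions (an open vcpu_fd; EINVAL as the rejection errno):

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void probe_cr4_bit(int vcpu_fd, uint64_t bit)
{
	struct kvm_sregs orig, sregs;
	int rc;

	rc = ioctl(vcpu_fd, KVM_GET_SREGS, &orig);
	assert(!rc);
	if (orig.cr4 & bit)		/* already set: bit is supported */
		return;

	sregs = orig;
	sregs.cr4 |= bit;
	rc = ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
	assert(rc == -1 && errno == EINVAL);	/* assumed errno */

	/* Failure must not have modified the register state. */
	rc = ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
	assert(!rc && !memcmp(&sregs, &orig, sizeof(sregs)));
}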
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index ae39a220609f..613c42c5a9b8 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -102,8 +102,6 @@ int main(int argc, char *argv[])
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
run = vcpu_state(vm, VCPU_ID);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index f6c8b9042f8a..32854c1462ad 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -165,7 +165,6 @@ int main(int argc, char *argv[])
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
run = vcpu_state(vm, VCPU_ID);
vcpu_regs_get(vm, VCPU_ID, &regs1);
diff --git a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
index 0e1adb4e3199..be2ca157485b 100644
--- a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
@@ -44,7 +44,6 @@ int main(int argc, char *argv[])
nested_svm_check_supported();
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
vcpu_alloc_svm(vm, &svm_gva);
vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
index f8e761149daa..e357d8e222d4 100644
--- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
@@ -107,7 +107,6 @@ int main(void)
uint64_t val;
vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
val = 0;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
diff --git a/tools/testing/selftests/kvm/x86_64/user_msr_test.c b/tools/testing/selftests/kvm/x86_64/user_msr_test.c
deleted file mode 100644
index cbe1b08890ff..000000000000
--- a/tools/testing/selftests/kvm/x86_64/user_msr_test.c
+++ /dev/null
@@ -1,248 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * tests for KVM_CAP_X86_USER_SPACE_MSR and KVM_X86_SET_MSR_FILTER
- *
- * Copyright (C) 2020, Amazon Inc.
- *
- * This is a functional test to verify that we can deflect MSR events
- * into user space.
- */
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-
-#include "kvm_util.h"
-#include "processor.h"
-
-#define VCPU_ID 5
-
-static u32 msr_reads, msr_writes;
-
-static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
-static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
-static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
-static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
-static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
-static u8 bitmap_deadbeef[1] = { 0x1 };
-
-static void deny_msr(uint8_t *bitmap, u32 msr)
-{
- u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);
-
- bitmap[idx / 8] &= ~(1 << (idx % 8));
-}
-
-static void prepare_bitmaps(void)
-{
- memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
- memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
- memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
- memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
- memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));
-
- deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
- deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
- deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
-}
-
-struct kvm_msr_filter filter = {
- .flags = KVM_MSR_FILTER_DEFAULT_DENY,
- .ranges = {
- {
- .flags = KVM_MSR_FILTER_READ,
- .base = 0x00000000,
- .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
- .bitmap = bitmap_00000000,
- }, {
- .flags = KVM_MSR_FILTER_WRITE,
- .base = 0x00000000,
- .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
- .bitmap = bitmap_00000000_write,
- }, {
- .flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
- .base = 0x40000000,
- .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
- .bitmap = bitmap_40000000,
- }, {
- .flags = KVM_MSR_FILTER_READ,
- .base = 0xc0000000,
- .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
- .bitmap = bitmap_c0000000_read,
- }, {
- .flags = KVM_MSR_FILTER_WRITE,
- .base = 0xc0000000,
- .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
- .bitmap = bitmap_c0000000,
- }, {
- .flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
- .base = 0xdeadbeef,
- .nmsrs = 1,
- .bitmap = bitmap_deadbeef,
- },
- },
-};
-
-struct kvm_msr_filter no_filter = {
- .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
-};
-
-static void guest_msr_calls(bool trapped)
-{
- /* This goes into the in-kernel emulation */
- wrmsr(MSR_SYSCALL_MASK, 0);
-
- if (trapped) {
- /* This goes into user space emulation */
- GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
- GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
- } else {
- GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
- GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
- }
-
- /* If trapped == true, this goes into user space emulation */
- wrmsr(MSR_IA32_POWER_CTL, 0x1234);
-
- /* This goes into the in-kernel emulation */
- rdmsr(MSR_IA32_POWER_CTL);
-
- /* Invalid MSR, should always be handled by user space exit */
- GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
- wrmsr(0xdeadbeef, 0x1234);
-}
-
-static void guest_code(void)
-{
- guest_msr_calls(true);
-
- /*
- * Disable msr filtering, so that the kernel
- * handles everything in the next round
- */
- GUEST_SYNC(0);
-
- guest_msr_calls(false);
-
- GUEST_DONE();
-}
-
-static int handle_ucall(struct kvm_vm *vm)
-{
- struct ucall uc;
-
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_ABORT:
- TEST_FAIL("Guest assertion not met");
- break;
- case UCALL_SYNC:
- vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter);
- break;
- case UCALL_DONE:
- return 1;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
-
- return 0;
-}
-
-static void handle_rdmsr(struct kvm_run *run)
-{
- run->msr.data = run->msr.index;
- msr_reads++;
-
- if (run->msr.index == MSR_SYSCALL_MASK ||
- run->msr.index == MSR_GS_BASE) {
- TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
- "MSR read trap w/o access fault");
- }
-
- if (run->msr.index == 0xdeadbeef) {
- TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
- "MSR deadbeef read trap w/o inval fault");
- }
-}
-
-static void handle_wrmsr(struct kvm_run *run)
-{
- /* ignore */
- msr_writes++;
-
- if (run->msr.index == MSR_IA32_POWER_CTL) {
- TEST_ASSERT(run->msr.data == 0x1234,
- "MSR data for MSR_IA32_POWER_CTL incorrect");
- TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
- "MSR_IA32_POWER_CTL trap w/o access fault");
- }
-
- if (run->msr.index == 0xdeadbeef) {
- TEST_ASSERT(run->msr.data == 0x1234,
- "MSR data for deadbeef incorrect");
- TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
- "deadbeef trap w/o inval fault");
- }
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_X86_USER_SPACE_MSR,
- .args[0] = KVM_MSR_EXIT_REASON_INVAL |
- KVM_MSR_EXIT_REASON_UNKNOWN |
- KVM_MSR_EXIT_REASON_FILTER,
- };
- struct kvm_vm *vm;
- struct kvm_run *run;
- int rc;
-
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- run = vcpu_state(vm, VCPU_ID);
-
- rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
- TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
- vm_enable_cap(vm, &cap);
-
- rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
- TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
-
- prepare_bitmaps();
- vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter);
-
- while (1) {
- rc = _vcpu_run(vm, VCPU_ID);
-
- TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
-
- switch (run->exit_reason) {
- case KVM_EXIT_X86_RDMSR:
- handle_rdmsr(run);
- break;
- case KVM_EXIT_X86_WRMSR:
- handle_wrmsr(run);
- break;
- case KVM_EXIT_IO:
- if (handle_ucall(vm))
- goto done;
- break;
- }
-
- }
-
-done:
- TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
- TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");
-
- kvm_vm_free(vm);
-
- return 0;
-}
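
The deleted filter setup reappears verbatim in userspace_msr_exit_test.c below (filter_deny, prepare_bitmaps, the guest code and exit handlers). The bitmap semantics it relies on: within a matching range, the bit at the MSR's offset from range->base decides the flagged access, set meaning allow and clear meaning deny, while the filter's default only covers MSRs no range matches; deny_msr() computes the same offset by masking, which holds for the aligned bases used here. A sketch of that lookup (msr_allowed_in_range is an illustrative helper, not a KVM or selftest API):

#include <stdbool.h>
#include <stdint.h>

static bool msr_allowed_in_range(const uint8_t *bitmap, uint32_t base,
				 uint32_t msr)
{
	uint32_t bit = msr - base;	/* offset of the MSR in the range */

	return bitmap[bit / 8] & (1u << (bit % 8));
}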
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
new file mode 100644
index 000000000000..72c0d0797522
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Google LLC.
+ *
+ * Tests for exiting into userspace on registered MSRs
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "vmx.h"
+
+/* Forced emulation prefix, used to invoke the emulator unconditionally. */
+#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
+#define KVM_FEP_LENGTH 5
+static int fep_available = 1;
+
+#define VCPU_ID 1
+#define MSR_NON_EXISTENT 0x474f4f00
+
+static u64 deny_bits = 0;
+struct kvm_msr_filter filter_allow = {
+ .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+ .ranges = {
+ {
+ .flags = KVM_MSR_FILTER_READ |
+ KVM_MSR_FILTER_WRITE,
+ .nmsrs = 1,
+ /* Test an MSR the kernel knows about. */
+ .base = MSR_IA32_XSS,
+ .bitmap = (uint8_t*)&deny_bits,
+ }, {
+ .flags = KVM_MSR_FILTER_READ |
+ KVM_MSR_FILTER_WRITE,
+ .nmsrs = 1,
+ /* Test an MSR the kernel doesn't know about. */
+ .base = MSR_IA32_FLUSH_CMD,
+ .bitmap = (uint8_t*)&deny_bits,
+ }, {
+ .flags = KVM_MSR_FILTER_READ |
+ KVM_MSR_FILTER_WRITE,
+ .nmsrs = 1,
+ /* Test a fabricated MSR that no one knows about. */
+ .base = MSR_NON_EXISTENT,
+ .bitmap = (uint8_t*)&deny_bits,
+ },
+ },
+};
+
+struct kvm_msr_filter filter_fs = {
+ .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+ .ranges = {
+ {
+ .flags = KVM_MSR_FILTER_READ,
+ .nmsrs = 1,
+ .base = MSR_FS_BASE,
+ .bitmap = (uint8_t*)&deny_bits,
+ },
+ },
+};
+
+struct kvm_msr_filter filter_gs = {
+ .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+ .ranges = {
+ {
+ .flags = KVM_MSR_FILTER_READ,
+ .nmsrs = 1,
+ .base = MSR_GS_BASE,
+ .bitmap = (uint8_t*)&deny_bits,
+ },
+ },
+};
+
+static uint64_t msr_non_existent_data;
+static int guest_exception_count;
+static u32 msr_reads, msr_writes;
+
+static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
+static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
+static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
+static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
+static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
+static u8 bitmap_deadbeef[1] = { 0x1 };
+
+static void deny_msr(uint8_t *bitmap, u32 msr)
+{
+ u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);
+
+ bitmap[idx / 8] &= ~(1 << (idx % 8));
+}
+
+static void prepare_bitmaps(void)
+{
+ memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
+ memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
+ memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
+ memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
+ memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));
+
+ deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
+ deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
+ deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
+}
+
+struct kvm_msr_filter filter_deny = {
+ .flags = KVM_MSR_FILTER_DEFAULT_DENY,
+ .ranges = {
+ {
+ .flags = KVM_MSR_FILTER_READ,
+ .base = 0x00000000,
+ .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
+ .bitmap = bitmap_00000000,
+ }, {
+ .flags = KVM_MSR_FILTER_WRITE,
+ .base = 0x00000000,
+ .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
+ .bitmap = bitmap_00000000_write,
+ }, {
+ .flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
+ .base = 0x40000000,
+ .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
+ .bitmap = bitmap_40000000,
+ }, {
+ .flags = KVM_MSR_FILTER_READ,
+ .base = 0xc0000000,
+ .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
+ .bitmap = bitmap_c0000000_read,
+ }, {
+ .flags = KVM_MSR_FILTER_WRITE,
+ .base = 0xc0000000,
+ .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
+ .bitmap = bitmap_c0000000,
+ }, {
+ .flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
+ .base = 0xdeadbeef,
+ .nmsrs = 1,
+ .bitmap = bitmap_deadbeef,
+ },
+ },
+};
+
+struct kvm_msr_filter no_filter_deny = {
+ .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+};
+
+/*
+ * Note: Force test_rdmsr() to not be inlined to prevent the labels,
+ * rdmsr_start and rdmsr_end, from being defined multiple times.
+ */
+static noinline uint64_t test_rdmsr(uint32_t msr)
+{
+ uint32_t a, d;
+
+ guest_exception_count = 0;
+
+ __asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
+ "=a"(a), "=d"(d) : "c"(msr) : "memory");
+
+ return a | ((uint64_t) d << 32);
+}
+
+/*
+ * Note: Force test_wrmsr() to not be inlined to prevent the labels,
+ * wrmsr_start and wrmsr_end, from being defined multiple times.
+ */
+static noinline void test_wrmsr(uint32_t msr, uint64_t value)
+{
+ uint32_t a = value;
+ uint32_t d = value >> 32;
+
+ guest_exception_count = 0;
+
+ __asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
+ "a"(a), "d"(d), "c"(msr) : "memory");
+}
+
+extern char rdmsr_start, rdmsr_end;
+extern char wrmsr_start, wrmsr_end;
+
+/*
+ * Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
+ * em_rdmsr_start and em_rdmsr_end, from being defined multiple times.
+ */
+static noinline uint64_t test_em_rdmsr(uint32_t msr)
+{
+ uint32_t a, d;
+
+ guest_exception_count = 0;
+
+ __asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
+ "=a"(a), "=d"(d) : "c"(msr) : "memory");
+
+ return a | ((uint64_t) d << 32);
+}
+
+/*
+ * Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
+ * em_wrmsr_start and em_wrmsr_end, from being defined multiple times.
+ */
+static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
+{
+ uint32_t a = value;
+ uint32_t d = value >> 32;
+
+ guest_exception_count = 0;
+
+ __asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
+ "a"(a), "d"(d), "c"(msr) : "memory");
+}
+
+extern char em_rdmsr_start, em_rdmsr_end;
+extern char em_wrmsr_start, em_wrmsr_end;
+
+static void guest_code_filter_allow(void)
+{
+ uint64_t data;
+
+ /*
+ * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
+ *
+ * A GP is thrown if anything other than 0 is written to
+ * MSR_IA32_XSS.
+ */
+ data = test_rdmsr(MSR_IA32_XSS);
+ GUEST_ASSERT(data == 0);
+ GUEST_ASSERT(guest_exception_count == 0);
+
+ test_wrmsr(MSR_IA32_XSS, 0);
+ GUEST_ASSERT(guest_exception_count == 0);
+
+ test_wrmsr(MSR_IA32_XSS, 1);
+ GUEST_ASSERT(guest_exception_count == 1);
+
+ /*
+ * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
+ *
+ * A GP is thrown if MSR_IA32_FLUSH_CMD is read
+ * from or if a value other than 1 is written to it.
+ */
+ test_rdmsr(MSR_IA32_FLUSH_CMD);
+ GUEST_ASSERT(guest_exception_count == 1);
+
+ test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
+ GUEST_ASSERT(guest_exception_count == 1);
+
+ test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
+ GUEST_ASSERT(guest_exception_count == 0);
+
+ /*
+ * Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
+ *
+ * Test that a fabricated MSR can pass through the kernel
+ * and be handled in userspace.
+ */
+ test_wrmsr(MSR_NON_EXISTENT, 2);
+ GUEST_ASSERT(guest_exception_count == 0);
+
+ data = test_rdmsr(MSR_NON_EXISTENT);
+ GUEST_ASSERT(data == 2);
+ GUEST_ASSERT(guest_exception_count == 0);
+
+ /*
+ * Test to see if the instruction emulator is available (i.e. the module
+ * parameter 'kvm.force_emulation_prefix=1' is set). This instruction
+ * will #UD if it isn't available.
+ */
+ __asm__ __volatile__(KVM_FEP "nop");
+
+ if (fep_available) {
+ /* Let userspace know we aren't done. */
+ GUEST_SYNC(0);
+
+ /*
+ * Now run the same tests with the instruction emulator.
+ */
+ data = test_em_rdmsr(MSR_IA32_XSS);
+ GUEST_ASSERT(data == 0);
+ GUEST_ASSERT(guest_exception_count == 0);
+ test_em_wrmsr(MSR_IA32_XSS, 0);
+ GUEST_ASSERT(guest_exception_count == 0);
+ test_em_wrmsr(MSR_IA32_XSS, 1);
+ GUEST_ASSERT(guest_exception_count == 1);
+
+ test_em_rdmsr(MSR_IA32_FLUSH_CMD);
+ GUEST_ASSERT(guest_exception_count == 1);
+ test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
+ GUEST_ASSERT(guest_exception_count == 1);
+ test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
+ GUEST_ASSERT(guest_exception_count == 0);
+
+ test_em_wrmsr(MSR_NON_EXISTENT, 2);
+ GUEST_ASSERT(guest_exception_count == 0);
+ data = test_em_rdmsr(MSR_NON_EXISTENT);
+ GUEST_ASSERT(data == 2);
+ GUEST_ASSERT(guest_exception_count == 0);
+ }
+
+ GUEST_DONE();
+}
+
+static void guest_msr_calls(bool trapped)
+{
+ /* This goes into the in-kernel emulation */
+ wrmsr(MSR_SYSCALL_MASK, 0);
+
+ if (trapped) {
+ /* This goes into user space emulation */
+ GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
+ GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
+ } else {
+ GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
+ GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
+ }
+
+ /* If trapped == true, this goes into user space emulation */
+ wrmsr(MSR_IA32_POWER_CTL, 0x1234);
+
+ /* This goes into the in-kernel emulation */
+ rdmsr(MSR_IA32_POWER_CTL);
+
+ /* Invalid MSR, should always be handled by user space exit */
+ GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
+ wrmsr(0xdeadbeef, 0x1234);
+}
+
+static void guest_code_filter_deny(void)
+{
+ guest_msr_calls(true);
+
+ /*
+ * Disable msr filtering, so that the kernel
+ * handles everything in the next round
+ */
+ GUEST_SYNC(0);
+
+ guest_msr_calls(false);
+
+ GUEST_DONE();
+}
+
+static void guest_code_permission_bitmap(void)
+{
+ uint64_t data;
+
+ data = test_rdmsr(MSR_FS_BASE);
+ GUEST_ASSERT(data == MSR_FS_BASE);
+ data = test_rdmsr(MSR_GS_BASE);
+ GUEST_ASSERT(data != MSR_GS_BASE);
+
+ /* Let userspace know to switch the filter */
+ GUEST_SYNC(0);
+
+ data = test_rdmsr(MSR_FS_BASE);
+ GUEST_ASSERT(data != MSR_FS_BASE);
+ data = test_rdmsr(MSR_GS_BASE);
+ GUEST_ASSERT(data == MSR_GS_BASE);
+
+ GUEST_DONE();
+}
+
+static void __guest_gp_handler(struct ex_regs *regs,
+ char *r_start, char *r_end,
+ char *w_start, char *w_end)
+{
+ if (regs->rip == (uintptr_t)r_start) {
+ regs->rip = (uintptr_t)r_end;
+ regs->rax = 0;
+ regs->rdx = 0;
+ } else if (regs->rip == (uintptr_t)w_start) {
+ regs->rip = (uintptr_t)w_end;
+ } else {
+ GUEST_ASSERT(!"RIP is at an unknown location!");
+ }
+
+ ++guest_exception_count;
+}
+
+static void guest_gp_handler(struct ex_regs *regs)
+{
+ __guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
+ &wrmsr_start, &wrmsr_end);
+}
+
+static void guest_fep_gp_handler(struct ex_regs *regs)
+{
+ __guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
+ &em_wrmsr_start, &em_wrmsr_end);
+}
+
+static void guest_ud_handler(struct ex_regs *regs)
+{
+ fep_available = 0;
+ regs->rip += KVM_FEP_LENGTH;
+}
+
+static void run_guest(struct kvm_vm *vm)
+{
+ int rc;
+
+ rc = _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+}
+
+static void check_for_guest_assert(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ if (run->exit_reason == KVM_EXIT_IO &&
+ get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
+ TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
+ }
+}
+
+static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+
+ check_for_guest_assert(vm);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+ TEST_ASSERT(run->msr.index == msr_index,
+ "Unexpected msr (0x%04x), expected 0x%04x",
+ run->msr.index, msr_index);
+
+ switch (run->msr.index) {
+ case MSR_IA32_XSS:
+ run->msr.data = 0;
+ break;
+ case MSR_IA32_FLUSH_CMD:
+ run->msr.error = 1;
+ break;
+ case MSR_NON_EXISTENT:
+ run->msr.data = msr_non_existent_data;
+ break;
+ case MSR_FS_BASE:
+ run->msr.data = MSR_FS_BASE;
+ break;
+ case MSR_GS_BASE:
+ run->msr.data = MSR_GS_BASE;
+ break;
+ default:
+ TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
+ }
+}
+
+static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+
+ check_for_guest_assert(vm);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+ TEST_ASSERT(run->msr.index == msr_index,
+ "Unexpected msr (0x%04x), expected 0x%04x",
+ run->msr.index, msr_index);
+
+ switch (run->msr.index) {
+ case MSR_IA32_XSS:
+ if (run->msr.data != 0)
+ run->msr.error = 1;
+ break;
+ case MSR_IA32_FLUSH_CMD:
+ if (run->msr.data != 1)
+ run->msr.error = 1;
+ break;
+ case MSR_NON_EXISTENT:
+ msr_non_existent_data = run->msr.data;
+ break;
+ default:
+ TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
+ }
+}
+
+static void process_ucall_done(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ check_for_guest_assert(vm);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s)",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
+ "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
+ uc.cmd, UCALL_DONE);
+}
+
+static uint64_t process_ucall(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc = {};
+
+ check_for_guest_assert(vm);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s)",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
+ break;
+ case UCALL_ABORT:
+ check_for_guest_assert(vm);
+ break;
+ case UCALL_DONE:
+ process_ucall_done(vm);
+ break;
+ default:
+ TEST_ASSERT(false, "Unexpected ucall");
+ }
+
+ return uc.cmd;
+}
+
+static void run_guest_then_process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
+{
+ run_guest(vm);
+ process_rdmsr(vm, msr_index);
+}
+
+static void run_guest_then_process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
+{
+ run_guest(vm);
+ process_wrmsr(vm, msr_index);
+}
+
+static uint64_t run_guest_then_process_ucall(struct kvm_vm *vm)
+{
+ run_guest(vm);
+ return process_ucall(vm);
+}
+
+static void run_guest_then_process_ucall_done(struct kvm_vm *vm)
+{
+ run_guest(vm);
+ process_ucall_done(vm);
+}
+
+static void test_msr_filter_allow(void)
+{
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_X86_USER_SPACE_MSR,
+ .args[0] = KVM_MSR_EXIT_REASON_FILTER,
+ };
+ struct kvm_vm *vm;
+ int rc;
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code_filter_allow);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
+ TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
+ vm_enable_cap(vm, &cap);
+
+ rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
+ TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
+
+ vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+
+ vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+
+ /* Process guest code userspace exits. */
+ run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
+ run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
+ run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
+
+ run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
+
+ run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
+ run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);
+
+ vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
+ run_guest(vm);
+ vm_handle_exception(vm, UD_VECTOR, NULL);
+
+ if (process_ucall(vm) != UCALL_DONE) {
+ vm_handle_exception(vm, GP_VECTOR, guest_fep_gp_handler);
+
+ /* Process emulated rdmsr and wrmsr instructions. */
+ run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
+ run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
+ run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
+
+ run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
+
+ run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
+ run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);
+
+ /* Confirm the guest completed without issues. */
+ run_guest_then_process_ucall_done(vm);
+ } else {
+ printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
+ }
+
+ kvm_vm_free(vm);
+}
+
+static int handle_ucall(struct kvm_vm *vm)
+{
+ struct ucall uc;
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_FAIL("Guest assertion not met");
+ break;
+ case UCALL_SYNC:
+ vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
+ break;
+ case UCALL_DONE:
+ return 1;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+
+ return 0;
+}
+
+static void handle_rdmsr(struct kvm_run *run)
+{
+ run->msr.data = run->msr.index;
+ msr_reads++;
+
+ if (run->msr.index == MSR_SYSCALL_MASK ||
+ run->msr.index == MSR_GS_BASE) {
+ TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
+ "MSR read trap w/o access fault");
+ }
+
+ if (run->msr.index == 0xdeadbeef) {
+ TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
+ "MSR deadbeef read trap w/o inval fault");
+ }
+}
+
+static void handle_wrmsr(struct kvm_run *run)
+{
+ /* ignore */
+ msr_writes++;
+
+ if (run->msr.index == MSR_IA32_POWER_CTL) {
+ TEST_ASSERT(run->msr.data == 0x1234,
+ "MSR data for MSR_IA32_POWER_CTL incorrect");
+ TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
+ "MSR_IA32_POWER_CTL trap w/o access fault");
+ }
+
+ if (run->msr.index == 0xdeadbeef) {
+ TEST_ASSERT(run->msr.data == 0x1234,
+ "MSR data for deadbeef incorrect");
+ TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
+ "deadbeef trap w/o inval fault");
+ }
+}
+
+static void test_msr_filter_deny(void)
+{
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_X86_USER_SPACE_MSR,
+ .args[0] = KVM_MSR_EXIT_REASON_INVAL |
+ KVM_MSR_EXIT_REASON_UNKNOWN |
+ KVM_MSR_EXIT_REASON_FILTER,
+ };
+ struct kvm_vm *vm;
+ struct kvm_run *run;
+ int rc;
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code_filter_deny);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ run = vcpu_state(vm, VCPU_ID);
+
+ rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
+ TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
+ vm_enable_cap(vm, &cap);
+
+ rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
+ TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
+
+ prepare_bitmaps();
+ vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);
+
+ while (1) {
+ rc = _vcpu_run(vm, VCPU_ID);
+
+ TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_X86_RDMSR:
+ handle_rdmsr(run);
+ break;
+ case KVM_EXIT_X86_WRMSR:
+ handle_wrmsr(run);
+ break;
+ case KVM_EXIT_IO:
+ if (handle_ucall(vm))
+ goto done;
+ break;
+ }
+
+ }
+
+done:
+ TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
+ TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");
+
+ kvm_vm_free(vm);
+}
+
+static void test_msr_permission_bitmap(void)
+{
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_X86_USER_SPACE_MSR,
+ .args[0] = KVM_MSR_EXIT_REASON_FILTER,
+ };
+ struct kvm_vm *vm;
+ int rc;
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code_permission_bitmap);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
+ TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
+ vm_enable_cap(vm, &cap);
+
+ rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
+ TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
+
+ vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
+ run_guest_then_process_rdmsr(vm, MSR_FS_BASE);
+ TEST_ASSERT(run_guest_then_process_ucall(vm) == UCALL_SYNC, "Expected ucall state to be UCALL_SYNC.");
+ vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
+ run_guest_then_process_rdmsr(vm, MSR_GS_BASE);
+ run_guest_then_process_ucall_done(vm);
+
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ test_msr_filter_allow();
+
+ test_msr_filter_deny();
+
+ test_msr_permission_bitmap();
+
+ return 0;
+}
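
process_rdmsr() and process_wrmsr() above spell out the completion contract for these exits: on KVM_EXIT_X86_RDMSR userspace supplies run->msr.data, on KVM_EXIT_X86_WRMSR it validates run->msr.data, and either side can set run->msr.error to have KVM inject #GP into the guest instead. A stand-alone sketch of that loop body, with emulate_rdmsr()/emulate_wrmsr() as hypothetical device-model hooks rather than KVM or selftest APIs:

#include <stdbool.h>
#include <stdint.h>
#include <linux/kvm.h>

/* Hypothetical hooks; return false if the MSR isn't handled. */
static bool emulate_rdmsr(uint32_t index, uint64_t *data);
static bool emulate_wrmsr(uint32_t index, uint64_t data);

static void handle_msr_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_X86_RDMSR:
		if (!emulate_rdmsr(run->msr.index, &run->msr.data))
			run->msr.error = 1;	/* KVM injects #GP */
		break;
	case KVM_EXIT_X86_WRMSR:
		if (!emulate_wrmsr(run->msr.index, run->msr.data))
			run->msr.error = 1;
		break;
	}
	/* The next KVM_RUN resumes the guest with this result. */
}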
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
index 1f65342d6cb7..d14888b34adb 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
@@ -87,7 +87,6 @@ int main(int argc, char *argv[])
nested_vmx_check_supported();
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
kvm_get_cpu_address_width(&paddr_width, &vaddr_width);
high_gpa = (1ul << paddr_width) - getpagesize();
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
index fe40ade06a49..2835a17f1b7a 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
@@ -57,7 +57,6 @@ int main(int argc, char *argv[])
nested_vmx_check_supported();
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index e894a638a155..537de1068554 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -82,7 +82,6 @@ int main(int argc, char *argv[])
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
run = vcpu_state(vm, VCPU_ID);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
index a7737af1224f..a07480aed397 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
@@ -169,20 +169,19 @@ int main(int argc, char *argv[])
*/
nested_vmx_check_supported();
+ if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+ print_skip("KVM_CAP_NESTED_STATE not supported");
+ exit(KSFT_SKIP);
+ }
+
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
run = vcpu_state(vm, VCPU_ID);
vcpu_regs_get(vm, VCPU_ID, &regs1);
- if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
- } else {
- pr_info("will skip vmx preemption timer checks\n");
- goto done;
- }
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
for (stage = 1;; stage++) {
_vcpu_run(vm, VCPU_ID);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
index d59f3eb67c8f..5827b9bae468 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -244,6 +244,22 @@ void test_vmx_nested_state(struct kvm_vm *vm)
free(state);
}
+void disable_vmx(struct kvm_vm *vm)
+{
+ struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
+ int i;
+
+ for (i = 0; i < cpuid->nent; ++i)
+ if (cpuid->entries[i].function == 1 &&
+ cpuid->entries[i].index == 0)
+ break;
+ TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found");
+
+ cpuid->entries[i].ecx &= ~CPUID_VMX;
+ vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+ cpuid->entries[i].ecx |= CPUID_VMX;
+}
+
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
@@ -264,6 +280,11 @@ int main(int argc, char *argv[])
vm = vm_create_default(VCPU_ID, 0, 0);
+ /*
+ * First run tests with VMX disabled to check error handling.
+ */
+ disable_vmx(vm);
+
/* Passing a NULL kvm_nested_state causes a EFAULT. */
test_nested_state_expect_efault(vm, NULL);
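
disable_vmx() hides CPUID.01H:ECX.VMX before the first pass so the error paths run without nested support; it flips the bit back afterwards because kvm_get_supported_cpuid() returns a cached copy that later callers reuse. What that pass expects, as a raw-ioctl sketch (vcpu_fd and the exact errno are assumptions, not taken from the patch):

#include <assert.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void expect_nested_state_rejected(int vcpu_fd)
{
	struct kvm_nested_state state;

	memset(&state, 0, sizeof(state));
	state.format = KVM_STATE_NESTED_FORMAT_VMX;
	state.size = sizeof(state);

	/* With VMX hidden from CPUID, KVM must refuse nested state. */
	assert(ioctl(vcpu_fd, KVM_SET_NESTED_STATE, &state) == -1);
}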
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index fbe8417cbc2c..7e33a350b053 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -132,7 +132,6 @@ int main(int argc, char *argv[])
nested_vmx_check_supported();
vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);