Diffstat (limited to 'arch/x86/kvm/svm/svm.c')
-rw-r--r-- | arch/x86/kvm/svm/svm.c | 36
1 file changed, 21 insertions, 15 deletions
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0194336b64a4..c44f3e9140d5 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2938,8 +2938,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
 
-	svm_complete_interrupts(svm);
-
 	if (is_guest_mode(vcpu)) {
 		int vmexit;
 
@@ -3504,7 +3502,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	stgi();
 
 	/* Any pending NMI will happen here */
-	exit_fastpath = svm_exit_handlers_fastpath(vcpu);
 
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_after_interrupt(&svm->vcpu);
@@ -3518,6 +3515,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	}
 
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+	vmcb_mark_all_clean(svm->vmcb);
 
 	/* if exit due to PF check for async PF */
 	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
@@ -3537,7 +3535,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
 		svm_handle_mce(svm);
 
-	vmcb_mark_all_clean(svm->vmcb);
+	svm_complete_interrupts(svm);
+	exit_fastpath = svm_exit_handlers_fastpath(vcpu);
 
 	return exit_fastpath;
 }
@@ -3900,21 +3899,28 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	struct vmcb *nested_vmcb;
 	struct kvm_host_map map;
-	u64 guest;
-	u64 vmcb;
 	int ret = 0;
 
-	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
-	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
+	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+		u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+		u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+		u64 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
-	if (guest) {
-		if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
-			return 1;
-		nested_vmcb = map.hva;
-		ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
-		kvm_vcpu_unmap(&svm->vcpu, &map, true);
+		if (guest) {
+			if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+				return 1;
+
+			if (!(saved_efer & EFER_SVME))
+				return 1;
+
+			if (kvm_vcpu_map(&svm->vcpu,
+					 gpa_to_gfn(vmcb), &map) == -EINVAL)
+				return 1;
+
+			ret = enter_svm_guest_mode(svm, vmcb, map.hva);
+			kvm_vcpu_unmap(&svm->vcpu, &map, true);
+		}
 	}
 
 	return ret;
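
The svm_pre_leave_smm() hunk above tightens what happens on RSM: KVM only re-enters the nested (L2) guest if the vCPU's CPUID advertises long mode and SVM, and the EFER image saved in SMRAM (offset 0x7ed0) has EFER.SVME set. The following is a minimal, userspace-only sketch of that validation order, written only to illustrate the checks visible in the diff. It is not kernel code: fake_vcpu, check_smm_return, get_smstate_u64 and the has_long_mode/has_svm fields are hypothetical stand-ins for vcpu state and guest_cpuid_has(); only the save-area offsets, the EFER.SVME bit (bit 12) and the "return 1 on failure" convention come from the diff itself.

/*
 * Illustrative sketch (assumptions noted above) of the order of checks the
 * new SMM-return path performs before mapping the saved VMCB and calling
 * enter_svm_guest_mode().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EFER_SVME	(1ULL << 12)	/* EFER.SVME: SVM enabled */
#define SMRAM_SIZE	0x8000

/* Modeled after GET_SMSTATE(): read a u64 at an offset in the save area. */
static uint64_t get_smstate_u64(const uint8_t *smstate, size_t off)
{
	uint64_t v;

	memcpy(&v, smstate + off, sizeof(v));
	return v;
}

/* Hypothetical stand-in for guest_cpuid_has(vcpu, X86_FEATURE_*). */
struct fake_vcpu {
	bool has_long_mode;	/* X86_FEATURE_LM  */
	bool has_svm;		/* X86_FEATURE_SVM */
};

/* Returns 0 when RSM may proceed (possibly re-entering L2), 1 on failure. */
static int check_smm_return(const struct fake_vcpu *vcpu, const uint8_t *smstate)
{
	if (!vcpu->has_long_mode)
		return 0;	/* 64-bit save-area fields are not consulted */

	uint64_t saved_efer = get_smstate_u64(smstate, 0x7ed0);
	uint64_t guest      = get_smstate_u64(smstate, 0x7ed8);
	uint64_t vmcb_gpa   = get_smstate_u64(smstate, 0x7ee0);

	if (!guest)
		return 0;	/* the SMI did not interrupt a nested guest */

	/* The added checks: the vCPU must actually be able to run L2. */
	if (!vcpu->has_svm)
		return 1;
	if (!(saved_efer & EFER_SVME))
		return 1;

	printf("would re-enter L2 via VMCB at gpa 0x%llx\n",
	       (unsigned long long)vmcb_gpa);
	return 0;
}

int main(void)
{
	uint8_t smstate[SMRAM_SIZE] = { 0 };
	struct fake_vcpu vcpu = { .has_long_mode = true, .has_svm = true };
	uint64_t efer = EFER_SVME, guest = 1, vmcb_gpa = 0x100000;

	memcpy(smstate + 0x7ed0, &efer, sizeof(efer));
	memcpy(smstate + 0x7ed8, &guest, sizeof(guest));
	memcpy(smstate + 0x7ee0, &vmcb_gpa, sizeof(vmcb_gpa));

	printf("ret = %d\n", check_smm_return(&vcpu, smstate));
	return 0;
}

Clearing has_svm or dropping EFER_SVME from the saved EFER in main() makes the sketch return 1, mirroring how the patched kernel code refuses to re-enter the nested guest from bogus SMM state instead of trusting the saved VMCB pointer unconditionally.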