author    Paolo Bonzini <pbonzini@redhat.com>  2020-05-13 13:16:12 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2020-06-01 04:26:00 -0400
commit    e670bf68f4b701506d51f007917ab633894294d0 (patch)
tree      6aa7aed4f3b9a201245170a2b3772f7e2951f7c0 /arch/x86/kvm/svm/svm.h
parent    7923ef4f6ec4a25a902bd827446eac860b01fd1c (diff)
KVM: nSVM: save all control fields in svm->nested
In preparation for nested SVM save/restore, store all data that matters
from the VMCB control area into svm->nested.  It will then become part
of the nested SVM state that is saved by KVM_GET_NESTED_STATE and
restored by KVM_SET_NESTED_STATE, just like the cached vmcs12 for nVMX.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
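For context (not part of the commit message): once the control area is
cached in svm->nested, userspace can checkpoint it through the existing
nested-state ioctls. A minimal sketch, assuming vcpu_fd is an open KVM
vCPU file descriptor and max_size was obtained from
ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE); error handling
is trimmed:

/* Hypothetical userspace sketch: save/restore nested SVM state. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

static struct kvm_nested_state *save_nested(int vcpu_fd, int max_size)
{
	struct kvm_nested_state *state = calloc(1, max_size);

	if (!state)
		return NULL;
	state->size = max_size;
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);
		return NULL;
	}
	return state;	/* now carries the cached control fields too */
}

static int restore_nested(int vcpu_fd, struct kvm_nested_state *state)
{
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
}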
Diffstat (limited to 'arch/x86/kvm/svm/svm.h')
-rw-r--r--  arch/x86/kvm/svm/svm.h | 20 +++++---------------
1 file changed, 5 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 33e3f09d7a8e..dd5418f20256 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -91,22 +91,12 @@ struct nested_state {
 	/* These are the merged vectors */
 	u32 *msrpm;
 
-	/* gpa pointers to the real vectors */
-	u64 vmcb_msrpm;
-	u64 vmcb_iopm;
-
 	/* A VMRUN has started but has not yet been performed, so
 	 * we cannot inject a nested vmexit yet. */
 	bool nested_run_pending;
 
-	/* cache for intercepts of the guest */
-	u32 intercept_cr;
-	u32 intercept_dr;
-	u32 intercept_exceptions;
-	u64 intercept;
-
-	/* Nested Paging related state */
-	u64 nested_cr3;
+	/* cache for control fields of the guest */
+	struct vmcb_control_area ctl;
 };
 
 struct vcpu_svm {
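The consolidation above means the cache can be filled with a single
structure copy on nested VMRUN instead of field-by-field bookkeeping.
A sketch of the idea, with a hypothetical helper name (the actual copy
happens in nested.c, which this diff does not cover):

/* Hypothetical sketch: capture the guest's VMCB control area into the
 * new cache so later intercept checks do not re-read guest memory. */
static void cache_guest_control(struct vcpu_svm *svm,
				const struct vmcb_control_area *from)
{
	svm->nested.ctl = *from;	/* one struct copy replaces the old fields */
}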
@@ -381,17 +371,17 @@ static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
 
 static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
 {
-	return (svm->nested.intercept & (1ULL << INTERCEPT_SMI));
+	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_SMI));
 }
 
 static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
 {
-	return (svm->nested.intercept & (1ULL << INTERCEPT_INTR));
+	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INTR));
 }
 
 static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 {
-	return (svm->nested.intercept & (1ULL << INTERCEPT_NMI));
+	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_NMI));
 }
 
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
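The three nested_exit_on_* helpers above share one pattern: test a bit
in the cached intercept word. A hypothetical generic form, not part of
this patch, just to make the pattern explicit:

/* Hypothetical helper: test any intercept bit in the cached control
 * area; nested_exit_on_smi() would then be
 * nested_svm_intercepts(svm, INTERCEPT_SMI). */
static inline bool nested_svm_intercepts(struct vcpu_svm *svm, int bit)
{
	return svm->nested.ctl.intercept & (1ULL << bit);
}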