author     Sean Christopherson <seanjc@google.com>  2024-08-30 17:15:35 -0700
committer  Sean Christopherson <seanjc@google.com>  2024-09-09 20:16:34 -0700
commit     6b3dcabc10911711eba15816d808e2a18f130406 (patch)
tree       66f6e7362bc4006443bb147639ab56b6065b7aca
parent     2876624e1adcd9a3a3ffa8c4fe3bf8dbba969d95 (diff)
KVM: x86/mmu: Subsume kvm_mmu_unprotect_page() into the and_retry() version
Fold kvm_mmu_unprotect_page() into kvm_mmu_unprotect_gfn_and_retry() now
that all other direct usage is gone.

No functional change intended.

Link: https://lore.kernel.org/r/20240831001538.336683-21-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  1
-rw-r--r--  arch/x86/kvm/mmu/mmu.c          | 33
2 files changed, 13 insertions(+), 21 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cdee59f3d15b..8f4164f58b6c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2132,7 +2132,6 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);

void kvm_update_dr7(struct kvm_vcpu *vcpu);

-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				       bool always_retry);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4b4edaf7dc06..29305403f956 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2695,27 +2695,12 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
	write_unlock(&kvm->mmu_lock);
}

-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
-{
-	struct kvm_mmu_page *sp;
-	LIST_HEAD(invalid_list);
-	int r;
-
-	r = 0;
-	write_lock(&kvm->mmu_lock);
-	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
-		r = 1;
-		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-	}
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-	write_unlock(&kvm->mmu_lock);
-
-	return r;
-}
-
bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				       bool always_retry)
{
+	struct kvm *kvm = vcpu->kvm;
+	LIST_HEAD(invalid_list);
+	struct kvm_mmu_page *sp;
	gpa_t gpa = cr2_or_gpa;
	bool r = false;

@@ -2727,7 +2712,7 @@ bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
	 * positive is benign, and a false negative will simply result in KVM
	 * skipping the unprotect+retry path, which is also an optimization.
	 */
-	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+	if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
		goto out;

	if (!vcpu->arch.mmu->root_role.direct) {
@@ -2736,7 +2721,15 @@ bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			goto out;
	}

-	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+	r = false;
+	write_lock(&kvm->mmu_lock);
+	for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa)) {
+		r = true;
+		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+	}
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	write_unlock(&kvm->mmu_lock);
+
out:
	if (r || always_retry) {
		vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
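
For reference, here is a rough sketch of the consolidated
__kvm_mmu_unprotect_gfn_and_retry() as it reads after this patch,
reconstructed from the hunks above. It is not a verbatim copy of the tree:
the GVA-to-GPA translation under the !direct check and the retry bookkeeping
past the excerpt's cutoff are elided (marked by comments), and the trailing
return is assumed from the function's bool result.

bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				       bool always_retry)
{
	struct kvm *kvm = vcpu->kvm;
	LIST_HEAD(invalid_list);
	struct kvm_mmu_page *sp;
	gpa_t gpa = cr2_or_gpa;
	bool r = false;

	/*
	 * Bail early if there are no write-protected shadow pages; reading
	 * indirect_shadow_pages without holding mmu_lock is a benign race
	 * (see the comment in the hunk above).
	 */
	if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
		goto out;

	if (!vcpu->arch.mmu->root_role.direct) {
		/* GVA-to-GPA translation of cr2_or_gpa; elided in the excerpt. */
	}

	/* The zap loop formerly in kvm_mmu_unprotect_page(), now inlined. */
	r = false;
	write_lock(&kvm->mmu_lock);
	for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa)) {
		r = true;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	write_unlock(&kvm->mmu_lock);

out:
	if (r || always_retry) {
		vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
		/* Remaining retry bookkeeping is truncated in the excerpt above. */
	}

	return r;	/* assumed: matches the bool return type */
}

Per the commit message, no functional change is intended: the zap loop simply
moves from the now-deleted exported helper into its only remaining caller,
where invalid_list and the sp cursor become ordinary locals.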