author     | Sean Christopherson <sean.j.christopherson@intel.com> | 2019-09-12 19:46:07 -0700
committer  | Paolo Bonzini <pbonzini@redhat.com>                   | 2019-09-24 14:35:35 +0200
commit     | fbb158cb88b6e4f8279707a1842ee0332f64b1a3 (patch)
tree       | b37438e6dc9823c893a6b77416a3ef33b4abe920 /arch
parent     | 14a3c4f498edac65ce2ff1f05d849cd72e886d78 (diff)
KVM: x86/mmu: Revert "Revert "KVM: MMU: zap pages in batch""
Now that the fast invalidate mechanism has been reintroduced, restore
the performance tweaks for fast invalidation that existed prior to its
removal.
Paraphrasing the original changelog:
Zap at least 10 shadow pages before releasing mmu_lock to reduce the
overhead associated with re-acquiring the lock.
Note: "10" is an arbitrary number, speculated to be high enough so
that a vCPU isn't stuck zapping obsolete pages for an extended period,
but small enough so that other vCPUs aren't starved waiting for
mmu_lock.
This reverts commit 43d2b14b105fb00b8864c7b0ee7043cc1cc4a969.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/mmu.c | 35
1 file changed, 9 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2ab0ac88d86e..3acc4b046d47 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5671,12 +5671,12 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-
+#define BATCH_ZAP_PAGES	10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
-	int ign;
+	int nr_zapped, batch = 0;
 
 restart:
 	list_for_each_entry_safe_reverse(sp, node,
@@ -5689,28 +5689,6 @@ restart:
 			break;
 
 		/*
-		 * Do not repeatedly zap a root page to avoid unnecessary
-		 * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
-		 * progress:
-		 *    vcpu 0                        vcpu 1
-		 *                         call vcpu_enter_guest():
-		 *                            1): handle KVM_REQ_MMU_RELOAD
-		 *                                and require mmu-lock to
-		 *                                load mmu
-		 * repeat:
-		 *    1): zap root page and
-		 *        send KVM_REQ_MMU_RELOAD
-		 *
-		 *    2): if (cond_resched_lock(mmu-lock))
-		 *
-		 *                            2): hold mmu-lock and load mmu
-		 *
-		 *                            3): see KVM_REQ_MMU_RELOAD bit
-		 *                                on vcpu->requests is set
-		 *                                then return 1 to call
-		 *                                vcpu_enter_guest() again.
-		 *            goto repeat;
-		 *
 		 * Since we are reversely walking the list and the invalid
 		 * list will be moved to the head, skip the invalid page
 		 * can help us to avoid the infinity list walking.
@@ -5718,14 +5696,19 @@ restart:
 		if (sp->role.invalid)
 			continue;
 
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+		if (batch >= BATCH_ZAP_PAGES &&
+		    (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
+			batch = 0;
 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
 			cond_resched_lock(&kvm->mmu_lock);
 			goto restart;
 		}
 
-		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
+		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
+				&nr_zapped)) {
+			batch += nr_zapped;
 			goto restart;
+		}
 	}
 
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
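To see the restored heuristic in isolation, the sketch below is a minimal, self-contained userspace approximation of the batch-before-yield pattern in the patch above. It is not kernel code: zap_one_page() and lock_is_contended() are hypothetical stand-ins for __kvm_mmu_prepare_zap_page() and need_resched()/spin_needbreak(&kvm->mmu_lock), and the contention pattern is invented purely for the demo.

/*
 * Minimal, self-contained sketch of the batch-before-yield heuristic.
 * NOT kernel code: zap_one_page() and lock_is_contended() are hypothetical
 * stand-ins used only to make the control flow visible.
 */
#include <stdbool.h>
#include <stdio.h>

#define BATCH_ZAP_PAGES 10	/* same arbitrary threshold as the patch */

/* Stand-in for __kvm_mmu_prepare_zap_page(): report how many pages were zapped. */
static int zap_one_page(int idx)
{
	(void)idx;
	return 1;
}

/* Stand-in for need_resched() || spin_needbreak(): pretend every 7th step is contended. */
static bool lock_is_contended(int idx)
{
	return (idx % 7) == 0;
}

int main(void)
{
	int batch = 0, yields = 0;

	for (int i = 1; i <= 100; i++) {
		/*
		 * Only consider dropping the lock once a full batch has been
		 * zapped; this is where the kernel would commit the invalid
		 * list and call cond_resched_lock(&kvm->mmu_lock).
		 */
		if (batch >= BATCH_ZAP_PAGES && lock_is_contended(i)) {
			batch = 0;
			yields++;
		}
		batch += zap_one_page(i);
	}

	printf("zapped 100 pages, yielded the lock %d times\n", yields);
	return 0;
}

Note that the real patch advances the counter by nr_zapped rather than by one, so the work actually performed by __kvm_mmu_prepare_zap_page() (which can zap more than the single page passed in) is what counts toward the BATCH_ZAP_PAGES threshold before the lock may be dropped.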