author		Paolo Bonzini <pbonzini@redhat.com>	2015-05-19 16:01:50 +0200
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-05-28 10:43:33 +0200
commit		d9ef13c2b3983de8dd1373ef670799dbb6498122 (patch)
tree		8fa5aedbdb534029ca8a69d1b4608720fd90e817 /arch/x86/kvm/mmu.c
parent		f36f3f2846b5578d62910ee0b6dbef59fdd1cfa4 (diff)
KVM: pass kvm_memory_slot to gfn_to_page_many_atomic
The memory slot is already available from gfn_to_memslot_dirty_bitmap.
Isn't it a shame to look it up again?  Plus, it makes
gfn_to_page_many_atomic agnostic of multiple VCPU address spaces.

Reviewed-by: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
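For context, gfn_to_page_many_atomic itself is defined in virt/kvm/kvm_main.c, outside this diffstat. A minimal sketch of what the callee side looks like once it takes the slot directly, assuming the pre-existing gfn_to_hva_many() and __get_user_pages_fast() helpers of that era:

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	/* Resolve the host virtual address straight from the slot the
	 * caller already holds; no second memslot lookup is needed. */
	addr = gfn_to_hva_many(slot, gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	/* Give up if the slot does not cover all nr_pages. */
	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}

Because the slot is passed in explicitly, the function never touches vcpu->kvm, which is what makes it agnostic of which VCPU address space the gfn belongs to.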
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1bf2ae9ca521..6a7e5b6246b1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2728,15 +2728,17 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    u64 *start, u64 *end)
 {
 	struct page *pages[PTE_PREFETCH_NUM];
+	struct kvm_memory_slot *slot;
 	unsigned access = sp->role.access;
 	int i, ret;
 	gfn_t gfn;
 
 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
-	if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
+	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
+	if (!slot)
 		return -1;
 
-	ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
+	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
 	if (ret <= 0)
 		return -1;