From 27592ae8dbe41033261b6fdf27d78998aabd2665 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Nov 2021 16:03:57 +0000 Subject: KVM: Move wiping of the kvm->vcpus array to common code All architectures have similar loops iterating over the vcpus, freeing one vcpu at a time, and eventually wiping the reference off the vcpus array. They are also inconsistently taking the kvm->lock mutex when wiping the references from the array. Make this code common, which will simplify further changes. The locking is dropped altogether, as this should only be called when there is no further references on the kvm structure. Reviewed-by: Claudio Imbrenda Signed-off-by: Marc Zyngier Message-Id: <20211116160403.4074052-2-maz@kernel.org> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 72c4e6b39389..0a504c7988dc 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -435,7 +435,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) vcpu->last_used_slot = 0; } -void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) +static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_dirty_ring_free(&vcpu->dirty_ring); kvm_arch_vcpu_destroy(vcpu); @@ -450,7 +450,20 @@ void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) free_page((unsigned long)vcpu->run); kmem_cache_free(kvm_vcpu_cache, vcpu); } -EXPORT_SYMBOL_GPL(kvm_vcpu_destroy); + +void kvm_destroy_vcpus(struct kvm *kvm) +{ + unsigned int i; + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) { + kvm_vcpu_destroy(vcpu); + kvm->vcpus[i] = NULL; + } + + atomic_set(&kvm->online_vcpus, 0); +} +EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) -- cgit v1.2.3-58-ga151 From c5b077549136584618a66258f09d8d4b41e7409c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Nov 2021 16:04:01 +0000 Subject: KVM: Convert the kvm->vcpus array to a xarray At least on arm64 and x86, the vcpus array is pretty huge (up to 1024 entries on x86) and is mostly empty in the majority of the cases (running 1k vcpu VMs is not that common). This mean that we end-up with a 4kB block of unused memory in the middle of the kvm structure. Instead of wasting away this memory, let's use an xarray instead, which gives us almost the same flexibility as a normal array, but with a reduced memory usage with smaller VMs. Signed-off-by: Marc Zyngier Message-Id: <20211116160403.4074052-6-maz@kernel.org> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 5 +++-- virt/kvm/kvm_main.c | 15 +++++++++------ 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'virt') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e2f9f8f67c58..2201dc07126a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -552,7 +553,7 @@ struct kvm { struct mutex slots_arch_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; - struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + struct xarray vcpu_array; /* Used to wait for completion of MMU notifiers. */ spinlock_t mn_invalidate_lock; @@ -701,7 +702,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. 
*/ smp_rmb(); - return kvm->vcpus[i]; + return xa_load(&kvm->vcpu_array, i); } #define kvm_for_each_vcpu(idx, vcpup, kvm) \ diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 0a504c7988dc..594f90307b20 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -458,7 +458,7 @@ void kvm_destroy_vcpus(struct kvm *kvm) kvm_for_each_vcpu(i, vcpu, kvm) { kvm_vcpu_destroy(vcpu); - kvm->vcpus[i] = NULL; + xa_erase(&kvm->vcpu_array, i); } atomic_set(&kvm->online_vcpus, 0); @@ -1063,6 +1063,7 @@ static struct kvm *kvm_create_vm(unsigned long type) mutex_init(&kvm->slots_arch_lock); spin_lock_init(&kvm->mn_invalidate_lock); rcuwait_init(&kvm->mn_memslots_update_rcuwait); + xa_init(&kvm->vcpu_array); INIT_LIST_HEAD(&kvm->devices); @@ -3598,7 +3599,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) } vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); - BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); + r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); + BUG_ON(r == -EBUSY); + if (r) + goto unlock_vcpu_destroy; /* Fill the stats id string for the vcpu */ snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", @@ -3608,15 +3612,14 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) kvm_get_kvm(kvm); r = create_vcpu_fd(vcpu); if (r < 0) { + xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); kvm_put_kvm_no_destroy(kvm); goto unlock_vcpu_destroy; } - kvm->vcpus[vcpu->vcpu_idx] = vcpu; - /* - * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus - * before kvm->online_vcpu's incremented value. + * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu + * pointer before kvm->online_vcpu's incremented value. */ smp_wmb(); atomic_inc(&kvm->online_vcpus); -- cgit v1.2.3-58-ga151 From 46808a4cb89708c2e5b264eb9d1035762581921b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Nov 2021 16:04:02 +0000 Subject: KVM: Use 'unsigned long' as kvm_for_each_vcpu()'s index Everywhere we use kvm_for_each_vpcu(), we use an int as the vcpu index. Unfortunately, we're about to move rework the iterator, which requires this to be upgrade to an unsigned long. Let's bite the bullet and repaint all of it in one go. 
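
For reference, once the iterator is reworked on top of the vcpu_array xarray, it presumably ends up as little more than a wrapper around xa_for_each_range(), whose index is an unsigned long, hence the repaint. A minimal sketch:

	/* Sketch only: how the reworked iterator might look on top of the xarray. */
	#define kvm_for_each_vcpu(idx, vcpup, kvm)			\
		xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0,	\
				  (atomic_read(&kvm->online_vcpus) - 1))
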
Signed-off-by: Marc Zyngier Message-Id: <20211116160403.4074052-7-maz@kernel.org> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arch_timer.c | 8 ++++---- arch/arm64/kvm/arm.c | 6 +++--- arch/arm64/kvm/pmu-emul.c | 2 +- arch/arm64/kvm/psci.c | 6 +++--- arch/arm64/kvm/reset.c | 2 +- arch/arm64/kvm/vgic/vgic-init.c | 10 ++++++---- arch/arm64/kvm/vgic/vgic-kvm-device.c | 2 +- arch/arm64/kvm/vgic/vgic-mmio-v2.c | 3 +-- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 7 ++++--- arch/arm64/kvm/vgic/vgic-v3.c | 4 ++-- arch/arm64/kvm/vgic/vgic-v4.c | 5 +++-- arch/arm64/kvm/vgic/vgic.c | 2 +- arch/powerpc/kvm/book3s_32_mmu.c | 2 +- arch/powerpc/kvm/book3s_64_mmu.c | 2 +- arch/powerpc/kvm/book3s_hv.c | 8 ++++---- arch/powerpc/kvm/book3s_pr.c | 2 +- arch/powerpc/kvm/book3s_xics.c | 6 +++--- arch/powerpc/kvm/book3s_xics.h | 2 +- arch/powerpc/kvm/book3s_xive.c | 15 +++++++++------ arch/powerpc/kvm/book3s_xive.h | 4 ++-- arch/powerpc/kvm/book3s_xive_native.c | 8 ++++---- arch/powerpc/kvm/e500_emulate.c | 2 +- arch/riscv/kvm/vcpu_sbi.c | 2 +- arch/riscv/kvm/vmid.c | 2 +- arch/s390/kvm/interrupt.c | 2 +- arch/s390/kvm/kvm-s390.c | 21 +++++++++++---------- arch/s390/kvm/kvm-s390.h | 4 ++-- arch/x86/kvm/hyperv.c | 7 ++++--- arch/x86/kvm/i8254.c | 2 +- arch/x86/kvm/i8259.c | 5 +++-- arch/x86/kvm/ioapic.c | 4 ++-- arch/x86/kvm/irq_comm.c | 7 ++++--- arch/x86/kvm/kvm_onhyperv.c | 3 ++- arch/x86/kvm/lapic.c | 6 +++--- arch/x86/kvm/svm/avic.c | 2 +- arch/x86/kvm/svm/sev.c | 9 +++++---- arch/x86/kvm/x86.c | 23 ++++++++++++----------- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 13 +++++++------ 39 files changed, 118 insertions(+), 104 deletions(-) (limited to 'virt') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 3df67c127489..d6f4114f1d11 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -750,7 +750,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) /* Make the updates of cntvoff for all vtimer contexts atomic */ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff) { - int i; + unsigned long i; struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tmp; @@ -1189,8 +1189,8 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) { - int vtimer_irq, ptimer_irq; - int i, ret; + int vtimer_irq, ptimer_irq, ret; + unsigned long i; vtimer_irq = vcpu_vtimer(vcpu)->irq.irq; ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu)); @@ -1297,7 +1297,7 @@ void kvm_timer_init_vhe(void) static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { vcpu_vtimer(vcpu)->irq.irq = vtimer_irq; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 362b10cb992c..b6e65c6eb1d3 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -631,7 +631,7 @@ bool kvm_arch_intc_initialized(struct kvm *kvm) void kvm_arm_halt_guest(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) @@ -641,7 +641,7 @@ void kvm_arm_halt_guest(struct kvm *kvm) void kvm_arm_resume_guest(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { @@ -2027,7 +2027,7 @@ static int finalize_hyp_mode(void) struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; mpidr &= MPIDR_HWID_BITMASK; kvm_for_each_vcpu(i, vcpu, kvm) { diff --git a/arch/arm64/kvm/pmu-emul.c 
b/arch/arm64/kvm/pmu-emul.c index a5e4bbf5e68f..0404357705a8 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -900,7 +900,7 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) */ static bool pmu_irq_is_valid(struct kvm *kvm, int irq) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 74c47d420253..ed675fce8fb7 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -121,8 +121,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) { - int i, matching_cpus = 0; - unsigned long mpidr; + int matching_cpus = 0; + unsigned long i, mpidr; unsigned long target_affinity; unsigned long target_affinity_mask; unsigned long lowest_affinity_level; @@ -164,7 +164,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type) { - int i; + unsigned long i; struct kvm_vcpu *tmp; /* diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 426bd7fbc3fd..97de30a79770 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -170,7 +170,7 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu) { struct kvm_vcpu *tmp; bool is32bit; - int i; + unsigned long i; is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT); if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 0a06d0648970..a7382bda9676 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -70,8 +70,9 @@ void kvm_vgic_early_init(struct kvm *kvm) */ int kvm_vgic_create(struct kvm *kvm, u32 type) { - int i, ret; struct kvm_vcpu *vcpu; + unsigned long i; + int ret; if (irqchip_in_kernel(kvm)) return -EEXIST; @@ -255,7 +256,8 @@ int vgic_init(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int ret = 0, i, idx; + int ret = 0, i; + unsigned long idx; if (vgic_initialized(kvm)) return 0; @@ -308,7 +310,7 @@ int vgic_init(struct kvm *kvm) goto out; } - kvm_for_each_vcpu(i, vcpu, kvm) + kvm_for_each_vcpu(idx, vcpu, kvm) kvm_vgic_vcpu_enable(vcpu); ret = kvm_vgic_setup_default_irq_routing(kvm); @@ -370,7 +372,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) static void __kvm_vgic_destroy(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; vgic_debug_destroy(kvm); diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 0d000d2fe8d2..c6d52a1fd9c8 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -325,7 +325,7 @@ void unlock_all_vcpus(struct kvm *kvm) bool lock_all_vcpus(struct kvm *kvm) { struct kvm_vcpu *tmp_vcpu; - int c; + unsigned long c; /* * Any time a vcpu is run, vcpu_load is called which tries to grab the diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c index 5f9014ae595b..12e4c223e6b8 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c @@ -113,9 +113,8 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, int intid = val & 0xf; int targets = (val >> 16) & 0xff; int mode = (val >> 24) & 0x03; - int c; struct kvm_vcpu *vcpu; - unsigned long flags; + unsigned long flags, c; switch (mode) { case 0x0: /* as specified by targets */ diff --git 
a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index bf7ec4a78497..82906cb3f713 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -754,7 +754,8 @@ static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) static int vgic_register_all_redist_iodevs(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int c, ret = 0; + unsigned long c; + int ret = 0; kvm_for_each_vcpu(c, vcpu, kvm) { ret = vgic_register_redist_iodev(vcpu); @@ -995,10 +996,10 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) struct kvm_vcpu *c_vcpu; u16 target_cpus; u64 mpidr; - int sgi, c; + int sgi; int vcpu_id = vcpu->vcpu_id; bool broadcast; - unsigned long flags; + unsigned long c, flags; sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 04f62c4b07fb..5fedaee15e72 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -542,13 +542,13 @@ int vgic_v3_map_resources(struct kvm *kvm) struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; int ret = 0; - int c; + unsigned long c; kvm_for_each_vcpu(c, vcpu, kvm) { struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) { - kvm_debug("vcpu %d redistributor base not set\n", c); + kvm_debug("vcpu %ld redistributor base not set\n", c); return -ENXIO; } } diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index 772dd15a22c7..ad06ba6c9b00 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -189,7 +189,7 @@ void vgic_v4_configure_vsgis(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_arm_halt_guest(kvm); @@ -235,7 +235,8 @@ int vgic_v4_init(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int i, nr_vcpus, ret; + int nr_vcpus, ret; + unsigned long i; if (!kvm_vgic_global_state.has_gicv4) return 0; /* Nothing to see here... move along. 
*/ diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 5dad4996cfb2..9b98876a8a93 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -990,7 +990,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) void vgic_kick_vcpus(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int c; + unsigned long c; /* * We've injected an interrupt, time to find out who deserves diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 3fbd570f9c1e..0215f32932a9 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -337,7 +337,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) { - int i; + unsigned long i; struct kvm_vcpu *v; /* flush this VA on all cpus */ diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index feee40cb2ba1..61290282fd9e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -530,7 +530,7 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, bool large) { u64 mask = 0xFFFFFFFFFULL; - long i; + unsigned long i; struct kvm_vcpu *v; dprintk("KVM MMU: tlbie(0x%lx)\n", va); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 7b74fc0a986b..32873c6985f9 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1993,7 +1993,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, */ if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.vcore != vc) @@ -4786,8 +4786,8 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int i, r; - unsigned long n; + int r; + unsigned long n, i; unsigned long *buf, *p; struct kvm_vcpu *vcpu; @@ -5861,7 +5861,7 @@ static int kvmhv_svm_off(struct kvm *kvm) int mmu_was_ready; int srcu_idx; int ret = 0; - int i; + unsigned long i; if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) return ret; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 6bc9425acb32..bb0612c49b92 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -428,7 +428,7 @@ static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) /************* MMU Notifiers *************/ static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - long i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index ebd5d920de8c..9cc466006e8b 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -942,8 +942,8 @@ static int xics_debug_show(struct seq_file *m, void *private) struct kvmppc_xics *xics = m->private; struct kvm *kvm = xics->kvm; struct kvm_vcpu *vcpu; - int icsid, i; - unsigned long flags; + int icsid; + unsigned long flags, i; unsigned long t_rm_kick_vcpu, t_rm_check_resend; unsigned long t_rm_notify_eoi; unsigned long t_reject, t_check_resend; @@ -1340,7 +1340,7 @@ static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) static void kvmppc_xics_release(struct kvm_device *dev) { struct kvmppc_xics *xics = dev->private; - int i; + unsigned long i; struct kvm *kvm = xics->kvm; struct kvm_vcpu *vcpu; diff --git a/arch/powerpc/kvm/book3s_xics.h 
b/arch/powerpc/kvm/book3s_xics.h index 6231f76bdd66..8e4c79e2fcd8 100644 --- a/arch/powerpc/kvm/book3s_xics.h +++ b/arch/powerpc/kvm/book3s_xics.h @@ -116,7 +116,7 @@ static inline struct kvmppc_icp *kvmppc_xics_find_server(struct kvm *kvm, u32 nr) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 225008882958..e216c068075d 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -368,7 +368,8 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvm_vcpu *vcpu; - int i, rc; + unsigned long i; + int rc; lockdep_assert_held(&xive->lock); @@ -439,7 +440,8 @@ static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio) int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio) { struct kvm_vcpu *vcpu; - int i, rc; + unsigned long i; + int rc; /* Locate target server */ vcpu = kvmppc_xive_find_server(kvm, *server); @@ -1519,7 +1521,8 @@ static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q) static void xive_pre_save_scan(struct kvmppc_xive *xive) { struct kvm_vcpu *vcpu = NULL; - int i, j; + unsigned long i; + int j; /* * See comment in xive_get_source() about how this @@ -1700,7 +1703,7 @@ static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; @@ -2037,7 +2040,7 @@ static void kvmppc_xive_release(struct kvm_device *dev) struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; pr_devel("Releasing xive device\n"); @@ -2291,7 +2294,7 @@ static int xive_debug_show(struct seq_file *m, void *private) u64 t_vm_h_cppr = 0; u64 t_vm_h_eoi = 0; u64 t_vm_h_ipi = 0; - unsigned int i; + unsigned long i; if (!kvm) return 0; diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index e6a9651c6f1e..09d0657596c3 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -199,7 +199,7 @@ struct kvmppc_xive_vcpu { static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num) @@ -240,7 +240,7 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server) static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id) diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 99db9ac49901..561a5bfe0468 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -807,7 +807,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - unsigned int i; + unsigned long i; pr_devel("%s\n", __func__); @@ -916,7 +916,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - unsigned int i; + unsigned long i; pr_devel("%s\n", __func__); @@ -1017,7 +1017,7 @@ static void 
kvmppc_xive_native_release(struct kvm_device *dev) struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; pr_devel("Releasing xive native device\n"); @@ -1214,7 +1214,7 @@ static int xive_native_debug_show(struct seq_file *m, void *private) struct kvmppc_xive *xive = m->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - unsigned int i; + unsigned long i; if (!kvm) return 0; diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 64eb833e9f02..051102d50c31 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) ulong param = vcpu->arch.regs.gpr[rb]; int prio = dbell2prio(rb); int pir = param & PPC_DBELL_PIR_MASK; - int i; + unsigned long i; struct kvm_vcpu *cvcpu; if (prio < 0) diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 3b0e703d22cf..d0d2bcab2f7b 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -60,7 +60,7 @@ int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run) static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, struct kvm_run *run, u32 type) { - int i; + unsigned long i; struct kvm_vcpu *tmp; kvm_for_each_vcpu(i, tmp, vcpu->kvm) diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c index 2c6253b293bc..807228f8f409 100644 --- a/arch/riscv/kvm/vmid.c +++ b/arch/riscv/kvm/vmid.c @@ -65,7 +65,7 @@ bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid) void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu) { - int i; + unsigned long i; struct kvm_vcpu *v; struct cpumask hmask; struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c3bd993fdd0c..1aa094810f6d 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2659,7 +2659,7 @@ static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr) static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { int r = 0; - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; switch (attr->group) { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 7a0f5abaa484..fd5f4ec1b4b9 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -295,7 +295,7 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val, { struct kvm *kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; unsigned long long *delta = v; list_for_each_entry(kvm, &vm_list, vm_list) { @@ -682,7 +682,7 @@ out: static void icpt_operexc_on_all_vcpus(struct kvm *kvm) { - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { @@ -936,7 +936,7 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_s390_vcpu_block_all(kvm); @@ -1021,7 +1021,7 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) { - int cx; + unsigned long cx; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(cx, vcpu, kvm) @@ -2206,7 +2206,7 @@ static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp) struct kvm_vcpu *vcpu; u16 rc, rrc; int ret = 0; - int i; + unsigned long i; /* * We ignore failures and try to destroy as many CPUs as possible. 
@@ -2230,7 +2230,8 @@ static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp) static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) { - int i, r = 0; + unsigned long i; + int r = 0; u16 dummy; struct kvm_vcpu *vcpu; @@ -2929,7 +2930,7 @@ static int sca_switch_to_extended(struct kvm *kvm) struct bsca_block *old_sca = kvm->arch.sca; struct esca_block *new_sca; struct kvm_vcpu *vcpu; - unsigned int vcpu_idx; + unsigned long vcpu_idx; u32 scaol, scaoh; if (kvm->arch.use_esca) @@ -3411,7 +3412,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, struct kvm *kvm = gmap->private; struct kvm_vcpu *vcpu; unsigned long prefix; - int i; + unsigned long i; if (gmap_is_shadow(gmap)) return; @@ -3904,7 +3905,7 @@ void kvm_s390_set_tod_clock(struct kvm *kvm, { struct kvm_vcpu *vcpu; union tod_clock clk; - int i; + unsigned long i; mutex_lock(&kvm->lock); preempt_disable(); @@ -4536,7 +4537,7 @@ static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) static void __disable_ibs_on_all_vcpus(struct kvm *kvm) { - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c07a050d757d..b887fe7a7064 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -357,7 +357,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); static inline void kvm_s390_vcpu_block_all(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; WARN_ON(!mutex_is_locked(&kvm->lock)); @@ -367,7 +367,7 @@ static inline void kvm_s390_vcpu_block_all(struct kvm *kvm) static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 5e19e6e4c2ce..7179fa645eda 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -164,7 +164,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; if (vpidx >= KVM_MAX_VCPUS) return NULL; @@ -1716,7 +1716,8 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask( { struct kvm_hv *hv = to_kvm_hv(kvm); struct kvm_vcpu *vcpu; - int i, bank, sbank = 0; + int bank, sbank = 0; + unsigned long i; memset(vp_bitmap, 0, KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap)); @@ -1863,7 +1864,7 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, .vector = vector }; struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu_bitmap && !test_bit(i, vcpu_bitmap)) diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 5a69cce4d72d..0b65a764ed3a 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -242,7 +242,7 @@ static void pit_do_work(struct kthread_work *work) struct kvm_pit *pit = container_of(work, struct kvm_pit, expired); struct kvm *kvm = pit->kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; struct kvm_kpit_state *ps = &pit->pit_state; if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0)) diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 0b80263d46d8..814064d06016 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -50,7 +50,7 @@ static void pic_unlock(struct kvm_pic *s) { bool wakeup = s->wakeup_needed; struct kvm_vcpu *vcpu; - int i; + unsigned long i; s->wakeup_needed = false; @@ -270,7 +270,8 @@ int kvm_pic_read_irq(struct 
kvm *kvm) static void kvm_pic_reset(struct kvm_kpic_state *s) { - int irq, i; + int irq; + unsigned long i; struct kvm_vcpu *vcpu; u8 edge_irr = s->irr & ~s->elcr; bool found = false; diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 816a82515dcd..decfa36b7891 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -149,7 +149,7 @@ void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; if (RTC_GSI >= IOAPIC_NUM_PINS) return; @@ -184,7 +184,7 @@ static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic) static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index d5b72a08e566..39ad02d6dc63 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -45,9 +45,9 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, struct dest_map *dest_map) { - int i, r = -1; + int r = -1; struct kvm_vcpu *vcpu, *lowest = NULL; - unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]; + unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]; unsigned int dest_vcpus = 0; if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map)) @@ -320,7 +320,8 @@ int kvm_set_routing_entry(struct kvm *kvm, bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_vcpu **dest_vcpu) { - int i, r = 0; + int r = 0; + unsigned long i; struct kvm_vcpu *vcpu; if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu)) diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c index c7db2df50a7a..b469f45e3fe4 100644 --- a/arch/x86/kvm/kvm_onhyperv.c +++ b/arch/x86/kvm/kvm_onhyperv.c @@ -33,7 +33,8 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm, { struct kvm_arch *kvm_arch = &kvm->arch; struct kvm_vcpu *vcpu; - int ret = 0, i, nr_unique_valid_roots; + int ret = 0, nr_unique_valid_roots; + unsigned long i; hpa_t root; spin_lock(&kvm_arch->hv_root_tdp_lock); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index f206fc35deff..451e80306b51 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -185,7 +185,7 @@ void kvm_recalculate_apic_map(struct kvm *kvm) { struct kvm_apic_map *new, *old = NULL; struct kvm_vcpu *vcpu; - int i; + unsigned long i; u32 max_id = 255; /* enough space for any xAPIC ID */ /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. 
*/ @@ -1172,8 +1172,8 @@ void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_lapic *src = NULL; struct kvm_apic_map *map; struct kvm_vcpu *vcpu; - unsigned long bitmap; - int i, vcpu_idx; + unsigned long bitmap, i; + int vcpu_idx; bool ret; rcu_read_lock(); diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 8f9af7b7dbbe..b7200595cbd4 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -293,7 +293,7 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source, u32 icrl, u32 icrh) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { bool m = kvm_apic_match_dest(vcpu, source, diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 7656a2c5662a..322553322202 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -636,7 +636,8 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_vcpu *vcpu; - int i, ret; + unsigned long i; + int ret; if (!sev_es_guest(kvm)) return -ENOTTY; @@ -1593,7 +1594,7 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm) static int sev_lock_vcpus_for_migration(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i, j; + unsigned long i, j; kvm_for_each_vcpu(i, vcpu, kvm) { if (mutex_lock_killable(&vcpu->mutex)) @@ -1615,7 +1616,7 @@ out_unlock: static void sev_unlock_vcpus_for_migration(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { mutex_unlock(&vcpu->mutex); @@ -1642,7 +1643,7 @@ static void sev_migrate_from(struct kvm_sev_info *dst, static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) { - int i; + unsigned long i; struct kvm_vcpu *dst_vcpu, *src_vcpu; struct vcpu_svm *dst_svm, *src_svm; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0e6d11a726cd..96bcf2035bdc 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2816,7 +2816,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm) { struct kvm_arch *ka = &kvm->arch; struct kvm_vcpu *vcpu; - int i; + unsigned long i; write_seqcount_end(&ka->pvclock_sc); raw_spin_unlock_irq(&ka->tsc_write_lock); @@ -3065,7 +3065,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) static void kvmclock_update_fn(struct work_struct *work) { - int i; + unsigned long i; struct delayed_work *dwork = to_delayed_work(work); struct kvm_arch *ka = container_of(dwork, struct kvm_arch, kvmclock_update_work); @@ -5692,7 +5692,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) * VM-Exit. 
*/ struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) kvm_vcpu_kick(vcpu); @@ -5961,7 +5961,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) static int kvm_arch_suspend_notifier(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i, ret = 0; + unsigned long i; + int ret = 0; mutex_lock(&kvm->lock); kvm_for_each_vcpu(i, vcpu, kvm) { @@ -8388,7 +8389,8 @@ static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu) { struct kvm *kvm; struct kvm_vcpu *vcpu; - int i, send_ipi = 0; + int send_ipi = 0; + unsigned long i; /* * We allow guests to temporarily run on slowing clocks, @@ -8561,9 +8563,8 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = { static void pvclock_gtod_update_fn(struct work_struct *work) { struct kvm *kvm; - struct kvm_vcpu *vcpu; - int i; + unsigned long i; mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) @@ -10672,7 +10673,7 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm) { bool inhibit = false; struct kvm_vcpu *vcpu; - int i; + unsigned long i; down_write(&kvm->arch.apicv_update_lock); @@ -11160,7 +11161,7 @@ int kvm_arch_hardware_enable(void) { struct kvm *kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; int ret; u64 local_tsc; u64 max_tsc = 0; @@ -11413,7 +11414,7 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) static void kvm_free_vcpus(struct kvm *kvm) { - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; /* @@ -11659,7 +11660,7 @@ out_free: void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; /* * memslots->generation has been incremented. diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2201dc07126a..7da6086262c6 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -714,7 +714,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; if (id < 0) return NULL; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 594f90307b20..1c68384a7c4b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -305,8 +305,9 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, { struct kvm_vcpu *vcpu; struct cpumask *cpus; + unsigned long i; bool called; - int i, me; + int me; me = get_cpu(); @@ -453,7 +454,7 @@ static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_destroy_vcpus(struct kvm *kvm) { - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { @@ -3389,10 +3390,10 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) struct kvm *kvm = me->kvm; struct kvm_vcpu *vcpu; int last_boosted_vcpu = me->kvm->last_boosted_vcpu; + unsigned long i; int yielded = 0; int try = 3; int pass; - int i; kvm_vcpu_set_in_spin_loop(me, true); /* @@ -4201,7 +4202,7 @@ static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; int cleared = 0; @@ -5120,7 +5121,7 @@ static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; *val = 0; @@ -5133,7 +5134,7 @@ static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) static int 
kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) -- cgit v1.2.3-58-ga151 From afa319a54a8c760ba59683cd3c4318635049a664 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:07 +0100 Subject: KVM: Require total number of memslot pages to fit in an unsigned long Explicitly disallow creating more memslot pages than can fit in an unsigned long, KVM doesn't correctly handle a total number of memslot pages that doesn't fit in an unsigned long and remedying that would be a waste of time. For a 64-bit kernel, this is a nop as memslots are not allowed to overlap in the gfn address space. With a 32-bit kernel, userspace can at most address 3gb of virtual memory, whereas wrapping the total number of pages would require 4tb+ of guest physical memory. Even with x86's second address space for SMM, userspace would need to alias all of guest memory more than one _thousand_ times. And on older x86 hardware with MAXPHYADDR < 43, the guest couldn't actually access any of those aliases even if userspace lied about guest.MAXPHYADDR. On 390 and arm64, this is a nop as they don't support 32-bit hosts. On x86, practically speaking this is simply acknowledging reality as the existing kvm_mmu_calculate_default_mmu_pages() assumes the total number of pages fits in an "unsigned long". On PPC, this is likely a nop as every flavor of PPC KVM assumes gfns (and gpas!) fit in unsigned long. arch/powerpc/kvm/book3s_32_mmu_host.c goes a step further and fails the build if CONFIG_PTE_64BIT=y, which presumably means that it does't support 64-bit physical addresses. On MIPS, this is also likely a nop as the core MMU helpers assume gpas fit in unsigned long, e.g. see kvm_mips_##name##_pte. And finally, RISC-V is a "don't care" as it doesn't exist in any release, i.e. there is no established ABI to break. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: <1c2c91baf8e78acccd4dad38da591002e61c013c.1638817638.git.maciej.szmigiero@oracle.com> --- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) (limited to 'virt') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 66548287ed42..e38705359af5 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -552,6 +552,7 @@ struct kvm { */ struct mutex slots_arch_lock; struct mm_struct *mm; /* userspace tied to this vm */ + unsigned long nr_memslot_pages; struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct xarray vcpu_array; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1c68384a7c4b..538fd57ea339 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1638,6 +1638,15 @@ static int kvm_set_memslot(struct kvm *kvm, update_memslots(slots, new, change); slots = install_new_memslots(kvm, as_id, slots); + /* + * Update the total number of memslot pages before calling the arch + * hook so that architectures can consume the result directly. + */ + if (change == KVM_MR_DELETE) + kvm->nr_memslot_pages -= old.npages; + else if (change == KVM_MR_CREATE) + kvm->nr_memslot_pages += new->npages; + kvm_arch_commit_memory_region(kvm, mem, &old, new, change); /* Free the old memslot's metadata. Note, this is the full copy!!! 
*/ @@ -1668,6 +1677,9 @@ static int kvm_delete_memslot(struct kvm *kvm, if (!old->npages) return -EINVAL; + if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) + return -EIO; + memset(&new, 0, sizeof(new)); new.id = old->id; /* @@ -1751,6 +1763,13 @@ int __kvm_set_memory_region(struct kvm *kvm, if (!old.npages) { change = KVM_MR_CREATE; new.dirty_bitmap = NULL; + + /* + * To simplify KVM internals, the total number of pages across + * all memslots must fit in an unsigned long. + */ + if ((kvm->nr_memslot_pages + new.npages) < kvm->nr_memslot_pages) + return -EINVAL; } else { /* Modify an existing slot. */ if ((new.userspace_addr != old.userspace_addr) || (new.npages != old.npages) || -- cgit v1.2.3-58-ga151 From 47ea7d900b1cc66ec7a35a8b173ed16b01f9781b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:08 +0100 Subject: KVM: Open code kvm_delete_memslot() into its only caller Fold kvm_delete_memslot() into __kvm_set_memory_region() to free up the "kvm_delete_memslot()" name for use in a future helper. The delete logic isn't so complex/long that it truly needs a helper, and it will be simplified a wee bit further in upcoming commits. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: <2887631c31a82947faa488ab72f55f8c68b7c194.1638817638.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 42 +++++++++++++++++------------------------- 1 file changed, 17 insertions(+), 25 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 538fd57ea339..af2730858ebd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1668,29 +1668,6 @@ out_slots: return r; } -static int kvm_delete_memslot(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot *old, int as_id) -{ - struct kvm_memory_slot new; - - if (!old->npages) - return -EINVAL; - - if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) - return -EIO; - - memset(&new, 0, sizeof(new)); - new.id = old->id; - /* - * This is only for debugging purpose; it should never be referenced - * for a removed memslot. - */ - new.as_id = as_id; - - return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE); -} - /* * Allocate some memory and give it an address in the guest physical address * space. @@ -1747,8 +1724,23 @@ int __kvm_set_memory_region(struct kvm *kvm, old.id = id; } - if (!mem->memory_size) - return kvm_delete_memslot(kvm, mem, &old, as_id); + if (!mem->memory_size) { + if (!old.npages) + return -EINVAL; + + if (WARN_ON_ONCE(kvm->nr_memslot_pages < old.npages)) + return -EIO; + + memset(&new, 0, sizeof(new)); + new.id = id; + /* + * This is only for debugging purpose; it should never be + * referenced for a removed memslot. + */ + new.as_id = as_id; + + return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE); + } new.as_id = as_id; new.id = id; -- cgit v1.2.3-58-ga151 From 4e4d30cb9b8740e178731406aa28b96f12c6edbd Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:09 +0100 Subject: KVM: Resync only arch fields when slots_arch_lock gets reacquired There is no need to copy the whole memslot data after releasing slots_arch_lock for a moment to install temporary memslots copy in kvm_set_memslot() since this lock only protects the arch field of each memslot. Just resync this particular field after reacquiring slots_arch_lock. 
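
In other words, rather than a full memcpy() of the duplicated memslots, the resync after reacquiring the lock only needs a per-slot copy of the arch member, roughly as done by the kvm_copy_memslots_arch() helper added below:

	/* Resync only the arch field; everything else is already up to date. */
	for (i = 0; i < from->used_slots; i++)
		to->memslots[i].arch = from->memslots[i].arch;
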
Note, this also eliminates the need to manually clear the INVALID flag when restoring memslots; the "setting" of the INVALID flag was an unwanted side effect of copying the entire memslots. Since kvm_copy_memslots() has just one caller remaining now open-code it instead. Signed-off-by: Maciej S. Szmigiero [sean: tweak shortlog, note INVALID flag in changelog, revert comment] Reviewed-by: Sean Christopherson Message-Id: --- virt/kvm/kvm_main.c | 45 +++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index af2730858ebd..615d69bcde2c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1515,12 +1515,6 @@ static size_t kvm_memslots_size(int slots) (sizeof(struct kvm_memory_slot) * slots); } -static void kvm_copy_memslots(struct kvm_memslots *to, - struct kvm_memslots *from) -{ - memcpy(to, from, kvm_memslots_size(from->used_slots)); -} - /* * Note, at a minimum, the current number of used slots must be allocated, even * when deleting a memslot, as we need a complete duplicate of the memslots for @@ -1539,11 +1533,22 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); if (likely(slots)) - kvm_copy_memslots(slots, old); + memcpy(slots, old, kvm_memslots_size(old->used_slots)); return slots; } +static void kvm_copy_memslots_arch(struct kvm_memslots *to, + struct kvm_memslots *from) +{ + int i; + + WARN_ON_ONCE(to->used_slots != from->used_slots); + + for (i = 0; i < from->used_slots; i++) + to->memslots[i].arch = from->memslots[i].arch; +} + static int kvm_set_memslot(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *new, int as_id, @@ -1584,9 +1589,10 @@ static int kvm_set_memslot(struct kvm *kvm, slot->flags |= KVM_MEMSLOT_INVALID; /* - * We can re-use the memory from the old memslots. - * It will be overwritten with a copy of the new memslots - * after reacquiring the slots_arch_lock below. + * We can re-use the old memslots, the only difference from the + * newly installed memslots is the invalid flag, which will get + * dropped by update_memslots anyway. We'll also revert to the + * old memslots if preparing the new memory region fails. */ slots = install_new_memslots(kvm, as_id, slots); @@ -1603,12 +1609,14 @@ static int kvm_set_memslot(struct kvm *kvm, mutex_lock(&kvm->slots_arch_lock); /* - * The arch-specific fields of the memslots could have changed - * between releasing the slots_arch_lock in - * install_new_memslots and here, so get a fresh copy of the - * slots. + * The arch-specific fields of the now-active memslots could + * have been modified between releasing slots_arch_lock in + * install_new_memslots and re-acquiring slots_arch_lock above. + * Copy them to the inactive memslots. Arch code is required + * to retrieve memslots *after* acquiring slots_arch_lock, thus + * the active memslots are guaranteed to be fresh. 
*/ - kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id)); + kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, as_id)); } /* @@ -1657,13 +1665,10 @@ static int kvm_set_memslot(struct kvm *kvm, return 0; out_slots: - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { - slot = id_to_memslot(slots, new->id); - slot->flags &= ~KVM_MEMSLOT_INVALID; + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) slots = install_new_memslots(kvm, as_id, slots); - } else { + else mutex_unlock(&kvm->slots_arch_lock); - } kvfree(slots); return r; } -- cgit v1.2.3-58-ga151 From ce5f0215620c11a5829da7f30bebf3adeeef3345 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:10 +0100 Subject: KVM: Use "new" memslot's address space ID instead of dedicated param Now that the address space ID is stored in every slot, including fake slots used for deletion, use the slot's as_id instead of passing in the redundant information as a param to kvm_set_memslot(). This will greatly simplify future memslot work by avoiding passing a large number of variables around purely to honor @as_id. Drop a comment in the DELETE path about new->as_id being provided purely for debug, as that's now a lie. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: <03189577be214ab8530a4b3a3ee3ed1c2f9e5815.1638817639.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 615d69bcde2c..a7a1c872fe6d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1551,7 +1551,7 @@ static void kvm_copy_memslots_arch(struct kvm_memslots *to, static int kvm_set_memslot(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot *new, int as_id, + struct kvm_memory_slot *new, enum kvm_mr_change change) { struct kvm_memory_slot *slot, old; @@ -1574,7 +1574,7 @@ static int kvm_set_memslot(struct kvm *kvm, */ mutex_lock(&kvm->slots_arch_lock); - slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change); + slots = kvm_dup_memslots(__kvm_memslots(kvm, new->as_id), change); if (!slots) { mutex_unlock(&kvm->slots_arch_lock); return -ENOMEM; @@ -1594,7 +1594,7 @@ static int kvm_set_memslot(struct kvm *kvm, * dropped by update_memslots anyway. We'll also revert to the * old memslots if preparing the new memory region fails. */ - slots = install_new_memslots(kvm, as_id, slots); + slots = install_new_memslots(kvm, new->as_id, slots); /* From this point no new shadow pages pointing to a deleted, * or moved, memslot will be created. @@ -1616,7 +1616,7 @@ static int kvm_set_memslot(struct kvm *kvm, * to retrieve memslots *after* acquiring slots_arch_lock, thus * the active memslots are guaranteed to be fresh. */ - kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, as_id)); + kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, new->as_id)); } /* @@ -1633,7 +1633,7 @@ static int kvm_set_memslot(struct kvm *kvm, WARN_ON_ONCE(change != KVM_MR_CREATE); memset(&old, 0, sizeof(old)); old.id = new->id; - old.as_id = as_id; + old.as_id = new->as_id; } /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. 
*/ @@ -1644,7 +1644,7 @@ static int kvm_set_memslot(struct kvm *kvm, goto out_slots; update_memslots(slots, new, change); - slots = install_new_memslots(kvm, as_id, slots); + slots = install_new_memslots(kvm, new->as_id, slots); /* * Update the total number of memslot pages before calling the arch @@ -1666,7 +1666,7 @@ static int kvm_set_memslot(struct kvm *kvm, out_slots: if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) - slots = install_new_memslots(kvm, as_id, slots); + slots = install_new_memslots(kvm, new->as_id, slots); else mutex_unlock(&kvm->slots_arch_lock); kvfree(slots); @@ -1738,13 +1738,9 @@ int __kvm_set_memory_region(struct kvm *kvm, memset(&new, 0, sizeof(new)); new.id = id; - /* - * This is only for debugging purpose; it should never be - * referenced for a removed memslot. - */ new.as_id = as_id; - return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE); + return kvm_set_memslot(kvm, mem, &new, KVM_MR_DELETE); } new.as_id = as_id; @@ -1807,7 +1803,7 @@ int __kvm_set_memory_region(struct kvm *kvm, bitmap_set(new.dirty_bitmap, 0, new.npages); } - r = kvm_set_memslot(kvm, mem, &new, as_id, change); + r = kvm_set_memslot(kvm, mem, &new, change); if (r) goto out_bitmap; -- cgit v1.2.3-58-ga151 From 537a17b3149300987456e8949ccb991e604047d6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:11 +0100 Subject: KVM: Let/force architectures to deal with arch specific memslot data Pass the "old" slot to kvm_arch_prepare_memory_region() and force arch code to handle propagating arch specific data from "new" to "old" when necessary. This is a baby step towards dynamically allocating "new" from the get go, and is a (very) minor performance boost on x86 due to not unnecessarily copying arch data. For PPC HV, copy the rmap in the !CREATE and !DELETE paths, i.e. for MOVE and FLAGS_ONLY. This is functionally a nop as the previous behavior would overwrite the pointer for CREATE, and eventually discard/ignore it for DELETE. For x86, copy the arch data only for FLAGS_ONLY changes. Unlike PPC HV, x86 needs to reallocate arch data in the MOVE case as the size of x86's allocations depend on the alignment of the memslot's gfn. Opportunistically tweak kvm_arch_prepare_memory_region()'s param order to match the "commit" prototype. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero [mss: add missing RISCV kvm_arch_prepare_memory_region() change] Signed-off-by: Maciej S. 
Szmigiero Message-Id: <67dea5f11bbcfd71e3da5986f11e87f5dd4013f9.1638817639.git.maciej.szmigiero@oracle.com> --- arch/arm64/kvm/mmu.c | 7 ++++--- arch/mips/kvm/mips.c | 3 ++- arch/powerpc/include/asm/kvm_ppc.h | 10 ++++++---- arch/powerpc/kvm/book3s.c | 12 ++++++------ arch/powerpc/kvm/book3s_hv.c | 17 ++++++++++------- arch/powerpc/kvm/book3s_pr.c | 9 +++++---- arch/powerpc/kvm/booke.c | 5 +++-- arch/powerpc/kvm/powerpc.c | 5 +++-- arch/riscv/kvm/mmu.c | 7 ++++--- arch/s390/kvm/kvm-s390.c | 3 ++- arch/x86/kvm/x86.c | 15 +++++++++++---- include/linux/kvm_host.h | 3 ++- virt/kvm/kvm_main.c | 5 +---- 13 files changed, 59 insertions(+), 42 deletions(-) (limited to 'virt') diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 326cdfec74a1..5d474360bf6c 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1486,8 +1486,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { hva_t hva = mem->userspace_addr; @@ -1502,7 +1503,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * Prevent userspace from creating a memory region outside of the IPA * space addressable by the KVM guest IPA space. */ - if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) + if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) return -EFAULT; mmap_read_lock(current->mm); @@ -1536,7 +1537,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (vma->vm_flags & VM_PFNMAP) { /* IO region dirty page logging not allowed */ - if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { + if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { ret = -EINVAL; break; } diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 043204cd585f..b2ce10784eb0 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -214,8 +214,9 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { return 0; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 671fbd1a765e..b01760dd1374 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -200,12 +200,13 @@ extern void kvmppc_core_destroy_vm(struct kvm *kvm); extern void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change); extern void kvmppc_core_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, @@ -274,12 +275,13 @@ struct kvmppc_ops { int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log); void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot); int (*prepare_memory_region)(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + 
struct kvm_memory_slot *new, enum kvm_mr_change change); void (*commit_memory_region)(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index b785f6772391..8250e8308674 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -847,17 +847,17 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) } int kvmppc_core_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - const struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) { - return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem, - change); + return kvm->arch.kvm_ops->prepare_memory_region(kvm, mem, old, new, change); } void kvmppc_core_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 32873c6985f9..d7594d49d288 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4854,17 +4854,20 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot) } static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, - struct kvm_memory_slot *slot, - const struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) { unsigned long npages = mem->memory_size >> PAGE_SHIFT; if (change == KVM_MR_CREATE) { - slot->arch.rmap = vzalloc(array_size(npages, - sizeof(*slot->arch.rmap))); - if (!slot->arch.rmap) + new->arch.rmap = vzalloc(array_size(npages, + sizeof(*new->arch.rmap))); + if (!new->arch.rmap) return -ENOMEM; + } else if (change != KVM_MR_DELETE) { + new->arch.rmap = old->arch.rmap; } return 0; @@ -4872,7 +4875,7 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index bb0612c49b92..ffb559cf25f4 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1899,16 +1899,17 @@ static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, } static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, - struct kvm_memory_slot *memslot, - const struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) { return 0; } static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { diff --git a/arch/powerpc/kvm/booke.c 
b/arch/powerpc/kvm/booke.c index 8c15c90dd3a9..93c2ac2bee09 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1821,8 +1821,9 @@ void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) } int kvmppc_core_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { return 0; @@ -1830,7 +1831,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, void kvmppc_core_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 98f5d90ebf5a..e875874cf836 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -698,11 +698,12 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { - return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change); + return kvmppc_core_prepare_memory_region(kvm, mem, old, new, change); } void kvm_arch_commit_memory_region(struct kvm *kvm, diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index fc058ff5f4b6..50380f525345 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -477,8 +477,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { hva_t hva = mem->userspace_addr; @@ -494,7 +495,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * Prevent userspace from creating a memory region outside of the GPA * space addressable by the KVM guest GPA space. */ - if ((memslot->base_gfn + memslot->npages) >= + if ((new->base_gfn + new->npages) >= (stage2_gpa_size >> PAGE_SHIFT)) return -EFAULT; @@ -541,7 +542,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, pa += vm_start - vma->vm_start; /* IO region dirty page logging not allowed */ - if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { + if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { ret = -EINVAL; goto out; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index fd5f4ec1b4b9..3beefadda0db 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5007,8 +5007,9 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { /* A few sanity checks. 
We can have memory slots which have to be diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 96bcf2035bdc..287ff4e43a13 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11674,13 +11674,20 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - const struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) { if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) - return kvm_alloc_memslot_metadata(kvm, memslot, + return kvm_alloc_memslot_metadata(kvm, new, mem->memory_size >> PAGE_SHIFT); + + if (change == KVM_MR_FLAGS_ONLY) + memcpy(&new->arch, &old->arch, sizeof(old->arch)); + else if (WARN_ON_ONCE(change != KVM_MR_DELETE)) + return -EIO; + return 0; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e38705359af5..cb7311dc6f32 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -833,8 +833,9 @@ int __kvm_set_memory_region(struct kvm *kvm, void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change); void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a7a1c872fe6d..46060cc542ef 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1636,10 +1636,7 @@ static int kvm_set_memslot(struct kvm *kvm, old.as_id = new->as_id; } - /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */ - memcpy(&new->arch, &old.arch, sizeof(old.arch)); - - r = kvm_arch_prepare_memory_region(kvm, new, mem, change); + r = kvm_arch_prepare_memory_region(kvm, mem, &old, new, change); if (r) goto out_slots; -- cgit v1.2.3-58-ga151 From 6a99c6e3f52a6f0d4c6ebcfa7359c718a19ffbe6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:18 +0100 Subject: KVM: Stop passing kvm_userspace_memory_region to arch memslot hooks Drop the @mem param from kvm_arch_{prepare,commit}_memory_region() now that its use has been removed in all architectures. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. 
Szmigiero Message-Id: --- arch/arm64/kvm/mmu.c | 2 -- arch/mips/kvm/mips.c | 2 -- arch/powerpc/kvm/powerpc.c | 2 -- arch/riscv/kvm/mmu.c | 2 -- arch/s390/kvm/kvm-s390.c | 2 -- arch/x86/kvm/x86.c | 2 -- include/linux/kvm_host.h | 2 -- virt/kvm/kvm_main.c | 9 ++++----- 8 files changed, 4 insertions(+), 19 deletions(-) (limited to 'virt') diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index dd95350ea15d..9b2d881ccf49 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1463,7 +1463,6 @@ out: } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -1486,7 +1485,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index bda717301db8..e59cb6246f76 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -214,7 +214,6 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -223,7 +222,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 575140ecb23c..f1233500f4dc 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -698,7 +698,6 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -707,7 +706,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index 573ade138204..7d884b15cf5e 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -462,7 +462,6 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -477,7 +476,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 959a568a97be..5dddd7817905 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5007,7 +5007,6 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change 
change) @@ -5035,7 +5034,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2a7567adb799..f862c514c2c0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11674,7 +11674,6 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -11778,7 +11777,6 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cb7311dc6f32..da0d4f21a150 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -833,12 +833,10 @@ int __kvm_set_memory_region(struct kvm *kvm, void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change); void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 46060cc542ef..373079a03710 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1550,7 +1550,6 @@ static void kvm_copy_memslots_arch(struct kvm_memslots *to, } static int kvm_set_memslot(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *new, enum kvm_mr_change change) { @@ -1636,7 +1635,7 @@ static int kvm_set_memslot(struct kvm *kvm, old.as_id = new->as_id; } - r = kvm_arch_prepare_memory_region(kvm, mem, &old, new, change); + r = kvm_arch_prepare_memory_region(kvm, &old, new, change); if (r) goto out_slots; @@ -1652,7 +1651,7 @@ static int kvm_set_memslot(struct kvm *kvm, else if (change == KVM_MR_CREATE) kvm->nr_memslot_pages += new->npages; - kvm_arch_commit_memory_region(kvm, mem, &old, new, change); + kvm_arch_commit_memory_region(kvm, &old, new, change); /* Free the old memslot's metadata. Note, this is the full copy!!! */ if (change == KVM_MR_DELETE) @@ -1737,7 +1736,7 @@ int __kvm_set_memory_region(struct kvm *kvm, new.id = id; new.as_id = as_id; - return kvm_set_memslot(kvm, mem, &new, KVM_MR_DELETE); + return kvm_set_memslot(kvm, &new, KVM_MR_DELETE); } new.as_id = as_id; @@ -1800,7 +1799,7 @@ int __kvm_set_memory_region(struct kvm *kvm, bitmap_set(new.dirty_bitmap, 0, new.npages); } - r = kvm_set_memslot(kvm, mem, &new, change); + r = kvm_set_memslot(kvm, &new, change); if (r) goto out_bitmap; -- cgit v1.2.3-58-ga151 From 07921665a651918350bc6653d4ca8a516a867b4b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:19 +0100 Subject: KVM: Use prepare/commit hooks to handle generic memslot metadata updates Handle the generic memslot metadata, a.k.a. dirty bitmap, updates at the same time that arch handles it's own metadata updates, i.e. 
at memslot prepare and commit. This will simplify converting @new to a dynamically allocated object, and more closely aligns common KVM with architecture code. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: <2ddd5446e3706fe3c1e52e3df279f04c458be830.1638817640.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 109 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 66 insertions(+), 43 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 373079a03710..ec5567e8442b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1549,6 +1549,69 @@ static void kvm_copy_memslots_arch(struct kvm_memslots *to, to->memslots[i].arch = from->memslots[i].arch; } +static int kvm_prepare_memory_region(struct kvm *kvm, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + int r; + + /* + * If dirty logging is disabled, nullify the bitmap; the old bitmap + * will be freed on "commit". If logging is enabled in both old and + * new, reuse the existing bitmap. If logging is enabled only in the + * new and KVM isn't using a ring buffer, allocate and initialize a + * new bitmap. + */ + if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) + new->dirty_bitmap = NULL; + else if (old->dirty_bitmap) + new->dirty_bitmap = old->dirty_bitmap; + else if (!kvm->dirty_ring_size) { + r = kvm_alloc_dirty_bitmap(new); + if (r) + return r; + + if (kvm_dirty_log_manual_protect_and_init_set(kvm)) + bitmap_set(new->dirty_bitmap, 0, new->npages); + } + + r = kvm_arch_prepare_memory_region(kvm, old, new, change); + + /* Free the bitmap on failure if it was allocated above. */ + if (r && new->dirty_bitmap && !old->dirty_bitmap) + kvm_destroy_dirty_bitmap(new); + + return r; +} + +static void kvm_commit_memory_region(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + /* + * Update the total number of memslot pages before calling the arch + * hook so that architectures can consume the result directly. + */ + if (change == KVM_MR_DELETE) + kvm->nr_memslot_pages -= old->npages; + else if (change == KVM_MR_CREATE) + kvm->nr_memslot_pages += new->npages; + + kvm_arch_commit_memory_region(kvm, old, new, change); + + /* + * Free the old memslot's metadata. On DELETE, free the whole thing, + * otherwise free the dirty bitmap as needed (the below effectively + * checks both the flags and whether a ring buffer is being used). + */ + if (change == KVM_MR_DELETE) + kvm_free_memslot(kvm, old); + else if (old->dirty_bitmap && !new->dirty_bitmap) + kvm_destroy_dirty_bitmap(old); +} + static int kvm_set_memslot(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -1635,27 +1698,14 @@ static int kvm_set_memslot(struct kvm *kvm, old.as_id = new->as_id; } - r = kvm_arch_prepare_memory_region(kvm, &old, new, change); + r = kvm_prepare_memory_region(kvm, &old, new, change); if (r) goto out_slots; update_memslots(slots, new, change); slots = install_new_memslots(kvm, new->as_id, slots); - /* - * Update the total number of memslot pages before calling the arch - * hook so that architectures can consume the result directly. 
- */ - if (change == KVM_MR_DELETE) - kvm->nr_memslot_pages -= old.npages; - else if (change == KVM_MR_CREATE) - kvm->nr_memslot_pages += new->npages; - - kvm_arch_commit_memory_region(kvm, &old, new, change); - - /* Free the old memslot's metadata. Note, this is the full copy!!! */ - if (change == KVM_MR_DELETE) - kvm_free_memslot(kvm, &old); + kvm_commit_memory_region(kvm, &old, new, change); kvfree(slots); return 0; @@ -1751,7 +1801,6 @@ int __kvm_set_memory_region(struct kvm *kvm, if (!old.npages) { change = KVM_MR_CREATE; - new.dirty_bitmap = NULL; /* * To simplify KVM internals, the total number of pages across @@ -1771,9 +1820,6 @@ int __kvm_set_memory_region(struct kvm *kvm, change = KVM_MR_FLAGS_ONLY; else /* Nothing to change. */ return 0; - - /* Copy dirty_bitmap from the current memslot. */ - new.dirty_bitmap = old.dirty_bitmap; } if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { @@ -1787,30 +1833,7 @@ int __kvm_set_memory_region(struct kvm *kvm, } } - /* Allocate/free page dirty bitmap as needed */ - if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) - new.dirty_bitmap = NULL; - else if (!new.dirty_bitmap && !kvm->dirty_ring_size) { - r = kvm_alloc_dirty_bitmap(&new); - if (r) - return r; - - if (kvm_dirty_log_manual_protect_and_init_set(kvm)) - bitmap_set(new.dirty_bitmap, 0, new.npages); - } - - r = kvm_set_memslot(kvm, &new, change); - if (r) - goto out_bitmap; - - if (old.dirty_bitmap && !new.dirty_bitmap) - kvm_destroy_dirty_bitmap(&old); - return 0; - -out_bitmap: - if (new.dirty_bitmap && !old.dirty_bitmap) - kvm_destroy_dirty_bitmap(&new); - return r; + return kvm_set_memslot(kvm, &new, change); } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); -- cgit v1.2.3-58-ga151 From 7cd08553ab103a7ebca79035eb35b73418b2f475 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:22 +0100 Subject: KVM: Don't make a full copy of the old memslot in __kvm_set_memory_region() Stop making a full copy of the old memslot in __kvm_set_memory_region() now that metadata updates are handled by kvm_set_memslot(), i.e. now that the old memslot's dirty bitmap doesn't need to be referenced after the memslot and its pointer is modified/invalidated by kvm_set_memslot(). No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: <5dce0946b41bba8c83f6e3424c6955c56bcc9f86.1638817640.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 35 +++++++++++++---------------------- 1 file changed, 13 insertions(+), 22 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ec5567e8442b..8ccb1ac82d38 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1730,8 +1730,8 @@ out_slots: int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem) { - struct kvm_memory_slot old, new; - struct kvm_memory_slot *tmp; + struct kvm_memory_slot *old, *tmp; + struct kvm_memory_slot new; enum kvm_mr_change change; int as_id, id; int r; @@ -1761,25 +1761,16 @@ int __kvm_set_memory_region(struct kvm *kvm, return -EINVAL; /* - * Make a full copy of the old memslot, the pointer will become stale - * when the memslots are re-sorted by update_memslots(), and the old - * memslot needs to be referenced after calling update_memslots(), e.g. - * to free its resources and for arch specific behavior. + * Note, the old memslot (and the pointer itself!) may be invalidated + * and/or destroyed by kvm_set_memslot(). 
*/ - tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id); - if (tmp) { - old = *tmp; - tmp = NULL; - } else { - memset(&old, 0, sizeof(old)); - old.id = id; - } + old = id_to_memslot(__kvm_memslots(kvm, as_id), id); if (!mem->memory_size) { - if (!old.npages) + if (!old || !old->npages) return -EINVAL; - if (WARN_ON_ONCE(kvm->nr_memslot_pages < old.npages)) + if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) return -EIO; memset(&new, 0, sizeof(new)); @@ -1799,7 +1790,7 @@ int __kvm_set_memory_region(struct kvm *kvm, if (new.npages > KVM_MEM_MAX_NR_PAGES) return -EINVAL; - if (!old.npages) { + if (!old || !old->npages) { change = KVM_MR_CREATE; /* @@ -1809,14 +1800,14 @@ int __kvm_set_memory_region(struct kvm *kvm, if ((kvm->nr_memslot_pages + new.npages) < kvm->nr_memslot_pages) return -EINVAL; } else { /* Modify an existing slot. */ - if ((new.userspace_addr != old.userspace_addr) || - (new.npages != old.npages) || - ((new.flags ^ old.flags) & KVM_MEM_READONLY)) + if ((new.userspace_addr != old->userspace_addr) || + (new.npages != old->npages) || + ((new.flags ^ old->flags) & KVM_MEM_READONLY)) return -EINVAL; - if (new.base_gfn != old.base_gfn) + if (new.base_gfn != old->base_gfn) change = KVM_MR_MOVE; - else if (new.flags != old.flags) + else if (new.flags != old->flags) change = KVM_MR_FLAGS_ONLY; else /* Nothing to change. */ return 0; -- cgit v1.2.3-58-ga151 From c928bfc2632fa3dd6a3bd4504ac6d8e42302287a Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:25 +0100 Subject: KVM: Integrate gfn_to_memslot_approx() into search_memslots() s390 arch has gfn_to_memslot_approx() which is almost identical to search_memslots(), differing only in that in case the gfn falls in a hole one of the memslots bordering the hole is returned. Add this lookup mode as an option to search_memslots() so we don't have two almost identical functions for looking up a memslot by its gfn. Signed-off-by: Maciej S. Szmigiero [sean: tweaked helper names to keep gfn_to_memslot_approx() in s390] Reviewed-by: Sean Christopherson Message-Id: <171cd89b52c718dbe180ecd909b4437a64a7e2ec.1638817640.git.maciej.szmigiero@oracle.com> --- arch/s390/kvm/kvm-s390.c | 45 ++++++++------------------------------------- include/linux/kvm_host.h | 35 +++++++++++++++++++++++++++-------- virt/kvm/kvm_main.c | 2 +- 3 files changed, 36 insertions(+), 46 deletions(-) (limited to 'virt') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 251059ff81fc..631be750af08 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1943,41 +1943,6 @@ out: /* for consistency */ #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX) -/* - * Similar to gfn_to_memslot, but returns the index of a memslot also when the - * address falls in a hole. In that case the index of one of the memslots - * bordering the hole is returned. 
- */ -static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn) -{ - int start = 0, end = slots->used_slots; - int slot = atomic_read(&slots->last_used_slot); - struct kvm_memory_slot *memslots = slots->memslots; - - if (gfn >= memslots[slot].base_gfn && - gfn < memslots[slot].base_gfn + memslots[slot].npages) - return slot; - - while (start < end) { - slot = start + (end - start) / 2; - - if (gfn >= memslots[slot].base_gfn) - end = slot; - else - start = slot + 1; - } - - if (start >= slots->used_slots) - return slots->used_slots - 1; - - if (gfn >= memslots[start].base_gfn && - gfn < memslots[start].base_gfn + memslots[start].npages) { - atomic_set(&slots->last_used_slot, start); - } - - return start; -} - static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, u8 *res, unsigned long bufsize) { @@ -2001,11 +1966,17 @@ static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, return 0; } +static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots, + gfn_t gfn) +{ + return ____gfn_to_memslot(slots, gfn, true); +} + static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots, unsigned long cur_gfn) { - int slotidx = gfn_to_memslot_approx(slots, cur_gfn); - struct kvm_memory_slot *ms = slots->memslots + slotidx; + struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn); + int slotidx = ms - slots->memslots; unsigned long ofs = cur_gfn - ms->base_gfn; if (ms->base_gfn + ms->npages <= cur_gfn) { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index da0d4f21a150..2f80ce84fbcf 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1233,10 +1233,14 @@ try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) * Returns a pointer to the memslot that contains gfn and records the index of * the slot in index. Otherwise returns NULL. * + * With "approx" set returns the memslot also when the address falls + * in a hole. In that case one of the memslots bordering the hole is + * returned. + * * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! */ static inline struct kvm_memory_slot * -search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index) +search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index, bool approx) { int start = 0, end = slots->used_slots; struct kvm_memory_slot *memslots = slots->memslots; @@ -1254,22 +1258,26 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index) start = slot + 1; } + if (approx && start >= slots->used_slots) { + *index = slots->used_slots - 1; + return &memslots[slots->used_slots - 1]; + } + slot = try_get_memslot(slots, start, gfn); if (slot) { *index = start; return slot; } + if (approx) { + *index = start; + return &memslots[start]; + } return NULL; } -/* - * __gfn_to_memslot() and its descendants are here because it is called from - * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot() - * itself isn't here as an inline because that would bloat other code too much. 
- */ static inline struct kvm_memory_slot * -__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) +____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx) { struct kvm_memory_slot *slot; int slot_index = atomic_read(&slots->last_used_slot); @@ -1278,7 +1286,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) if (slot) return slot; - slot = search_memslots(slots, gfn, &slot_index); + slot = search_memslots(slots, gfn, &slot_index, approx); if (slot) { atomic_set(&slots->last_used_slot, slot_index); return slot; @@ -1287,6 +1295,17 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) return NULL; } +/* + * __gfn_to_memslot() and its descendants are here to allow arch code to inline + * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline + * because that would bloat other code too much. + */ +static inline struct kvm_memory_slot * +__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) +{ + return ____gfn_to_memslot(slots, gfn, false); +} + static inline unsigned long __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 8ccb1ac82d38..6ca076ae64a2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2141,7 +2141,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn * search_memslots() instead of __gfn_to_memslot() to avoid * thrashing the VM-wide last_used_index in kvm_memslots. */ - slot = search_memslots(slots, gfn, &slot_index); + slot = search_memslots(slots, gfn, &slot_index, false); if (slot) { vcpu->last_used_slot = slot_index; return slot; -- cgit v1.2.3-58-ga151 From 1e8617d37fc36407f9fce9c08ef8d254613c00de Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:26 +0100 Subject: KVM: Move WARN on invalid memslot index to update_memslots() Since kvm_memslot_move_forward() can theoretically return a negative memslot index even when kvm_memslot_move_backward() returned a positive one (and so did not WARN) let's just move the warning to the common code. Signed-off-by: Maciej S. Szmigiero Reviewed-by: Claudio Imbrenda Reviewed-by: Sean Christopherson Message-Id: --- virt/kvm/kvm_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6ca076ae64a2..a60d09beef61 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1322,8 +1322,7 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, struct kvm_memory_slot *mslots = slots->memslots; int i; - if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) || - WARN_ON_ONCE(!slots->used_slots)) + if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots) return -1; /* @@ -1427,6 +1426,9 @@ static void update_memslots(struct kvm_memslots *slots, i = kvm_memslot_move_backward(slots, memslot); i = kvm_memslot_move_forward(slots, memslot, i); + if (WARN_ON_ONCE(i < 0)) + return; + /* * Copy the memslot to its new position in memslots and update * its index accordingly. -- cgit v1.2.3-58-ga151 From 26b8345abc75a7404716864710930407b7d873f9 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:27 +0100 Subject: KVM: Resolve memslot ID via a hash table instead of via a static array Memslot ID to the corresponding memslot mappings are currently kept as indices in static id_to_index array. The size of this array depends on the maximum allowed memslot count (regardless of the number of memslots actually in use). 
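A minimal sketch of the bucketed lookup this change moves to (described further below), using the generic <linux/hashtable.h> helpers; the struct and function names here are invented for illustration, while the real fields (id_node, id_hash) and the 7-bit bucket count appear in the diff that follows.

#include <linux/hashtable.h>

struct example_slot {
	struct hlist_node id_node;	/* linkage into the id hash table */
	int id;				/* slot ID, used as the hash key */
};

/* 7-bit bucket count: 128 buckets, independent of the memslot cap. */
static DEFINE_HASHTABLE(example_id_hash, 7);

static void example_slot_add(struct example_slot *slot)
{
	hash_add(example_id_hash, &slot->id_node, slot->id);
}

static struct example_slot *example_id_to_slot(int id)
{
	struct example_slot *slot;

	/* Walk only the bucket that @id hashes to, not every slot. */
	hash_for_each_possible(example_id_hash, slot, id_node, id)
		if (slot->id == id)
			return slot;

	return NULL;
}

Unlike the old id_to_index array, the table's footprint is set by the bucket count rather than by the maximum number of memslots the API allows.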
This has become especially problematic recently, when memslot count cap was removed, so the maximum count is now full 32k memslots - the maximum allowed by the current KVM API. Keeping these IDs in a hash table (instead of an array) avoids this problem. Resolving a memslot ID to the actual memslot (instead of its index) will also enable transitioning away from an array-based implementation of the whole memslots structure in a later commit. Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Message-Id: <117fb2c04320e6cd6cf34f205a72eadb0aa8d5f9.1638817640.git.maciej.szmigiero@oracle.com> --- include/linux/kvm_host.h | 25 ++++++++----- virt/kvm/kvm_main.c | 95 ++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 91 insertions(+), 29 deletions(-) (limited to 'virt') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2f80ce84fbcf..79db70a8323e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -426,6 +427,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) struct kvm_memory_slot { + struct hlist_node id_node; gfn_t base_gfn; unsigned long npages; unsigned long *dirty_bitmap; @@ -527,8 +529,15 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) */ struct kvm_memslots { u64 generation; - /* The mapping table from slot id to the index in memslots[]. */ - short id_to_index[KVM_MEM_SLOTS_NUM]; + /* + * The mapping table from slot id to the index in memslots[]. + * + * 7-bit bucket count matches the size of the old id to index array for + * 512 slots, while giving good performance with this slot count. + * Higher bucket counts bring only small performance improvements but + * always result in higher memory usage (even for lower memslot counts). + */ + DECLARE_HASHTABLE(id_hash, 7); atomic_t last_used_slot; int used_slots; struct kvm_memory_slot memslots[]; @@ -796,16 +805,14 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) static inline struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id) { - int index = slots->id_to_index[id]; struct kvm_memory_slot *slot; - if (index < 0) - return NULL; - - slot = &slots->memslots[index]; + hash_for_each_possible(slots->id_hash, slot, id_node, id) { + if (slot->id == id) + return slot; + } - WARN_ON(slot->id != id); - return slot; + return NULL; } /* diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a60d09beef61..dbff2ac9a8e3 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -867,15 +867,13 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm) static struct kvm_memslots *kvm_alloc_memslots(void) { - int i; struct kvm_memslots *slots; slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT); if (!slots) return NULL; - for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) - slots->id_to_index[i] = -1; + hash_init(slots->id_hash); return slots; } @@ -1274,17 +1272,48 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) return 0; } +static void kvm_replace_memslot(struct kvm_memslots *slots, + struct kvm_memory_slot *old, + struct kvm_memory_slot *new) +{ + /* + * Remove the old memslot from the hash list, copying the node data + * would corrupt the list. + */ + if (old) { + hash_del(&old->id_node); + + if (!new) + return; + + /* Copy the source *data*, not the pointer, to the destination. 
*/ + *new = *old; + } + + /* (Re)Add the new memslot. */ + hash_add(slots->id_hash, &new->id_node, new->id); +} + +static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src) +{ + struct kvm_memory_slot *mslots = slots->memslots; + + kvm_replace_memslot(slots, &mslots[src], &mslots[dst]); +} + /* * Delete a memslot by decrementing the number of used slots and shifting all * other entries in the array forward one spot. + * @memslot is a detached dummy struct with just .id and .as_id filled. */ static inline void kvm_memslot_delete(struct kvm_memslots *slots, struct kvm_memory_slot *memslot) { struct kvm_memory_slot *mslots = slots->memslots; + struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id); int i; - if (WARN_ON(slots->id_to_index[memslot->id] == -1)) + if (WARN_ON(!oldslot)) return; slots->used_slots--; @@ -1292,12 +1321,17 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots, if (atomic_read(&slots->last_used_slot) >= slots->used_slots) atomic_set(&slots->last_used_slot, 0); - for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) { - mslots[i] = mslots[i + 1]; - slots->id_to_index[mslots[i].id] = i; - } + /* + * Remove the to-be-deleted memslot from the list _before_ shifting + * the trailing memslots forward, its data will be overwritten. + * Defer the (somewhat pointless) copying of the memslot until after + * the last slot has been shifted to avoid overwriting said last slot. + */ + kvm_replace_memslot(slots, oldslot, NULL); + + for (i = oldslot - mslots; i < slots->used_slots; i++) + kvm_shift_memslot(slots, i, i + 1); mslots[i] = *memslot; - slots->id_to_index[memslot->id] = -1; } /* @@ -1315,30 +1349,39 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) * itself is not preserved in the array, i.e. not swapped at this time, only * its new index into the array is tracked. Returns the changed memslot's * current index into the memslots array. + * The memslot at the returned index will not be in @slots->id_hash by then. + * @memslot is a detached struct with desired final data of the changed slot. */ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, struct kvm_memory_slot *memslot) { struct kvm_memory_slot *mslots = slots->memslots; + struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id); int i; - if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots) + if (!oldslot || !slots->used_slots) return -1; + /* + * Delete the slot from the hash table before sorting the remaining + * slots, the slot's data may be overwritten when copying slots as part + * of the sorting proccess. update_memslots() will unconditionally + * rewrite the entire slot and re-add it to the hash table. + */ + kvm_replace_memslot(slots, oldslot, NULL); + /* * Move the target memslot backward in the array by shifting existing * memslots with a higher GFN (than the target memslot) towards the * front of the array. */ - for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) { + for (i = oldslot - mslots; i < slots->used_slots - 1; i++) { if (memslot->base_gfn > mslots[i + 1].base_gfn) break; WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn); - /* Shift the next memslot forward one and update its index. */ - mslots[i] = mslots[i + 1]; - slots->id_to_index[mslots[i].id] = i; + kvm_shift_memslot(slots, i, i + 1); } return i; } @@ -1349,6 +1392,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, * is not preserved in the array, i.e. 
not swapped at this time, only its new * index into the array is tracked. Returns the changed memslot's final index * into the memslots array. + * The memslot at the returned index will not be in @slots->id_hash by then. + * @memslot is a detached struct with desired final data of the new or + * changed slot. + * Assumes that the memslot at @start index is not in @slots->id_hash. */ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, struct kvm_memory_slot *memslot, @@ -1363,9 +1410,7 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn); - /* Shift the next memslot back one and update its index. */ - mslots[i] = mslots[i - 1]; - slots->id_to_index[mslots[i].id] = i; + kvm_shift_memslot(slots, i, i - 1); } return i; } @@ -1410,6 +1455,9 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, * most likely to be referenced, sorting it to the front of the array was * advantageous. The current binary search starts from the middle of the array * and uses an LRU pointer to improve performance for all memslots and GFNs. + * + * @memslot is a detached struct, not a part of the current or new memslot + * array. */ static void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *memslot, @@ -1434,7 +1482,7 @@ static void update_memslots(struct kvm_memslots *slots, * its index accordingly. */ slots->memslots[i] = *memslot; - slots->id_to_index[memslot->id] = i; + kvm_replace_memslot(slots, NULL, &slots->memslots[i]); } } @@ -1527,6 +1575,7 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, { struct kvm_memslots *slots; size_t new_size; + struct kvm_memory_slot *memslot; if (change == KVM_MR_CREATE) new_size = kvm_memslots_size(old->used_slots + 1); @@ -1534,8 +1583,14 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, new_size = kvm_memslots_size(old->used_slots); slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); - if (likely(slots)) - memcpy(slots, old, kvm_memslots_size(old->used_slots)); + if (unlikely(!slots)) + return NULL; + + memcpy(slots, old, kvm_memslots_size(old->used_slots)); + + hash_init(slots->id_hash); + kvm_for_each_memslot(memslot, slots) + hash_add(slots->id_hash, &memslot->id_node, memslot->id); return slots; } -- cgit v1.2.3-58-ga151 From ed922739c9199bf515a3e7fec3e319ce1edeef2a Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:28 +0100 Subject: KVM: Use interval tree to do fast hva lookup in memslots The current memslots implementation only allows quick binary search by gfn, quick lookup by hva is not possible - the implementation has to do a linear scan of the whole memslots array, even though the operation being performed might apply just to a single memslot. This significantly hurts performance of per-hva operations with higher memslot counts. Since hva ranges can overlap between memslots an interval tree is needed for tracking them. [sean: handle interval tree updates in kvm_replace_memslot()] Signed-off-by: Maciej S. 
Szmigiero Message-Id: --- arch/arm64/kvm/Kconfig | 1 + arch/mips/kvm/Kconfig | 1 + arch/powerpc/kvm/Kconfig | 1 + arch/s390/kvm/Kconfig | 1 + arch/x86/kvm/Kconfig | 1 + include/linux/kvm_host.h | 3 +++ virt/kvm/kvm_main.c | 53 +++++++++++++++++++++++++++++++++++------------- 7 files changed, 47 insertions(+), 14 deletions(-) (limited to 'virt') diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 8ffcbe29395e..f1f8fc069a97 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -39,6 +39,7 @@ menuconfig KVM select HAVE_KVM_IRQ_BYPASS select HAVE_KVM_VCPU_RUN_PID_CHANGE select SCHED_INFO + select INTERVAL_TREE help Support hosting virtualized guest machines. diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index a77297480f56..91d197bee9c0 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -27,6 +27,7 @@ config KVM select KVM_MMIO select MMU_NOTIFIER select SRCU + select INTERVAL_TREE help Support for hosting Guest kernels. diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index ff581d70f20c..e4c24f524ba8 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -26,6 +26,7 @@ config KVM select KVM_VFIO select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS + select INTERVAL_TREE config KVM_BOOK3S_HANDLER bool diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 67a8e770e369..2e84d3922f7c 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -33,6 +33,7 @@ config KVM select HAVE_KVM_NO_POLL select SRCU select KVM_VFIO + select INTERVAL_TREE help Support hosting paravirtualized guest machines using the SIE virtualization capability on the mainframe. This should work diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 619186138176..7618bef0a4a9 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -43,6 +43,7 @@ config KVM select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_VFIO select SRCU + select INTERVAL_TREE select HAVE_KVM_PM_NOTIFIER if PM help Support hosting fully virtualized guest machines using hardware diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 79db70a8323e..9552ad6d6652 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -428,6 +429,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) struct kvm_memory_slot { struct hlist_node id_node; + struct interval_tree_node hva_node; gfn_t base_gfn; unsigned long npages; unsigned long *dirty_bitmap; @@ -529,6 +531,7 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) */ struct kvm_memslots { u64 generation; + struct rb_root_cached hva_tree; /* * The mapping table from slot id to the index in memslots[]. 
* diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index dbff2ac9a8e3..6ba7468bdbe3 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -512,6 +512,12 @@ static void kvm_null_fn(void) } #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) +/* Iterate over each memslot intersecting [start, last] (inclusive) range */ +#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ + for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \ + node; \ + node = interval_tree_iter_next(node, start, last)) \ + static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, const struct kvm_hva_range *range) { @@ -521,6 +527,9 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, struct kvm_memslots *slots; int i, idx; + if (WARN_ON_ONCE(range->end <= range->start)) + return 0; + /* A null handler is allowed if and only if on_lock() is provided. */ if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) && IS_KVM_NULL_FN(range->handler))) @@ -529,15 +538,17 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + struct interval_tree_node *node; + slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(slot, slots) { + kvm_for_each_memslot_in_hva_range(node, slots, + range->start, range->end - 1) { unsigned long hva_start, hva_end; + slot = container_of(node, struct kvm_memory_slot, hva_node); hva_start = max(range->start, slot->userspace_addr); hva_end = min(range->end, slot->userspace_addr + (slot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; /* * To optimize for the likely case where the address @@ -873,6 +884,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void) if (!slots) return NULL; + slots->hva_tree = RB_ROOT_CACHED; hash_init(slots->id_hash); return slots; @@ -1277,21 +1289,28 @@ static void kvm_replace_memslot(struct kvm_memslots *slots, struct kvm_memory_slot *new) { /* - * Remove the old memslot from the hash list, copying the node data - * would corrupt the list. + * Remove the old memslot from the hash list and interval tree, copying + * the node data would corrupt the structures. */ if (old) { hash_del(&old->id_node); + interval_tree_remove(&old->hva_node, &slots->hva_tree); if (!new) return; /* Copy the source *data*, not the pointer, to the destination. */ *new = *old; + } else { + /* If @old is NULL, initialize @new's hva range. */ + new->hva_node.start = new->userspace_addr; + new->hva_node.last = new->userspace_addr + + (new->npages << PAGE_SHIFT) - 1; } /* (Re)Add the new memslot. */ hash_add(slots->id_hash, &new->id_node, new->id); + interval_tree_insert(&new->hva_node, &slots->hva_tree); } static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src) @@ -1322,7 +1341,7 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots, atomic_set(&slots->last_used_slot, 0); /* - * Remove the to-be-deleted memslot from the list _before_ shifting + * Remove the to-be-deleted memslot from the list/tree _before_ shifting * the trailing memslots forward, its data will be overwritten. * Defer the (somewhat pointless) copying of the memslot until after * the last slot has been shifted to avoid overwriting said last slot. @@ -1349,7 +1368,8 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) * itself is not preserved in the array, i.e. not swapped at this time, only * its new index into the array is tracked. Returns the changed memslot's * current index into the memslots array. 
- * The memslot at the returned index will not be in @slots->id_hash by then. + * The memslot at the returned index will not be in @slots->hva_tree or + * @slots->id_hash by then. * @memslot is a detached struct with desired final data of the changed slot. */ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, @@ -1363,10 +1383,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, return -1; /* - * Delete the slot from the hash table before sorting the remaining - * slots, the slot's data may be overwritten when copying slots as part - * of the sorting proccess. update_memslots() will unconditionally - * rewrite the entire slot and re-add it to the hash table. + * Delete the slot from the hash table and interval tree before sorting + * the remaining slots, the slot's data may be overwritten when copying + * slots as part of the sorting proccess. update_memslots() will + * unconditionally rewrite and re-add the entire slot. */ kvm_replace_memslot(slots, oldslot, NULL); @@ -1392,10 +1412,12 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, * is not preserved in the array, i.e. not swapped at this time, only its new * index into the array is tracked. Returns the changed memslot's final index * into the memslots array. - * The memslot at the returned index will not be in @slots->id_hash by then. + * The memslot at the returned index will not be in @slots->hva_tree or + * @slots->id_hash by then. * @memslot is a detached struct with desired final data of the new or * changed slot. - * Assumes that the memslot at @start index is not in @slots->id_hash. + * Assumes that the memslot at @start index is not in @slots->hva_tree or + * @slots->id_hash. */ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, struct kvm_memory_slot *memslot, @@ -1588,9 +1610,12 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, memcpy(slots, old, kvm_memslots_size(old->used_slots)); + slots->hva_tree = RB_ROOT_CACHED; hash_init(slots->id_hash); - kvm_for_each_memslot(memslot, slots) + kvm_for_each_memslot(memslot, slots) { + interval_tree_insert(&memslot->hva_node, &slots->hva_tree); hash_add(slots->id_hash, &memslot->id_node, memslot->id); + } return slots; } -- cgit v1.2.3-58-ga151 From a54d806688fe1e482350ce759a8a0fc9ebf814b0 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:30 +0100 Subject: KVM: Keep memslots in tree-based structures instead of array-based ones The current memslot code uses a (reverse gfn-ordered) memslot array for keeping track of them. Because the memslot array that is currently in use cannot be modified every memslot management operation (create, delete, move, change flags) has to make a copy of the whole array so it has a scratch copy to work on. Strictly speaking, however, it is only necessary to make copy of the memslot that is being modified, copying all the memslots currently present is just a limitation of the array-based memslot implementation. Two memslot sets, however, are still needed so the VM continues to run on the currently active set while the requested operation is being performed on the second, currently inactive one. In order to have two memslot sets, but only one copy of actual memslots it is necessary to split out the memslot data from the memslot sets. The memslots themselves should be also kept independent of each other so they can be individually added or deleted. 
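A minimal sketch of the kind of base_gfn-ordered rbtree lookup described below for gfn tracking, using <linux/rbtree.h>; the names are invented and the example is simplified to a single tree, whereas the real code keeps one tree node per memslot set, as the diffs that follow show.

#include <linux/kvm_types.h>
#include <linux/rbtree.h>

struct example_slot {
	struct rb_node gfn_node;	/* linkage into the gfn-ordered tree */
	gfn_t base_gfn;
	unsigned long npages;
};

static struct example_slot *example_gfn_to_slot(struct rb_root *gfn_tree,
						gfn_t gfn)
{
	struct rb_node *node = gfn_tree->rb_node;

	while (node) {
		struct example_slot *slot =
			rb_entry(node, struct example_slot, gfn_node);

		if (gfn < slot->base_gfn)
			node = node->rb_left;
		else if (gfn >= slot->base_gfn + slot->npages)
			node = node->rb_right;
		else
			return slot;	/* @gfn falls inside this slot */
	}

	return NULL;
}

Since memslots cannot overlap in the guest address space, an ordinary ordered tree like this is sufficient for quick containment lookups.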
These two memslot sets should normally point to the same set of memslots. They can, however, be desynchronized when performing a memslot management operation by replacing the memslot to be modified by its copy. After the operation is complete, both memslot sets once again point to the same, common set of memslot data. This commit implements the aforementioned idea. For tracking of gfns an ordinary rbtree is used since memslots cannot overlap in the guest address space and so this data structure is sufficient for ensuring that lookups are done quickly. The "last used slot" mini-caches (both per-slot set one and per-vCPU one), that keep track of the last found-by-gfn memslot, are still present in the new code. Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Message-Id: <17c0cf3663b760a0d3753d4ac08c0753e941b811.1638817641.git.maciej.szmigiero@oracle.com> --- arch/arm64/kvm/mmu.c | 8 +- arch/powerpc/kvm/book3s_64_mmu_hv.c | 4 +- arch/powerpc/kvm/book3s_hv.c | 3 +- arch/powerpc/kvm/book3s_hv_nested.c | 4 +- arch/powerpc/kvm/book3s_hv_uvmem.c | 14 +- arch/s390/kvm/kvm-s390.c | 24 +- arch/s390/kvm/kvm-s390.h | 6 +- arch/x86/kvm/debugfs.c | 6 +- arch/x86/kvm/mmu/mmu.c | 8 +- include/linux/kvm_host.h | 143 ++++--- virt/kvm/kvm_main.c | 761 +++++++++++++++++++----------------- 11 files changed, 503 insertions(+), 478 deletions(-) (limited to 'virt') diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 9b2d881ccf49..e65acf35cee3 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -210,13 +210,13 @@ static void stage2_flush_vm(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int idx; + int idx, bkt; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) + kvm_for_each_memslot(memslot, bkt, slots) stage2_flush_memslot(kvm, memslot); spin_unlock(&kvm->mmu_lock); @@ -595,14 +595,14 @@ void stage2_unmap_vm(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int idx; + int idx, bkt; idx = srcu_read_lock(&kvm->srcu); mmap_read_lock(current->mm); spin_lock(&kvm->mmu_lock); slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) + kvm_for_each_memslot(memslot, bkt, slots) stage2_unmap_memslot(kvm, memslot); spin_unlock(&kvm->mmu_lock); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index c63e263312a4..213232914367 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -734,11 +734,11 @@ void kvmppc_rmap_reset(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int srcu_idx; + int srcu_idx, bkt; srcu_idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { /* Mutual exclusion with kvm_unmap_hva_range etc. 
*/ spin_lock(&kvm->mmu_lock); /* diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 2b59ecc5f8c6..51e1c29a6fa0 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -5880,11 +5880,12 @@ static int kvmhv_svm_off(struct kvm *kvm) for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { struct kvm_memory_slot *memslot; struct kvm_memslots *slots = __kvm_memslots(kvm, i); + int bkt; if (!slots) continue; - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { kvmppc_uvmem_drop_pages(memslot, kvm, true); uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); } diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index ed8a2c9f5629..9435e482d514 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -749,7 +749,7 @@ void kvmhv_release_all_nested(struct kvm *kvm) struct kvm_nested_guest *gp; struct kvm_nested_guest *freelist = NULL; struct kvm_memory_slot *memslot; - int srcu_idx; + int srcu_idx, bkt; spin_lock(&kvm->mmu_lock); for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { @@ -770,7 +770,7 @@ void kvmhv_release_all_nested(struct kvm *kvm) } srcu_idx = srcu_read_lock(&kvm->srcu); - kvm_for_each_memslot(memslot, kvm_memslots(kvm)) + kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) kvmhv_free_memslot_nest_rmap(memslot); srcu_read_unlock(&kvm->srcu, srcu_idx); } diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 28c436df9935..e414ca44839f 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -459,7 +459,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) struct kvm_memslots *slots; struct kvm_memory_slot *memslot, *m; int ret = H_SUCCESS; - int srcu_idx; + int srcu_idx, bkt; kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START; @@ -478,7 +478,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) /* register the memslot */ slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { ret = __kvmppc_uvmem_memslot_create(kvm, memslot); if (ret) break; @@ -486,7 +486,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) if (ret) { slots = kvm_memslots(kvm); - kvm_for_each_memslot(m, slots) { + kvm_for_each_memslot(m, bkt, slots) { if (m == memslot) break; __kvmppc_uvmem_memslot_delete(kvm, memslot); @@ -647,7 +647,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot, unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm) { - int srcu_idx; + int srcu_idx, bkt; struct kvm_memory_slot *memslot; /* @@ -662,7 +662,7 @@ unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm) srcu_idx = srcu_read_lock(&kvm->srcu); - kvm_for_each_memslot(memslot, kvm_memslots(kvm)) + kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) kvmppc_uvmem_drop_pages(memslot, kvm, false); srcu_read_unlock(&kvm->srcu, srcu_idx); @@ -821,7 +821,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int srcu_idx; + int srcu_idx, bkt; long ret = H_SUCCESS; if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) @@ -830,7 +830,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) /* migrate any unmoved normal pfn to device pfns*/ srcu_idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { ret = kvmppc_uv_migrate_mem_slot(kvm, memslot); if (ret) { /* diff --git 
a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 5044b2a2c0cc..b943a589ee41 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1037,13 +1037,13 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) struct kvm_memory_slot *ms; struct kvm_memslots *slots; unsigned long ram_pages = 0; - int slotnr; + int bkt; /* migration mode already enabled */ if (kvm->arch.migration_mode) return 0; slots = kvm_memslots(kvm); - if (!slots || !slots->used_slots) + if (!slots || kvm_memslots_empty(slots)) return -EINVAL; if (!kvm->arch.use_cmma) { @@ -1051,8 +1051,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) return 0; } /* mark all the pages in active slots as dirty */ - for (slotnr = 0; slotnr < slots->used_slots; slotnr++) { - ms = slots->memslots + slotnr; + kvm_for_each_memslot(ms, bkt, slots) { if (!ms->dirty_bitmap) return -EINVAL; /* @@ -1976,22 +1975,21 @@ static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots, unsigned long cur_gfn) { struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn); - int slotidx = ms - slots->memslots; unsigned long ofs = cur_gfn - ms->base_gfn; + struct rb_node *mnode = &ms->gfn_node[slots->node_idx]; if (ms->base_gfn + ms->npages <= cur_gfn) { - slotidx--; + mnode = rb_next(mnode); /* If we are above the highest slot, wrap around */ - if (slotidx < 0) - slotidx = slots->used_slots - 1; + if (!mnode) + mnode = rb_first(&slots->gfn_tree); - ms = slots->memslots + slotidx; + ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); ofs = 0; } ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); - while ((slotidx > 0) && (ofs >= ms->npages)) { - slotidx--; - ms = slots->memslots + slotidx; + while (ofs >= ms->npages && (mnode = rb_next(mnode))) { + ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0); } return ms->base_gfn + ofs; @@ -2004,7 +2002,7 @@ static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, struct kvm_memslots *slots = kvm_memslots(kvm); struct kvm_memory_slot *ms; - if (unlikely(!slots->used_slots)) + if (unlikely(kvm_memslots_empty(slots))) return 0; cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index cc309cc37e96..60f0effcce99 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -220,12 +220,14 @@ static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm) /* get the end gfn of the last (highest gfn) memslot */ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots) { + struct rb_node *node; struct kvm_memory_slot *ms; - if (WARN_ON(!slots->used_slots)) + if (WARN_ON(kvm_memslots_empty(slots))) return 0; - ms = slots->memslots; + node = rb_last(&slots->gfn_tree); + ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]); return ms->base_gfn + ms->npages; } diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c index 54a83a744538..543a8c04025c 100644 --- a/arch/x86/kvm/debugfs.c +++ b/arch/x86/kvm/debugfs.c @@ -107,9 +107,10 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v) write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + int bkt; + slots = __kvm_memslots(kvm, i); - for (j = 0; j < slots->used_slots; j++) { - slot = &slots->memslots[j]; + kvm_for_each_memslot(slot, bkt, slots) for (k = 0; k < KVM_NR_PAGE_SIZES; k++) { rmap = slot->arch.rmap[k]; 
lpage_size = kvm_mmu_slot_lpages(slot, k + 1); @@ -121,7 +122,6 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v) cur[index]++; } } - } } write_unlock(&kvm->mmu_lock); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index e41cf095f2d1..c61430994d19 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3409,7 +3409,7 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; - int r = 0, i; + int r = 0, i, bkt; /* * Check if this is the first shadow root being allocated before @@ -3434,7 +3434,7 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm) for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(slot, slots) { + kvm_for_each_memslot(slot, bkt, slots) { /* * Both of these functions are no-ops if the target is * already allocated, so unconditionally calling both @@ -5730,14 +5730,14 @@ static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) struct kvm_memslots *slots; bool flush = false; gfn_t start, end; - int i; + int i, bkt; if (!kvm_memslots_have_rmaps(kvm)) return flush; for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { start = max(gfn_start, memslot->base_gfn); end = min(gfn_end, memslot->base_gfn + memslot->npages); if (start >= end) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9552ad6d6652..9eda8a63feae 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -358,11 +359,13 @@ struct kvm_vcpu { struct kvm_dirty_ring dirty_ring; /* - * The index of the most recently used memslot by this vCPU. It's ok - * if this becomes stale due to memslot changes since we always check - * it is a valid slot. + * The most recently used memslot by this vCPU and the slots generation + * for which it is valid. + * No wraparound protection is needed since generations won't overflow in + * thousands of years, even assuming 1M memslot operations per second. */ - int last_used_slot; + struct kvm_memory_slot *last_used_slot; + u64 last_used_slot_gen; }; /* must be called with irqs disabled */ @@ -427,9 +430,26 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) */ #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) +/* + * Since at idle each memslot belongs to two memslot sets it has to contain + * two embedded nodes for each data structure that it forms a part of. + * + * Two memslot sets (one active and one inactive) are necessary so the VM + * continues to run on one memslot set while the other is being modified. + * + * These two memslot sets normally point to the same set of memslots. + * They can, however, be desynchronized when performing a memslot management + * operation by replacing the memslot to be modified by its copy. + * After the operation is complete, both memslot sets once again point to + * the same, common set of memslot data. + * + * The memslots themselves are independent of each other so they can be + * individually added or deleted. 
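Because each slot embeds one node per memslot set (the [2] arrays introduced below), the very same slot object can be linked into both the active and the inactive set at once, and making the other set visible is nothing more than swapping which set readers dereference. A minimal user-space model of that idea, not kernel code, with illustrative toy_* names:

#include <stdio.h>
#include <stddef.h>

struct toy_slot {
	unsigned long base_gfn;
	struct toy_slot *next[2];	/* one embedded node per set */
};

struct toy_set {
	struct toy_slot *head;
	int node_idx;			/* which embedded node this set uses */
};

static void toy_set_add(struct toy_set *set, struct toy_slot *slot)
{
	slot->next[set->node_idx] = set->head;
	set->head = slot;
}

int main(void)
{
	struct toy_set sets[2] = { { NULL, 0 }, { NULL, 1 } };
	struct toy_set *active = &sets[0], *inactive = &sets[1];
	struct toy_slot a = { .base_gfn = 0x100 };

	/* The same slot object is linked into both sets. */
	toy_set_add(active, &a);
	toy_set_add(inactive, &a);

	/* "Activating" the other set is just swapping the pointers. */
	struct toy_set *tmp = active;
	active = inactive;
	inactive = tmp;

	printf("active uses node %d, slot base_gfn=%#lx\n",
	       active->node_idx, active->head->base_gfn);
	return 0;
}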
+ */ struct kvm_memory_slot { - struct hlist_node id_node; - struct interval_tree_node hva_node; + struct hlist_node id_node[2]; + struct interval_tree_node hva_node[2]; + struct rb_node gfn_node[2]; gfn_t base_gfn; unsigned long npages; unsigned long *dirty_bitmap; @@ -524,16 +544,13 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) } #endif -/* - * Note: - * memslots are not sorted by id anymore, please use id_to_memslot() - * to get the memslot by its id. - */ struct kvm_memslots { u64 generation; + atomic_long_t last_used_slot; struct rb_root_cached hva_tree; + struct rb_root gfn_tree; /* - * The mapping table from slot id to the index in memslots[]. + * The mapping table from slot id to memslot. * * 7-bit bucket count matches the size of the old id to index array for * 512 slots, while giving good performance with this slot count. @@ -541,9 +558,7 @@ struct kvm_memslots { * always result in higher memory usage (even for lower memslot counts). */ DECLARE_HASHTABLE(id_hash, 7); - atomic_t last_used_slot; - int used_slots; - struct kvm_memory_slot memslots[]; + int node_idx; }; struct kvm { @@ -565,6 +580,9 @@ struct kvm { struct mutex slots_arch_lock; struct mm_struct *mm; /* userspace tied to this vm */ unsigned long nr_memslot_pages; + /* The two memslot sets - active and inactive (per address space) */ + struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2]; + /* The current active memslot set for each address space */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct xarray vcpu_array; @@ -739,11 +757,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) return NULL; } -#define kvm_for_each_memslot(memslot, slots) \ - for (memslot = &slots->memslots[0]; \ - memslot < slots->memslots + slots->used_slots; memslot++) \ - if (WARN_ON_ONCE(!memslot->npages)) { \ - } else +static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) +{ + return vcpu->vcpu_idx; +} void kvm_destroy_vcpus(struct kvm *kvm); @@ -805,12 +822,23 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) return __kvm_memslots(vcpu->kvm, as_id); } +static inline bool kvm_memslots_empty(struct kvm_memslots *slots) +{ + return RB_EMPTY_ROOT(&slots->gfn_tree); +} + +#define kvm_for_each_memslot(memslot, bkt, slots) \ + hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \ + if (WARN_ON_ONCE(!memslot->npages)) { \ + } else + static inline struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id) { struct kvm_memory_slot *slot; + int idx = slots->node_idx; - hash_for_each_possible(slots->id_hash, slot, id_node, id) { + hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) { if (slot->id == id) return slot; } @@ -1214,25 +1242,15 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); /* - * Returns a pointer to the memslot at slot_index if it contains gfn. + * Returns a pointer to the memslot if it contains gfn. * Otherwise returns NULL. */ static inline struct kvm_memory_slot * -try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) +try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { - struct kvm_memory_slot *slot; - - if (slot_index < 0 || slot_index >= slots->used_slots) + if (!slot) return NULL; - /* - * slot_index can come from vcpu->last_used_slot which is not kept - * in sync with userspace-controllable memslot deletion. 
So use nospec - * to prevent the CPU from speculating past the end of memslots[]. - */ - slot_index = array_index_nospec(slot_index, slots->used_slots); - slot = &slots->memslots[slot_index]; - if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) return slot; else @@ -1240,65 +1258,46 @@ try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) } /* - * Returns a pointer to the memslot that contains gfn and records the index of - * the slot in index. Otherwise returns NULL. + * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL. * * With "approx" set returns the memslot also when the address falls * in a hole. In that case one of the memslots bordering the hole is * returned. - * - * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! */ static inline struct kvm_memory_slot * -search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index, bool approx) +search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx) { - int start = 0, end = slots->used_slots; - struct kvm_memory_slot *memslots = slots->memslots; struct kvm_memory_slot *slot; - - if (unlikely(!slots->used_slots)) - return NULL; - - while (start < end) { - int slot = start + (end - start) / 2; - - if (gfn >= memslots[slot].base_gfn) - end = slot; - else - start = slot + 1; - } - - if (approx && start >= slots->used_slots) { - *index = slots->used_slots - 1; - return &memslots[slots->used_slots - 1]; - } - - slot = try_get_memslot(slots, start, gfn); - if (slot) { - *index = start; - return slot; - } - if (approx) { - *index = start; - return &memslots[start]; + struct rb_node *node; + int idx = slots->node_idx; + + slot = NULL; + for (node = slots->gfn_tree.rb_node; node; ) { + slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]); + if (gfn >= slot->base_gfn) { + if (gfn < slot->base_gfn + slot->npages) + return slot; + node = node->rb_right; + } else + node = node->rb_left; } - return NULL; + return approx ? 
slot : NULL; } static inline struct kvm_memory_slot * ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx) { struct kvm_memory_slot *slot; - int slot_index = atomic_read(&slots->last_used_slot); - slot = try_get_memslot(slots, slot_index, gfn); + slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot); + slot = try_get_memslot(slot, gfn); if (slot) return slot; - slot = search_memslots(slots, gfn, &slot_index, approx); + slot = search_memslots(slots, gfn, approx); if (slot) { - atomic_set(&slots->last_used_slot, slot_index); + atomic_long_set(&slots->last_used_slot, (unsigned long)slot); return slot; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6ba7468bdbe3..a87df97e0b14 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -433,7 +433,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) vcpu->preempted = false; vcpu->ready = false; preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); - vcpu->last_used_slot = 0; + vcpu->last_used_slot = NULL; } static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) @@ -545,7 +545,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, range->start, range->end - 1) { unsigned long hva_start, hva_end; - slot = container_of(node, struct kvm_memory_slot, hva_node); + slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]); hva_start = max(range->start, slot->userspace_addr); hva_end = min(range->end, slot->userspace_addr + (slot->npages << PAGE_SHIFT)); @@ -876,20 +876,6 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm) } #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ -static struct kvm_memslots *kvm_alloc_memslots(void) -{ - struct kvm_memslots *slots; - - slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT); - if (!slots) - return NULL; - - slots->hva_tree = RB_ROOT_CACHED; - hash_init(slots->id_hash); - - return slots; -} - static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) { if (!memslot->dirty_bitmap) @@ -899,27 +885,33 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) memslot->dirty_bitmap = NULL; } +/* This does not remove the slot from struct kvm_memslots data structures */ static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_destroy_dirty_bitmap(slot); kvm_arch_free_memslot(kvm, slot); - slot->flags = 0; - slot->npages = 0; + kfree(slot); } static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) { + struct hlist_node *idnode; struct kvm_memory_slot *memslot; + int bkt; - if (!slots) + /* + * The same memslot objects live in both active and inactive sets, + * arbitrarily free using index '1' so the second invocation of this + * function isn't operating over a structure with dangling pointers + * (even though this function isn't actually touching them). 
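The ____gfn_to_memslot() path above is a cache-in-front-of-search pattern: try the cached last-used slot, validate that the gfn still falls inside it, and only on a miss fall back to the full search and refresh the cache. A self-contained sketch of the same shape, with a linear scan standing in for the gfn-tree walk and illustrative toy_* names:

#include <stdio.h>
#include <stddef.h>

struct toy_slot { unsigned long base_gfn, npages; };

static struct toy_slot slots[] = {
	{ .base_gfn = 0x000, .npages = 0x100 },
	{ .base_gfn = 0x400, .npages = 0x200 },
};
static struct toy_slot *last_used;	/* models slots->last_used_slot */

static struct toy_slot *try_cached(struct toy_slot *slot, unsigned long gfn)
{
	if (slot && gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
		return slot;
	return NULL;
}

static struct toy_slot *slow_search(unsigned long gfn)
{
	/* Stands in for the O(log n) walk of the gfn tree. */
	for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
		if (try_cached(&slots[i], gfn))
			return &slots[i];
	return NULL;
}

static struct toy_slot *gfn_to_slot(unsigned long gfn)
{
	struct toy_slot *slot = try_cached(last_used, gfn);

	if (slot)
		return slot;		/* fast path: cache hit */

	slot = slow_search(gfn);
	if (slot)
		last_used = slot;	/* refresh the cache for the next call */
	return slot;
}

int main(void)
{
	printf("0x450 -> base %#lx\n", gfn_to_slot(0x450)->base_gfn);
	printf("0x460 -> base %#lx (served from the cache)\n",
	       gfn_to_slot(0x460)->base_gfn);
	return 0;
}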
+ */ + if (!slots->node_idx) return; - kvm_for_each_memslot(memslot, slots) + hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1]) kvm_free_memslot(kvm, memslot); - - kvfree(slots); } static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) @@ -1058,8 +1050,9 @@ int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) static struct kvm *kvm_create_vm(unsigned long type) { struct kvm *kvm = kvm_arch_alloc_vm(); + struct kvm_memslots *slots; int r = -ENOMEM; - int i; + int i, j; if (!kvm) return ERR_PTR(-ENOMEM); @@ -1087,13 +1080,20 @@ static struct kvm *kvm_create_vm(unsigned long type) refcount_set(&kvm->users_count, 1); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - struct kvm_memslots *slots = kvm_alloc_memslots(); + for (j = 0; j < 2; j++) { + slots = &kvm->__memslots[i][j]; - if (!slots) - goto out_err_no_arch_destroy_vm; - /* Generations must be different for each address space. */ - slots->generation = i; - rcu_assign_pointer(kvm->memslots[i], slots); + atomic_long_set(&slots->last_used_slot, (unsigned long)NULL); + slots->hva_tree = RB_ROOT_CACHED; + slots->gfn_tree = RB_ROOT; + hash_init(slots->id_hash); + slots->node_idx = j; + + /* Generations must be different for each address space. */ + slots->generation = i; + } + + rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]); } for (i = 0; i < KVM_NR_BUSES; i++) { @@ -1147,8 +1147,6 @@ out_err_no_arch_destroy_vm: WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); for (i = 0; i < KVM_NR_BUSES; i++) kfree(kvm_get_bus(kvm, i)); - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) - kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); cleanup_srcu_struct(&kvm->irq_srcu); out_err_no_irq_srcu: cleanup_srcu_struct(&kvm->srcu); @@ -1213,8 +1211,10 @@ static void kvm_destroy_vm(struct kvm *kvm) #endif kvm_arch_destroy_vm(kvm); kvm_destroy_devices(kvm); - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) - kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + kvm_free_memslots(kvm, &kvm->__memslots[i][0]); + kvm_free_memslots(kvm, &kvm->__memslots[i][1]); + } cleanup_srcu_struct(&kvm->irq_srcu); cleanup_srcu_struct(&kvm->srcu); kvm_arch_free_vm(kvm); @@ -1284,227 +1284,136 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) return 0; } -static void kvm_replace_memslot(struct kvm_memslots *slots, - struct kvm_memory_slot *old, - struct kvm_memory_slot *new) -{ - /* - * Remove the old memslot from the hash list and interval tree, copying - * the node data would corrupt the structures. - */ - if (old) { - hash_del(&old->id_node); - interval_tree_remove(&old->hva_node, &slots->hva_tree); - - if (!new) - return; - - /* Copy the source *data*, not the pointer, to the destination. */ - *new = *old; - } else { - /* If @old is NULL, initialize @new's hva range. */ - new->hva_node.start = new->userspace_addr; - new->hva_node.last = new->userspace_addr + - (new->npages << PAGE_SHIFT) - 1; - } - - /* (Re)Add the new memslot. 
*/ - hash_add(slots->id_hash, &new->id_node, new->id); - interval_tree_insert(&new->hva_node, &slots->hva_tree); -} - -static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src) +static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id) { - struct kvm_memory_slot *mslots = slots->memslots; + struct kvm_memslots *active = __kvm_memslots(kvm, as_id); + int node_idx_inactive = active->node_idx ^ 1; - kvm_replace_memslot(slots, &mslots[src], &mslots[dst]); + return &kvm->__memslots[as_id][node_idx_inactive]; } /* - * Delete a memslot by decrementing the number of used slots and shifting all - * other entries in the array forward one spot. - * @memslot is a detached dummy struct with just .id and .as_id filled. + * Helper to get the address space ID when one of memslot pointers may be NULL. + * This also serves as a sanity that at least one of the pointers is non-NULL, + * and that their address space IDs don't diverge. */ -static inline void kvm_memslot_delete(struct kvm_memslots *slots, - struct kvm_memory_slot *memslot) +static int kvm_memslots_get_as_id(struct kvm_memory_slot *a, + struct kvm_memory_slot *b) { - struct kvm_memory_slot *mslots = slots->memslots; - struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id); - int i; - - if (WARN_ON(!oldslot)) - return; - - slots->used_slots--; + if (WARN_ON_ONCE(!a && !b)) + return 0; - if (atomic_read(&slots->last_used_slot) >= slots->used_slots) - atomic_set(&slots->last_used_slot, 0); + if (!a) + return b->as_id; + if (!b) + return a->as_id; - /* - * Remove the to-be-deleted memslot from the list/tree _before_ shifting - * the trailing memslots forward, its data will be overwritten. - * Defer the (somewhat pointless) copying of the memslot until after - * the last slot has been shifted to avoid overwriting said last slot. - */ - kvm_replace_memslot(slots, oldslot, NULL); - - for (i = oldslot - mslots; i < slots->used_slots; i++) - kvm_shift_memslot(slots, i, i + 1); - mslots[i] = *memslot; + WARN_ON_ONCE(a->as_id != b->as_id); + return a->as_id; } -/* - * "Insert" a new memslot by incrementing the number of used slots. Returns - * the new slot's initial index into the memslots array. - */ -static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) +static void kvm_insert_gfn_node(struct kvm_memslots *slots, + struct kvm_memory_slot *slot) { - return slots->used_slots++; -} - -/* - * Move a changed memslot backwards in the array by shifting existing slots - * with a higher GFN toward the front of the array. Note, the changed memslot - * itself is not preserved in the array, i.e. not swapped at this time, only - * its new index into the array is tracked. Returns the changed memslot's - * current index into the memslots array. - * The memslot at the returned index will not be in @slots->hva_tree or - * @slots->id_hash by then. - * @memslot is a detached struct with desired final data of the changed slot. - */ -static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, - struct kvm_memory_slot *memslot) -{ - struct kvm_memory_slot *mslots = slots->memslots; - struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id); - int i; - - if (!oldslot || !slots->used_slots) - return -1; - - /* - * Delete the slot from the hash table and interval tree before sorting - * the remaining slots, the slot's data may be overwritten when copying - * slots as part of the sorting proccess. update_memslots() will - * unconditionally rewrite and re-add the entire slot. 
- */ - kvm_replace_memslot(slots, oldslot, NULL); - - /* - * Move the target memslot backward in the array by shifting existing - * memslots with a higher GFN (than the target memslot) towards the - * front of the array. - */ - for (i = oldslot - mslots; i < slots->used_slots - 1; i++) { - if (memslot->base_gfn > mslots[i + 1].base_gfn) - break; + struct rb_root *gfn_tree = &slots->gfn_tree; + struct rb_node **node, *parent; + int idx = slots->node_idx; - WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn); + parent = NULL; + for (node = &gfn_tree->rb_node; *node; ) { + struct kvm_memory_slot *tmp; - kvm_shift_memslot(slots, i, i + 1); + tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]); + parent = *node; + if (slot->base_gfn < tmp->base_gfn) + node = &(*node)->rb_left; + else if (slot->base_gfn > tmp->base_gfn) + node = &(*node)->rb_right; + else + BUG(); } - return i; + + rb_link_node(&slot->gfn_node[idx], parent, node); + rb_insert_color(&slot->gfn_node[idx], gfn_tree); } -/* - * Move a changed memslot forwards in the array by shifting existing slots with - * a lower GFN toward the back of the array. Note, the changed memslot itself - * is not preserved in the array, i.e. not swapped at this time, only its new - * index into the array is tracked. Returns the changed memslot's final index - * into the memslots array. - * The memslot at the returned index will not be in @slots->hva_tree or - * @slots->id_hash by then. - * @memslot is a detached struct with desired final data of the new or - * changed slot. - * Assumes that the memslot at @start index is not in @slots->hva_tree or - * @slots->id_hash. - */ -static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, - struct kvm_memory_slot *memslot, - int start) +static void kvm_erase_gfn_node(struct kvm_memslots *slots, + struct kvm_memory_slot *slot) { - struct kvm_memory_slot *mslots = slots->memslots; - int i; + rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree); +} - for (i = start; i > 0; i--) { - if (memslot->base_gfn < mslots[i - 1].base_gfn) - break; +static void kvm_replace_gfn_node(struct kvm_memslots *slots, + struct kvm_memory_slot *old, + struct kvm_memory_slot *new) +{ + int idx = slots->node_idx; - WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn); + WARN_ON_ONCE(old->base_gfn != new->base_gfn); - kvm_shift_memslot(slots, i, i - 1); - } - return i; + rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx], + &slots->gfn_tree); } /* - * Re-sort memslots based on their GFN to account for an added, deleted, or - * moved memslot. Sorting memslots by GFN allows using a binary search during - * memslot lookup. - * - * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! I.e. the entry - * at memslots[0] has the highest GFN. - * - * The sorting algorithm takes advantage of having initially sorted memslots - * and knowing the position of the changed memslot. Sorting is also optimized - * by not swapping the updated memslot and instead only shifting other memslots - * and tracking the new index for the update memslot. Only once its final - * index is known is the updated memslot copied into its position in the array. - * - * - When deleting a memslot, the deleted memslot simply needs to be moved to - * the end of the array. - * - * - When creating a memslot, the algorithm "inserts" the new memslot at the - * end of the array and then it forward to its correct location. 
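kvm_insert_gfn_node() above is a textbook binary-search-tree insertion keyed on base_gfn, and because slots never overlap the same descent also answers which slot (if any) contains a given gfn. A stand-alone model using an unbalanced BST in place of the kernel rbtree (the real code also rebalances via rb_insert_color(); toy_* names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct toy_slot {
	unsigned long base_gfn, npages;
	struct toy_slot *left, *right;
};

static int toy_insert(struct toy_slot **root, struct toy_slot *slot)
{
	struct toy_slot **node = root;

	while (*node) {
		if (slot->base_gfn < (*node)->base_gfn)
			node = &(*node)->left;
		else if (slot->base_gfn > (*node)->base_gfn)
			node = &(*node)->right;
		else
			return -1;	/* duplicate base_gfn: caller bug */
	}
	*node = slot;
	return 0;
}

static struct toy_slot *toy_lookup(struct toy_slot *node, unsigned long gfn)
{
	while (node) {
		if (gfn < node->base_gfn)
			node = node->left;
		else if (gfn >= node->base_gfn + node->npages)
			node = node->right;
		else
			return node;	/* gfn falls inside this slot */
	}
	return NULL;
}

int main(void)
{
	struct toy_slot a = { .base_gfn = 0x000, .npages = 0x100 };
	struct toy_slot b = { .base_gfn = 0x400, .npages = 0x200 };
	struct toy_slot *root = NULL;

	toy_insert(&root, &a);
	toy_insert(&root, &b);
	printf("0x410 -> slot at %#lx\n", toy_lookup(root, 0x410)->base_gfn);
	return 0;
}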
- * - * - When moving a memslot, the algorithm first moves the updated memslot - * backward to handle the scenario where the memslot's GFN was changed to a - * lower value. update_memslots() then falls through and runs the same flow - * as creating a memslot to move the memslot forward to handle the scenario - * where its GFN was changed to a higher value. + * Replace @old with @new in the inactive memslots. * - * Note, slots are sorted from highest->lowest instead of lowest->highest for - * historical reasons. Originally, invalid memslots where denoted by having - * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots - * to the end of the array. The current algorithm uses dedicated logic to - * delete a memslot and thus does not rely on invalid memslots having GFN=0. + * With NULL @old this simply adds @new. + * With NULL @new this simply removes @old. * - * The other historical motiviation for highest->lowest was to improve the - * performance of memslot lookup. KVM originally used a linear search starting - * at memslots[0]. On x86, the largest memslot usually has one of the highest, - * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a - * single memslot above the 4gb boundary. As the largest memslot is also the - * most likely to be referenced, sorting it to the front of the array was - * advantageous. The current binary search starts from the middle of the array - * and uses an LRU pointer to improve performance for all memslots and GFNs. - * - * @memslot is a detached struct, not a part of the current or new memslot - * array. + * If @new is non-NULL its hva_node[slots_idx] range has to be set + * appropriately. */ -static void update_memslots(struct kvm_memslots *slots, - struct kvm_memory_slot *memslot, - enum kvm_mr_change change) +static void kvm_replace_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + struct kvm_memory_slot *new) { - int i; + int as_id = kvm_memslots_get_as_id(old, new); + struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); + int idx = slots->node_idx; - if (change == KVM_MR_DELETE) { - kvm_memslot_delete(slots, memslot); - } else { - if (change == KVM_MR_CREATE) - i = kvm_memslot_insert_back(slots); - else - i = kvm_memslot_move_backward(slots, memslot); - i = kvm_memslot_move_forward(slots, memslot, i); + if (old) { + hash_del(&old->id_node[idx]); + interval_tree_remove(&old->hva_node[idx], &slots->hva_tree); - if (WARN_ON_ONCE(i < 0)) + if ((long)old == atomic_long_read(&slots->last_used_slot)) + atomic_long_set(&slots->last_used_slot, (long)new); + + if (!new) { + kvm_erase_gfn_node(slots, old); return; + } + } - /* - * Copy the memslot to its new position in memslots and update - * its index accordingly. - */ - slots->memslots[i] = *memslot; - kvm_replace_memslot(slots, NULL, &slots->memslots[i]); + /* + * Initialize @new's hva range. Do this even when replacing an @old + * slot, kvm_copy_memslot() deliberately does not touch node data. + */ + new->hva_node[idx].start = new->userspace_addr; + new->hva_node[idx].last = new->userspace_addr + + (new->npages << PAGE_SHIFT) - 1; + + /* + * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), + * hva_node needs to be swapped with remove+insert even though hva can't + * change when replacing an existing slot. 
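There is no O(1) interval-tree replace, so the hva node is always removed and re-inserted; the gfn node, by contrast, can be swapped in place whenever base_gfn is unchanged, as the code just below does with rb_replace_node(). A toy illustration of why the same-key case is a single splice rather than an erase plus insert (plain user-space BST, not the kernel rbtree API):

#include <stdio.h>

struct toy_slot {
	unsigned long base_gfn;
	struct toy_slot *left, *right;
};

/* Valid only when new->base_gfn == old->base_gfn and @old is in the tree:
 * @new inherits @old's children and its position under @old's parent. */
static void toy_replace_node(struct toy_slot **root, struct toy_slot *old,
			     struct toy_slot *new)
{
	struct toy_slot **link = root;

	while (*link != old)
		link = (old->base_gfn < (*link)->base_gfn) ?
			&(*link)->left : &(*link)->right;

	new->left = old->left;
	new->right = old->right;
	*link = new;		/* @old is now fully detached */
}

int main(void)
{
	struct toy_slot a = { .base_gfn = 0x100 };
	struct toy_slot b = { .base_gfn = 0x400 };
	struct toy_slot b2 = { .base_gfn = 0x400 };	/* updated copy, same key */
	struct toy_slot *root = &a;

	a.right = &b;
	toy_replace_node(&root, &b, &b2);
	printf("a.right now points at the copy: %d\n", a.right == &b2);
	return 0;
}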
+ */ + hash_add(slots->id_hash, &new->id_node[idx], new->id); + interval_tree_insert(&new->hva_node[idx], &slots->hva_tree); + + /* + * If the memslot gfn is unchanged, rb_replace_node() can be used to + * switch the node in the gfn tree instead of removing the old and + * inserting the new as two separate operations. Replacement is a + * single O(1) operation versus two O(log(n)) operations for + * remove+insert. + */ + if (old && old->base_gfn == new->base_gfn) { + kvm_replace_gfn_node(slots, old, new); + } else { + if (old) + kvm_erase_gfn_node(slots, old); + kvm_insert_gfn_node(slots, new); } } @@ -1522,11 +1431,12 @@ static int check_memory_region_flags(const struct kvm_userspace_memory_region *m return 0; } -static struct kvm_memslots *install_new_memslots(struct kvm *kvm, - int as_id, struct kvm_memslots *slots) +static void kvm_swap_active_memslots(struct kvm *kvm, int as_id) { - struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); - u64 gen = old_memslots->generation; + struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); + + /* Grab the generation from the activate memslots. */ + u64 gen = __kvm_memslots(kvm, as_id)->generation; WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; @@ -1577,58 +1487,6 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm, kvm_arch_memslots_updated(kvm, gen); slots->generation = gen; - - return old_memslots; -} - -static size_t kvm_memslots_size(int slots) -{ - return sizeof(struct kvm_memslots) + - (sizeof(struct kvm_memory_slot) * slots); -} - -/* - * Note, at a minimum, the current number of used slots must be allocated, even - * when deleting a memslot, as we need a complete duplicate of the memslots for - * use when invalidating a memslot prior to deleting/moving the memslot. - */ -static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, - enum kvm_mr_change change) -{ - struct kvm_memslots *slots; - size_t new_size; - struct kvm_memory_slot *memslot; - - if (change == KVM_MR_CREATE) - new_size = kvm_memslots_size(old->used_slots + 1); - else - new_size = kvm_memslots_size(old->used_slots); - - slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); - if (unlikely(!slots)) - return NULL; - - memcpy(slots, old, kvm_memslots_size(old->used_slots)); - - slots->hva_tree = RB_ROOT_CACHED; - hash_init(slots->id_hash); - kvm_for_each_memslot(memslot, slots) { - interval_tree_insert(&memslot->hva_node, &slots->hva_tree); - hash_add(slots->id_hash, &memslot->id_node, memslot->id); - } - - return slots; -} - -static void kvm_copy_memslots_arch(struct kvm_memslots *to, - struct kvm_memslots *from) -{ - int i; - - WARN_ON_ONCE(to->used_slots != from->used_slots); - - for (i = 0; i < from->used_slots; i++) - to->memslots[i].arch = from->memslots[i].arch; } static int kvm_prepare_memory_region(struct kvm *kvm, @@ -1683,31 +1541,214 @@ static void kvm_commit_memory_region(struct kvm *kvm, kvm_arch_commit_memory_region(kvm, old, new, change); + switch (change) { + case KVM_MR_CREATE: + /* Nothing more to do. */ + break; + case KVM_MR_DELETE: + /* Free the old memslot and all its metadata. */ + kvm_free_memslot(kvm, old); + break; + case KVM_MR_MOVE: + case KVM_MR_FLAGS_ONLY: + /* + * Free the dirty bitmap as needed; the below check encompasses + * both the flags and whether a ring buffer is being used) + */ + if (old->dirty_bitmap && !new->dirty_bitmap) + kvm_destroy_dirty_bitmap(old); + + /* + * The final quirk. 
Free the detached, old slot, but only its + * memory, not any metadata. Metadata, including arch specific + * data, may be reused by @new. + */ + kfree(old); + break; + default: + BUG(); + } +} + +/* + * Activate @new, which must be installed in the inactive slots by the caller, + * by swapping the active slots and then propagating @new to @old once @old is + * unreachable and can be safely modified. + * + * With NULL @old this simply adds @new to @active (while swapping the sets). + * With NULL @new this simply removes @old from @active and frees it + * (while also swapping the sets). + */ +static void kvm_activate_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + struct kvm_memory_slot *new) +{ + int as_id = kvm_memslots_get_as_id(old, new); + + kvm_swap_active_memslots(kvm, as_id); + + /* Propagate the new memslot to the now inactive memslots. */ + kvm_replace_memslot(kvm, old, new); +} + +static void kvm_copy_memslot(struct kvm_memory_slot *dest, + const struct kvm_memory_slot *src) +{ + dest->base_gfn = src->base_gfn; + dest->npages = src->npages; + dest->dirty_bitmap = src->dirty_bitmap; + dest->arch = src->arch; + dest->userspace_addr = src->userspace_addr; + dest->flags = src->flags; + dest->id = src->id; + dest->as_id = src->as_id; +} + +static void kvm_invalidate_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + struct kvm_memory_slot *working_slot) +{ /* - * Free the old memslot's metadata. On DELETE, free the whole thing, - * otherwise free the dirty bitmap as needed (the below effectively - * checks both the flags and whether a ring buffer is being used). + * Mark the current slot INVALID. As with all memslot modifications, + * this must be done on an unreachable slot to avoid modifying the + * current slot in the active tree. */ - if (change == KVM_MR_DELETE) - kvm_free_memslot(kvm, old); - else if (old->dirty_bitmap && !new->dirty_bitmap) - kvm_destroy_dirty_bitmap(old); + kvm_copy_memslot(working_slot, old); + working_slot->flags |= KVM_MEMSLOT_INVALID; + kvm_replace_memslot(kvm, old, working_slot); + + /* + * Activate the slot that is now marked INVALID, but don't propagate + * the slot to the now inactive slots. The slot is either going to be + * deleted or recreated as a new slot. + */ + kvm_swap_active_memslots(kvm, old->as_id); + + /* + * From this point no new shadow pages pointing to a deleted, or moved, + * memslot will be created. Validation of sp->gfn happens in: + * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) + * - kvm_is_visible_gfn (mmu_check_root) + */ + kvm_arch_flush_shadow_memslot(kvm, working_slot); + + /* Was released by kvm_swap_active_memslots, reacquire. */ + mutex_lock(&kvm->slots_arch_lock); + + /* + * Copy the arch-specific field of the newly-installed slot back to the + * old slot as the arch data could have changed between releasing + * slots_arch_lock in install_new_memslots() and re-acquiring the lock + * above. Writers are required to retrieve memslots *after* acquiring + * slots_arch_lock, thus the active slot's data is guaranteed to be fresh. + */ + old->arch = working_slot->arch; +} + +static void kvm_create_memslot(struct kvm *kvm, + const struct kvm_memory_slot *new, + struct kvm_memory_slot *working) +{ + /* + * Add the new memslot to the inactive set as a copy of the + * new memslot data provided by userspace. 
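kvm_invalidate_memslot() above relies on the same publish-by-swap trick: build an INVALID copy on the set readers cannot reach, swap the sets so the copy becomes the visible one, and only then tear down or move the original. A compact user-space model of that ordering (single-slot "sets" for brevity, toy_* names are illustrative):

#include <stdio.h>

#define TOY_INVALID 0x1u

struct toy_slot { unsigned long base_gfn; unsigned int flags; };
struct toy_set  { struct toy_slot *slot; };	/* one-slot "set" for brevity */

static struct toy_set sets[2];
static struct toy_set *active = &sets[0];	/* what readers dereference */

static void toy_swap_active(void)
{
	active = (active == &sets[0]) ? &sets[1] : &sets[0];
}

int main(void)
{
	struct toy_slot old = { .base_gfn = 0x400 };
	struct toy_slot invalid;

	sets[0].slot = sets[1].slot = &old;	/* both sets reference @old */

	/* 1. Build the INVALID copy on the set readers cannot see. */
	invalid = old;
	invalid.flags |= TOY_INVALID;
	sets[1].slot = &invalid;		/* sets[1] is currently inactive */

	/* 2. Publish it: readers now see the INVALID copy, @old is private. */
	toy_swap_active();

	/* 3. Only now is it safe to flush/delete/move @old without racing readers. */
	printf("readers see flags=%#x, old is now unreachable\n",
	       active->slot->flags);
	return 0;
}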
+ */ + kvm_copy_memslot(working, new); + kvm_replace_memslot(kvm, NULL, working); + kvm_activate_memslot(kvm, NULL, working); +} + +static void kvm_delete_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + struct kvm_memory_slot *invalid_slot) +{ + /* + * Remove the old memslot (in the inactive memslots) by passing NULL as + * the "new" slot. + */ + kvm_replace_memslot(kvm, old, NULL); + + /* And do the same for the invalid version in the active slot. */ + kvm_activate_memslot(kvm, invalid_slot, NULL); + + /* Free the invalid slot, the caller will clean up the old slot. */ + kfree(invalid_slot); +} + +static struct kvm_memory_slot *kvm_move_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + struct kvm_memory_slot *invalid_slot) +{ + struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, old->as_id); + + /* + * The memslot's gfn is changing, remove it from the inactive tree, it + * will be re-added with its updated gfn. Because its range is + * changing, an in-place replace is not possible. + */ + kvm_erase_gfn_node(slots, old); + + /* + * The old slot is now fully disconnected, reuse its memory for the + * persistent copy of "new". + */ + kvm_copy_memslot(old, new); + + /* Re-add to the gfn tree with the updated gfn */ + kvm_insert_gfn_node(slots, old); + + /* Replace the current INVALID slot with the updated memslot. */ + kvm_activate_memslot(kvm, invalid_slot, old); + + /* + * Clear the INVALID flag so that the invalid_slot is now a perfect + * copy of the old slot. Return it for cleanup in the caller. + */ + WARN_ON_ONCE(!(invalid_slot->flags & KVM_MEMSLOT_INVALID)); + invalid_slot->flags &= ~KVM_MEMSLOT_INVALID; + return invalid_slot; +} + +static void kvm_update_flags_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + struct kvm_memory_slot *working_slot) +{ + /* + * Similar to the MOVE case, but the slot doesn't need to be zapped as + * an intermediate step. Instead, the old memslot is simply replaced + * with a new, updated copy in both memslot sets. + */ + kvm_copy_memslot(working_slot, new); + kvm_replace_memslot(kvm, old, working_slot); + kvm_activate_memslot(kvm, old, working_slot); } static int kvm_set_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { - struct kvm_memory_slot *slot, old; - struct kvm_memslots *slots; + struct kvm_memory_slot *working; int r; /* - * Released in install_new_memslots. + * Modifications are done on an unreachable slot. Any changes are then + * (eventually) propagated to both the active and inactive slots. This + * allocation would ideally be on-demand (in helpers), but is done here + * to avoid having to handle failure after kvm_prepare_memory_region(). + */ + working = kzalloc(sizeof(*working), GFP_KERNEL_ACCOUNT); + if (!working) + return -ENOMEM; + + /* + * Released in kvm_swap_active_memslots. * * Must be held from before the current memslots are copied until * after the new memslots are installed with rcu_assign_pointer, - * then released before the synchronize srcu in install_new_memslots. + * then released before the synchronize srcu in kvm_swap_active_memslots. 
* * When modifying memslots outside of the slots_lock, must be held * before reading the pointer to the current memslots until after all @@ -1718,87 +1759,60 @@ static int kvm_set_memslot(struct kvm *kvm, */ mutex_lock(&kvm->slots_arch_lock); - slots = kvm_dup_memslots(__kvm_memslots(kvm, new->as_id), change); - if (!slots) { - mutex_unlock(&kvm->slots_arch_lock); - return -ENOMEM; - } - - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { - /* - * Note, the INVALID flag needs to be in the appropriate entry - * in the freshly allocated memslots, not in @old or @new. - */ - slot = id_to_memslot(slots, new->id); - slot->flags |= KVM_MEMSLOT_INVALID; - - /* - * We can re-use the old memslots, the only difference from the - * newly installed memslots is the invalid flag, which will get - * dropped by update_memslots anyway. We'll also revert to the - * old memslots if preparing the new memory region fails. - */ - slots = install_new_memslots(kvm, new->as_id, slots); - - /* From this point no new shadow pages pointing to a deleted, - * or moved, memslot will be created. - * - * validation of sp->gfn happens in: - * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) - * - kvm_is_visible_gfn (mmu_check_root) - */ - kvm_arch_flush_shadow_memslot(kvm, slot); - - /* Released in install_new_memslots. */ - mutex_lock(&kvm->slots_arch_lock); + /* + * Invalidate the old slot if it's being deleted or moved. This is + * done prior to actually deleting/moving the memslot to allow vCPUs to + * continue running by ensuring there are no mappings or shadow pages + * for the memslot when it is deleted/moved. Without pre-invalidation + * (and without a lock), a window would exist between effecting the + * delete/move and committing the changes in arch code where KVM or a + * guest could access a non-existent memslot. + */ + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) + kvm_invalidate_memslot(kvm, old, working); + r = kvm_prepare_memory_region(kvm, old, new, change); + if (r) { /* - * The arch-specific fields of the now-active memslots could - * have been modified between releasing slots_arch_lock in - * install_new_memslots and re-acquiring slots_arch_lock above. - * Copy them to the inactive memslots. Arch code is required - * to retrieve memslots *after* acquiring slots_arch_lock, thus - * the active memslots are guaranteed to be fresh. + * For DELETE/MOVE, revert the above INVALID change. No + * modifications required since the original slot was preserved + * in the inactive slots. Changing the active memslots also + * release slots_arch_lock. */ - kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, new->as_id)); + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) + kvm_activate_memslot(kvm, working, old); + else + mutex_unlock(&kvm->slots_arch_lock); + kfree(working); + return r; } /* - * Make a full copy of the old memslot, the pointer will become stale - * when the memslots are re-sorted by update_memslots(), and the old - * memslot needs to be referenced after calling update_memslots(), e.g. - * to free its resources and for arch specific behavior. This needs to - * happen *after* (re)acquiring slots_arch_lock. + * For DELETE and MOVE, the working slot is now active as the INVALID + * version of the old slot. MOVE is particularly special as it reuses + * the old slot and returns a copy of the old slot (in working_slot). + * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the + * old slot is detached but otherwise preserved. 
*/ - slot = id_to_memslot(slots, new->id); - if (slot) { - old = *slot; - } else { - WARN_ON_ONCE(change != KVM_MR_CREATE); - memset(&old, 0, sizeof(old)); - old.id = new->id; - old.as_id = new->as_id; - } - - r = kvm_prepare_memory_region(kvm, &old, new, change); - if (r) - goto out_slots; - - update_memslots(slots, new, change); - slots = install_new_memslots(kvm, new->as_id, slots); + if (change == KVM_MR_CREATE) + kvm_create_memslot(kvm, new, working); + else if (change == KVM_MR_DELETE) + kvm_delete_memslot(kvm, old, working); + else if (change == KVM_MR_MOVE) + old = kvm_move_memslot(kvm, old, new, working); + else if (change == KVM_MR_FLAGS_ONLY) + kvm_update_flags_memslot(kvm, old, new, working); + else + BUG(); - kvm_commit_memory_region(kvm, &old, new, change); + /* + * No need to refresh new->arch, changes after dropping slots_arch_lock + * will directly hit the final, active memsot. Architectures are + * responsible for knowing that new->arch may be stale. + */ + kvm_commit_memory_region(kvm, old, new, change); - kvfree(slots); return 0; - -out_slots: - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) - slots = install_new_memslots(kvm, new->as_id, slots); - else - mutex_unlock(&kvm->slots_arch_lock); - kvfree(slots); - return r; } /* @@ -1859,7 +1873,7 @@ int __kvm_set_memory_region(struct kvm *kvm, new.id = id; new.as_id = as_id; - return kvm_set_memslot(kvm, &new, KVM_MR_DELETE); + return kvm_set_memslot(kvm, old, &new, KVM_MR_DELETE); } new.as_id = as_id; @@ -1896,8 +1910,10 @@ int __kvm_set_memory_region(struct kvm *kvm, } if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { + int bkt; + /* Check for overlaps */ - kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) { + kvm_for_each_memslot(tmp, bkt, __kvm_memslots(kvm, as_id)) { if (tmp->id == id) continue; if (!((new.base_gfn + new.npages <= tmp->base_gfn) || @@ -1906,7 +1922,7 @@ int __kvm_set_memory_region(struct kvm *kvm, } } - return kvm_set_memslot(kvm, &new, change); + return kvm_set_memslot(kvm, old, &new, change); } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); @@ -2211,21 +2227,30 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot); struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) { struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); + u64 gen = slots->generation; struct kvm_memory_slot *slot; - int slot_index; - slot = try_get_memslot(slots, vcpu->last_used_slot, gfn); + /* + * This also protects against using a memslot from a different address space, + * since different address spaces have different generation numbers. + */ + if (unlikely(gen != vcpu->last_used_slot_gen)) { + vcpu->last_used_slot = NULL; + vcpu->last_used_slot_gen = gen; + } + + slot = try_get_memslot(vcpu->last_used_slot, gfn); if (slot) return slot; /* * Fall back to searching all memslots. We purposely use * search_memslots() instead of __gfn_to_memslot() to avoid - * thrashing the VM-wide last_used_index in kvm_memslots. + * thrashing the VM-wide last_used_slot in kvm_memslots. */ - slot = search_memslots(slots, gfn, &slot_index, false); + slot = search_memslots(slots, gfn, false); if (slot) { - vcpu->last_used_slot = slot_index; + vcpu->last_used_slot = slot; return slot; } -- cgit v1.2.3-58-ga151 From bcb63dcde829945487bad4917b614c28aaa59141 Mon Sep 17 00:00:00 2001 From: "Maciej S. 
Szmigiero" Date: Mon, 6 Dec 2021 20:54:31 +0100 Subject: KVM: Call kvm_arch_flush_shadow_memslot() on the old slot in kvm_invalidate_memslot() kvm_invalidate_memslot() calls kvm_arch_flush_shadow_memslot() on the active, but KVM_MEMSLOT_INVALID slot. Do it on the inactive (but valid) old slot instead since arch code really should not get passed such invalid slot. Note that this means that the "arch" field of the slot provided to kvm_arch_flush_shadow_memslot() may have stale data since this function is called with slots_arch_lock released. Suggested-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Reviewed-by: Sean Christopherson Message-Id: <813595ecc193d6ae39a87709899d4251523b05f8.1638817641.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a87df97e0b14..130eaf1c5711 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1630,7 +1630,7 @@ static void kvm_invalidate_memslot(struct kvm *kvm, * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) * - kvm_is_visible_gfn (mmu_check_root) */ - kvm_arch_flush_shadow_memslot(kvm, working_slot); + kvm_arch_flush_shadow_memslot(kvm, old); /* Was released by kvm_swap_active_memslots, reacquire. */ mutex_lock(&kvm->slots_arch_lock); -- cgit v1.2.3-58-ga151 From 44401a204734ce837e0b36c8418af4fad6a21f95 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:33 +0100 Subject: KVM: Optimize overlapping memslots check Do a quick lookup for possibly overlapping gfns when creating or moving a memslot instead of performing a linear scan of the whole memslot set. Signed-off-by: Maciej S. Szmigiero [sean: tweaked params to avoid churn in future cleanup] Reviewed-by: Sean Christopherson Message-Id: --- virt/kvm/kvm_main.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 130eaf1c5711..d27568b3b984 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1815,6 +1815,19 @@ static int kvm_set_memslot(struct kvm *kvm, return 0; } +static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, + gfn_t start, gfn_t end) +{ + struct kvm_memslot_iter iter; + + kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { + if (iter.slot->id != id) + return true; + } + + return false; +} + /* * Allocate some memory and give it an address in the guest physical address * space. @@ -1826,8 +1839,9 @@ static int kvm_set_memslot(struct kvm *kvm, int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem) { - struct kvm_memory_slot *old, *tmp; + struct kvm_memory_slot *old; struct kvm_memory_slot new; + struct kvm_memslots *slots; enum kvm_mr_change change; int as_id, id; int r; @@ -1856,11 +1870,13 @@ int __kvm_set_memory_region(struct kvm *kvm, if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) return -EINVAL; + slots = __kvm_memslots(kvm, as_id); + /* * Note, the old memslot (and the pointer itself!) may be invalidated * and/or destroyed by kvm_set_memslot(). 
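kvm_check_memslot_overlap() above boils down to a range-intersection test over the slots returned by a ranged gfn iterator, skipping the id that is being replaced. A self-contained sketch of the test itself, with a linear scan over a small array standing in for the ranged tree walk and illustrative toy_* names:

#include <stdbool.h>
#include <stdio.h>

struct toy_slot { int id; unsigned long base_gfn, npages; };

static bool toy_overlaps(const struct toy_slot *slots, int nr, int id,
			 unsigned long start, unsigned long end)
{
	for (int i = 0; i < nr; i++) {
		const struct toy_slot *s = &slots[i];

		if (s->id == id)
			continue;	/* the slot being replaced may overlap itself */
		if (start < s->base_gfn + s->npages && s->base_gfn < end)
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_slot slots[] = {
		{ .id = 0, .base_gfn = 0x000, .npages = 0x100 },
		{ .id = 1, .base_gfn = 0x400, .npages = 0x200 },
	};

	printf("new slot [0x4f0,0x500) overlaps: %d\n",
	       toy_overlaps(slots, 2, 2, 0x4f0, 0x500));
	printf("moving id 1 to [0x450,0x500) overlaps: %d\n",
	       toy_overlaps(slots, 2, 1, 0x450, 0x500));
	return 0;
}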
*/ - old = id_to_memslot(__kvm_memslots(kvm, as_id), id); + old = id_to_memslot(slots, id); if (!mem->memory_size) { if (!old || !old->npages) @@ -1909,18 +1925,10 @@ int __kvm_set_memory_region(struct kvm *kvm, return 0; } - if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { - int bkt; - - /* Check for overlaps */ - kvm_for_each_memslot(tmp, bkt, __kvm_memslots(kvm, as_id)) { - if (tmp->id == id) - continue; - if (!((new.base_gfn + new.npages <= tmp->base_gfn) || - (new.base_gfn >= tmp->base_gfn + tmp->npages))) - return -EEXIST; - } - } + if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && + kvm_check_memslot_overlap(slots, id, new.base_gfn, + new.base_gfn + new.npages)) + return -EEXIST; return kvm_set_memslot(kvm, old, &new, change); } -- cgit v1.2.3-58-ga151 From 0f9bdef3d933ba10d577b446c703a901fa5fdc30 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:34 +0100 Subject: KVM: Wait 'til the bitter end to initialize the "new" memslot Initialize the "new" memslot in the !DELETE path only after the various sanity checks have passed. This will allow a future commit to allocate @new dynamically without having to copy a memslot, and without having to deal with freeing @new in error paths and in the "nothing to change" path that's hiding in the sanity checks. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: --- virt/kvm/kvm_main.c | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index d27568b3b984..71815e75e41c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1843,6 +1843,8 @@ int __kvm_set_memory_region(struct kvm *kvm, struct kvm_memory_slot new; struct kvm_memslots *slots; enum kvm_mr_change change; + unsigned long npages; + gfn_t base_gfn; int as_id, id; int r; @@ -1869,6 +1871,8 @@ int __kvm_set_memory_region(struct kvm *kvm, return -EINVAL; if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) return -EINVAL; + if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) + return -EINVAL; slots = __kvm_memslots(kvm, as_id); @@ -1892,15 +1896,8 @@ int __kvm_set_memory_region(struct kvm *kvm, return kvm_set_memslot(kvm, old, &new, KVM_MR_DELETE); } - new.as_id = as_id; - new.id = id; - new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; - new.npages = mem->memory_size >> PAGE_SHIFT; - new.flags = mem->flags; - new.userspace_addr = mem->userspace_addr; - - if (new.npages > KVM_MEM_MAX_NR_PAGES) - return -EINVAL; + base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); + npages = (mem->memory_size >> PAGE_SHIFT); if (!old || !old->npages) { change = KVM_MR_CREATE; @@ -1909,27 +1906,33 @@ int __kvm_set_memory_region(struct kvm *kvm, * To simplify KVM internals, the total number of pages across * all memslots must fit in an unsigned long. */ - if ((kvm->nr_memslot_pages + new.npages) < kvm->nr_memslot_pages) + if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) return -EINVAL; } else { /* Modify an existing slot. 
*/ - if ((new.userspace_addr != old->userspace_addr) || - (new.npages != old->npages) || - ((new.flags ^ old->flags) & KVM_MEM_READONLY)) + if ((mem->userspace_addr != old->userspace_addr) || + (npages != old->npages) || + ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) return -EINVAL; - if (new.base_gfn != old->base_gfn) + if (base_gfn != old->base_gfn) change = KVM_MR_MOVE; - else if (new.flags != old->flags) + else if (mem->flags != old->flags) change = KVM_MR_FLAGS_ONLY; else /* Nothing to change. */ return 0; } if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && - kvm_check_memslot_overlap(slots, id, new.base_gfn, - new.base_gfn + new.npages)) + kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) return -EEXIST; + new.as_id = as_id; + new.id = id; + new.base_gfn = base_gfn; + new.npages = npages; + new.flags = mem->flags; + new.userspace_addr = mem->userspace_addr; + return kvm_set_memslot(kvm, old, &new, change); } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); -- cgit v1.2.3-58-ga151 From 244893fa2859d656e2caf88683211604eb9afd37 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:35 +0100 Subject: KVM: Dynamically allocate "new" memslots from the get-go Allocate the "new" memslot for !DELETE memslot updates straight away instead of filling an intermediate on-stack object and forcing kvm_set_memslot() to juggle the allocation and do weird things like reuse the old memslot object in MOVE. In the MOVE case, this results in an "extra" memslot allocation due to allocating both the "new" slot and the "invalid" slot, but that's a temporary and not-huge allocation, and MOVE is a relatively rare memslot operation. Regarding MOVE, drop the open-coded management of the gfn tree with a call to kvm_replace_memslot(), which already handles the case where new->base_gfn != old->base_gfn. This is made possible by virtue of not having to copy the "new" memslot data after erasing the old memslot from the gfn tree. Using kvm_replace_memslot(), and more specifically not reusing the old memslot, means the MOVE case now does hva tree and hash list updates, but that's a small price to pay for simplifying the code and making MOVE align with all the other flavors of updates. The "extra" updates are firmly in the noise from a performance perspective, e.g. the "move (in)active area" selfttests show a (very, very) slight improvement. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: --- virt/kvm/kvm_main.c | 178 +++++++++++++++++++++++----------------------------- 1 file changed, 77 insertions(+), 101 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 71815e75e41c..e588dc4f9b7d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1503,23 +1503,25 @@ static int kvm_prepare_memory_region(struct kvm *kvm, * new and KVM isn't using a ring buffer, allocate and initialize a * new bitmap. 
*/ - if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) - new->dirty_bitmap = NULL; - else if (old->dirty_bitmap) - new->dirty_bitmap = old->dirty_bitmap; - else if (!kvm->dirty_ring_size) { - r = kvm_alloc_dirty_bitmap(new); - if (r) - return r; + if (change != KVM_MR_DELETE) { + if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) + new->dirty_bitmap = NULL; + else if (old && old->dirty_bitmap) + new->dirty_bitmap = old->dirty_bitmap; + else if (!kvm->dirty_ring_size) { + r = kvm_alloc_dirty_bitmap(new); + if (r) + return r; - if (kvm_dirty_log_manual_protect_and_init_set(kvm)) - bitmap_set(new->dirty_bitmap, 0, new->npages); + if (kvm_dirty_log_manual_protect_and_init_set(kvm)) + bitmap_set(new->dirty_bitmap, 0, new->npages); + } } r = kvm_arch_prepare_memory_region(kvm, old, new, change); /* Free the bitmap on failure if it was allocated above. */ - if (r && new->dirty_bitmap && !old->dirty_bitmap) + if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap) kvm_destroy_dirty_bitmap(new); return r; @@ -1606,16 +1608,16 @@ static void kvm_copy_memslot(struct kvm_memory_slot *dest, static void kvm_invalidate_memslot(struct kvm *kvm, struct kvm_memory_slot *old, - struct kvm_memory_slot *working_slot) + struct kvm_memory_slot *invalid_slot) { /* * Mark the current slot INVALID. As with all memslot modifications, * this must be done on an unreachable slot to avoid modifying the * current slot in the active tree. */ - kvm_copy_memslot(working_slot, old); - working_slot->flags |= KVM_MEMSLOT_INVALID; - kvm_replace_memslot(kvm, old, working_slot); + kvm_copy_memslot(invalid_slot, old); + invalid_slot->flags |= KVM_MEMSLOT_INVALID; + kvm_replace_memslot(kvm, old, invalid_slot); /* * Activate the slot that is now marked INVALID, but don't propagate @@ -1642,20 +1644,15 @@ static void kvm_invalidate_memslot(struct kvm *kvm, * above. Writers are required to retrieve memslots *after* acquiring * slots_arch_lock, thus the active slot's data is guaranteed to be fresh. */ - old->arch = working_slot->arch; + old->arch = invalid_slot->arch; } static void kvm_create_memslot(struct kvm *kvm, - const struct kvm_memory_slot *new, - struct kvm_memory_slot *working) + struct kvm_memory_slot *new) { - /* - * Add the new memslot to the inactive set as a copy of the - * new memslot data provided by userspace. - */ - kvm_copy_memslot(working, new); - kvm_replace_memslot(kvm, NULL, working); - kvm_activate_memslot(kvm, NULL, working); + /* Add the new memslot to the inactive set and activate. */ + kvm_replace_memslot(kvm, NULL, new); + kvm_activate_memslot(kvm, NULL, new); } static void kvm_delete_memslot(struct kvm *kvm, @@ -1664,65 +1661,36 @@ static void kvm_delete_memslot(struct kvm *kvm, { /* * Remove the old memslot (in the inactive memslots) by passing NULL as - * the "new" slot. + * the "new" slot, and for the invalid version in the active slots. */ kvm_replace_memslot(kvm, old, NULL); - - /* And do the same for the invalid version in the active slot. */ kvm_activate_memslot(kvm, invalid_slot, NULL); - - /* Free the invalid slot, the caller will clean up the old slot. 
*/ - kfree(invalid_slot); } -static struct kvm_memory_slot *kvm_move_memslot(struct kvm *kvm, - struct kvm_memory_slot *old, - const struct kvm_memory_slot *new, - struct kvm_memory_slot *invalid_slot) +static void kvm_move_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + struct kvm_memory_slot *invalid_slot) { - struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, old->as_id); - - /* - * The memslot's gfn is changing, remove it from the inactive tree, it - * will be re-added with its updated gfn. Because its range is - * changing, an in-place replace is not possible. - */ - kvm_erase_gfn_node(slots, old); - - /* - * The old slot is now fully disconnected, reuse its memory for the - * persistent copy of "new". - */ - kvm_copy_memslot(old, new); - - /* Re-add to the gfn tree with the updated gfn */ - kvm_insert_gfn_node(slots, old); - - /* Replace the current INVALID slot with the updated memslot. */ - kvm_activate_memslot(kvm, invalid_slot, old); - /* - * Clear the INVALID flag so that the invalid_slot is now a perfect - * copy of the old slot. Return it for cleanup in the caller. + * Replace the old memslot in the inactive slots, and then swap slots + * and replace the current INVALID with the new as well. */ - WARN_ON_ONCE(!(invalid_slot->flags & KVM_MEMSLOT_INVALID)); - invalid_slot->flags &= ~KVM_MEMSLOT_INVALID; - return invalid_slot; + kvm_replace_memslot(kvm, old, new); + kvm_activate_memslot(kvm, invalid_slot, new); } static void kvm_update_flags_memslot(struct kvm *kvm, struct kvm_memory_slot *old, - const struct kvm_memory_slot *new, - struct kvm_memory_slot *working_slot) + struct kvm_memory_slot *new) { /* * Similar to the MOVE case, but the slot doesn't need to be zapped as * an intermediate step. Instead, the old memslot is simply replaced * with a new, updated copy in both memslot sets. */ - kvm_copy_memslot(working_slot, new); - kvm_replace_memslot(kvm, old, working_slot); - kvm_activate_memslot(kvm, old, working_slot); + kvm_replace_memslot(kvm, old, new); + kvm_activate_memslot(kvm, old, new); } static int kvm_set_memslot(struct kvm *kvm, @@ -1730,19 +1698,9 @@ static int kvm_set_memslot(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) { - struct kvm_memory_slot *working; + struct kvm_memory_slot *invalid_slot; int r; - /* - * Modifications are done on an unreachable slot. Any changes are then - * (eventually) propagated to both the active and inactive slots. This - * allocation would ideally be on-demand (in helpers), but is done here - * to avoid having to handle failure after kvm_prepare_memory_region(). - */ - working = kzalloc(sizeof(*working), GFP_KERNEL_ACCOUNT); - if (!working) - return -ENOMEM; - /* * Released in kvm_swap_active_memslots. * @@ -1767,9 +1725,19 @@ static int kvm_set_memslot(struct kvm *kvm, * (and without a lock), a window would exist between effecting the * delete/move and committing the changes in arch code where KVM or a * guest could access a non-existent memslot. + * + * Modifications are done on a temporary, unreachable slot. The old + * slot needs to be preserved in case a later step fails and the + * invalidation needs to be reverted. 
*/ - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) - kvm_invalidate_memslot(kvm, old, working); + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { + invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); + if (!invalid_slot) { + mutex_unlock(&kvm->slots_arch_lock); + return -ENOMEM; + } + kvm_invalidate_memslot(kvm, old, invalid_slot); + } r = kvm_prepare_memory_region(kvm, old, new, change); if (r) { @@ -1779,11 +1747,12 @@ static int kvm_set_memslot(struct kvm *kvm, * in the inactive slots. Changing the active memslots also * release slots_arch_lock. */ - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) - kvm_activate_memslot(kvm, working, old); - else + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { + kvm_activate_memslot(kvm, invalid_slot, old); + kfree(invalid_slot); + } else { mutex_unlock(&kvm->slots_arch_lock); - kfree(working); + } return r; } @@ -1795,16 +1764,20 @@ static int kvm_set_memslot(struct kvm *kvm, * old slot is detached but otherwise preserved. */ if (change == KVM_MR_CREATE) - kvm_create_memslot(kvm, new, working); + kvm_create_memslot(kvm, new); else if (change == KVM_MR_DELETE) - kvm_delete_memslot(kvm, old, working); + kvm_delete_memslot(kvm, old, invalid_slot); else if (change == KVM_MR_MOVE) - old = kvm_move_memslot(kvm, old, new, working); + kvm_move_memslot(kvm, old, new, invalid_slot); else if (change == KVM_MR_FLAGS_ONLY) - kvm_update_flags_memslot(kvm, old, new, working); + kvm_update_flags_memslot(kvm, old, new); else BUG(); + /* Free the temporary INVALID slot used for DELETE and MOVE. */ + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) + kfree(invalid_slot); + /* * No need to refresh new->arch, changes after dropping slots_arch_lock * will directly hit the final, active memsot. Architectures are @@ -1839,8 +1812,7 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem) { - struct kvm_memory_slot *old; - struct kvm_memory_slot new; + struct kvm_memory_slot *old, *new; struct kvm_memslots *slots; enum kvm_mr_change change; unsigned long npages; @@ -1889,11 +1861,7 @@ int __kvm_set_memory_region(struct kvm *kvm, if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) return -EIO; - memset(&new, 0, sizeof(new)); - new.id = id; - new.as_id = as_id; - - return kvm_set_memslot(kvm, old, &new, KVM_MR_DELETE); + return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); } base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); @@ -1926,14 +1894,22 @@ int __kvm_set_memory_region(struct kvm *kvm, kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) return -EEXIST; - new.as_id = as_id; - new.id = id; - new.base_gfn = base_gfn; - new.npages = npages; - new.flags = mem->flags; - new.userspace_addr = mem->userspace_addr; + /* Allocate a slot that will persist in the memslot. 
*/ + new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); + if (!new) + return -ENOMEM; + + new->as_id = as_id; + new->id = id; + new->base_gfn = base_gfn; + new->npages = npages; + new->flags = mem->flags; + new->userspace_addr = mem->userspace_addr; - return kvm_set_memslot(kvm, old, &new, change); + r = kvm_set_memslot(kvm, old, new, change); + if (r) + kfree(new); + return r; } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); -- cgit v1.2.3-58-ga151 From 8283e36abfff507c64fe8289ac30ea7ab59648aa Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Mon, 15 Nov 2021 15:45:58 -0800 Subject: KVM: x86/mmu: Propagate memslot const qualifier In preparation for implementing in-place hugepage promotion, various functions will need to be called from zap_collapsible_spte_range, which has the const qualifier on its memslot argument. Propagate the const qualifier to the various functions which will be needed. This just serves to simplify the following patch. No functional change intended. Signed-off-by: Ben Gardon Message-Id: <20211115234603.2908381-11-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 4 ++-- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/mmu/mmu_internal.h | 2 +- arch/x86/kvm/mmu/page_track.c | 4 ++-- arch/x86/kvm/mmu/spte.c | 2 +- arch/x86/kvm/mmu/spte.h | 2 +- include/linux/kvm_host.h | 10 +++++----- virt/kvm/kvm_main.c | 12 ++++++------ 8 files changed, 19 insertions(+), 19 deletions(-) (limited to 'virt') diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index e99a30a4d38b..eb186bc57f6a 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -64,8 +64,8 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); bool kvm_slot_page_track_is_active(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode); + const struct kvm_memory_slot *slot, + gfn_t gfn, enum kvm_page_track_mode mode); void kvm_page_track_register_notifier(struct kvm *kvm, diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 29bcf26b0cb3..c28cf7eeb79d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2580,7 +2580,7 @@ static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must * be write-protected. 
*/ -int mmu_try_to_unsync_pages(struct kvm *kvm, struct kvm_memory_slot *slot, +int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch) { struct kvm_mmu_page *sp; diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 787b8c553b9e..da6166b5c377 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -117,7 +117,7 @@ static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp) return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode; } -int mmu_try_to_unsync_pages(struct kvm *kvm, struct kvm_memory_slot *slot, +int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch); void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 35c221d5f6ce..68eb1fb548b6 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -174,8 +174,8 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); * check if the corresponding access on the specified guest page is tracked. */ bool kvm_slot_page_track_is_active(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode) + const struct kvm_memory_slot *slot, + gfn_t gfn, enum kvm_page_track_mode mode) { int index; diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 8d3fe4311bc1..8a7b03207762 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -90,7 +90,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) } bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - struct kvm_memory_slot *slot, + const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index cc432f9a966b..a4af2a42695c 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -330,7 +330,7 @@ static inline u64 get_mmio_spte_generation(u64 spte) } bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - struct kvm_memory_slot *slot, + const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3bc98497e796..3eb7695aaa73 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -460,7 +460,7 @@ struct kvm_memory_slot { u16 as_id; }; -static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot) +static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot) { return slot->flags & KVM_MEM_LOG_DIRTY_PAGES; } @@ -994,9 +994,9 @@ void kvm_set_page_accessed(struct page *page); kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable); -kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); -kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); -kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, +kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn); +kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn); +kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, 
bool *async, bool write_fault, bool *writable, hva_t *hva); @@ -1073,7 +1073,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); -void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn); +void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e588dc4f9b7d..f93b60165fd7 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2285,12 +2285,12 @@ out: return size; } -static bool memslot_is_readonly(struct kvm_memory_slot *slot) +static bool memslot_is_readonly(const struct kvm_memory_slot *slot) { return slot->flags & KVM_MEM_READONLY; } -static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, +static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages, bool write) { if (!slot || slot->flags & KVM_MEMSLOT_INVALID) @@ -2585,7 +2585,7 @@ exit: return pfn; } -kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, +kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable, hva_t *hva) { @@ -2625,13 +2625,13 @@ kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, } EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); -kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) +kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); -kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) +kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); } @@ -3150,7 +3150,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) EXPORT_SYMBOL_GPL(kvm_clear_guest); void mark_page_dirty_in_slot(struct kvm *kvm, - struct kvm_memory_slot *memslot, + const struct kvm_memory_slot *memslot, gfn_t gfn) { if (memslot && kvm_slot_dirty_track_enabled(memslot)) { -- cgit v1.2.3-58-ga151 From aefdc2ed445eb470bdba108bd6a19fb232d3bada Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 20 Oct 2021 06:38:05 -0400 Subject: KVM: Avoid atomic operations when kicking the running vCPU If we do have the vcpu mutex, as is the case if kvm_running_vcpu is set to the target vcpu of the kick, changes to vcpu->mode do not need atomic operations; cmpxchg is only needed _outside_ the mutex to ensure that the IN_GUEST_MODE->EXITING_GUEST_MODE change does not race with the vcpu thread going OUTSIDE_GUEST_MODE. Use this to optimize the case of a vCPU sending an interrupt to itself. 
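As a stand-alone illustration of the locking argument above, the sketch below models the two kick paths with C11 atomics: the vCPU's own thread may downgrade IN_GUEST_MODE with a plain store, while a remote kicker still needs a compare-and-swap. All types and names here are simplified stand-ins, not the kernel's kvm_vcpu_kick() itself.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, EXITING_GUEST_MODE };

struct vcpu {
        _Atomic int mode;
};

/* On the vCPU's own thread the mode cannot concurrently re-enter guest
 * mode, so a plain store is enough to request an exit. */
static void kick_self(struct vcpu *v)
{
        if (atomic_load_explicit(&v->mode, memory_order_relaxed) == IN_GUEST_MODE)
                atomic_store_explicit(&v->mode, EXITING_GUEST_MODE,
                                      memory_order_relaxed);
}

/* A remote kicker must use compare-and-swap so it cannot clobber a
 * transition made by the vCPU thread itself. */
static bool kick_remote(struct vcpu *v)
{
        int expected = IN_GUEST_MODE;

        return atomic_compare_exchange_strong(&v->mode, &expected,
                                              EXITING_GUEST_MODE);
}

int main(void)
{
        struct vcpu v = { .mode = IN_GUEST_MODE };

        kick_self(&v);
        printf("mode after self-kick: %d (remote CAS would now fail: %d)\n",
               (int)atomic_load(&v.mode), (int)!kick_remote(&v));
        return 0;
}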
Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f93b60165fd7..e9990c4c6e40 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3396,6 +3396,19 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) if (kvm_vcpu_wake_up(vcpu)) return; + me = get_cpu(); + /* + * The only state change done outside the vcpu mutex is IN_GUEST_MODE + * to EXITING_GUEST_MODE. Therefore the moderately expensive "should + * kick" check does not need atomic operations if kvm_vcpu_kick is used + * within the vCPU thread itself. + */ + if (vcpu == __this_cpu_read(kvm_running_vcpu)) { + if (vcpu->mode == IN_GUEST_MODE) + WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); + goto out; + } + /* * Note, the vCPU could get migrated to a different pCPU at any point * after kvm_arch_vcpu_should_kick(), which could result in sending an @@ -3403,12 +3416,12 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the * vCPU also requires it to leave IN_GUEST_MODE. */ - me = get_cpu(); if (kvm_arch_vcpu_should_kick(vcpu)) { cpu = READ_ONCE(vcpu->cpu); if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) smp_send_reschedule(cpu); } +out: put_cpu(); } EXPORT_SYMBOL_GPL(kvm_vcpu_kick); -- cgit v1.2.3-58-ga151 From 6f390916c4fb359507d9ac4bf1b28a4f8abee5c0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:11:56 -0700 Subject: KVM: s390: Ensure kvm_arch_no_poll() is read once when blocking vCPU Wrap s390's halt_poll_max_steal with READ_ONCE and snapshot the result of kvm_arch_no_poll() in kvm_vcpu_block() to avoid a mostly-theoretical, largely benign bug on s390 where the result of kvm_arch_no_poll() could change due to userspace modifying halt_poll_max_steal while the vCPU is blocking. The bug is largely benign as it will either cause KVM to skip updating halt-polling times (no_poll toggles false=>true) or to update halt-polling times with a slightly flawed block_ns. Note, READ_ONCE is unnecessary in the current code, add it in case the arch hook is ever inlined, and to provide a hint that userspace can change the param at will. 
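The pattern being fixed is easy to show outside the kernel: snapshot the tunable once per invocation so later decisions in the same call cannot disagree with the initial check. The sketch below is a userspace model with made-up names; READ_ONCE() is approximated by a single relaxed atomic load.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Module-parameter stand-in that userspace may change at any time. */
static _Atomic unsigned int halt_poll_max_steal = 10;   /* percent */

/* Rough analogue of kvm_arch_no_poll(): one relaxed load per call. */
static bool no_poll(unsigned int steal_pct)
{
        return steal_pct >= atomic_load_explicit(&halt_poll_max_steal,
                                                 memory_order_relaxed);
}

static void vcpu_block_once(unsigned int steal_pct)
{
        /* Snapshot the answer once; reuse it for every later decision. */
        bool halt_poll_allowed = !no_poll(steal_pct);

        if (halt_poll_allowed)
                printf("steal=%u%%: poll first, then wait\n", steal_pct);
        else
                printf("steal=%u%%: skip polling, wait immediately\n", steal_pct);

        /* ... block ... */

        if (halt_poll_allowed)   /* same snapshot, cannot disagree */
                printf("steal=%u%%: adjust halt-poll window\n", steal_pct);
}

int main(void)
{
        vcpu_block_once(5);
        vcpu_block_once(50);
        return 0;
}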
Fixes: 8b905d28ee17 ("KVM: s390: provide kvm_arch_no_poll function") Reviewed-by: Christian Borntraeger Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/s390/kvm/kvm-s390.c | 2 +- virt/kvm/kvm_main.c | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'virt') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b943a589ee41..9653e4075b11 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -3403,7 +3403,7 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) { /* do not poll with more than halt_poll_max_steal percent of steal time */ if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >= - halt_poll_max_steal) { + READ_ONCE(halt_poll_max_steal)) { vcpu->stat.halt_no_poll_steal++; return true; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e9990c4c6e40..a26b069a6929 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3284,6 +3284,7 @@ update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) */ void kvm_vcpu_block(struct kvm_vcpu *vcpu) { + bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); ktime_t start, cur, poll_end; bool waited = false; u64 block_ns; @@ -3291,7 +3292,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) kvm_arch_vcpu_blocking(vcpu); start = cur = poll_end = ktime_get(); - if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) { + if (vcpu->halt_poll_ns && halt_poll_allowed) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); ++vcpu->stat.generic.halt_attempted_poll; @@ -3346,7 +3347,7 @@ out: update_halt_poll_stats( vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); - if (!kvm_arch_no_poll(vcpu)) { + if (halt_poll_allowed) { if (!vcpu_valid_wakeup(vcpu)) { shrink_halt_poll_ns(vcpu); } else if (vcpu->kvm->max_halt_poll_ns) { -- cgit v1.2.3-58-ga151 From 510958e997217e39a16b47afb5a44dfa39013964 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:11:57 -0700 Subject: KVM: Force PPC to define its own rcuwait object Do not define/reference kvm_vcpu.wait if __KVM_HAVE_ARCH_WQP is true, and instead force the architecture (PPC) to define its own rcuwait object. Allowing common KVM to directly access vcpu->wait without a guard makes it all too easy to introduce potential bugs, e.g. kvm_vcpu_block(), kvm_vcpu_on_spin(), and async_pf_execute() all operate on vcpu->wait, not the result of kvm_arch_vcpu_get_wait(), and so may do the wrong thing for PPC. Due to PPC's shenanigans with respect to callbacks and waits (it switches to the virtual core's wait object at KVM_RUN!?!?), it's not clear whether or not this fixes any bugs. 
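A minimal model of the accessor indirection this patch enforces is sketched below: common code never touches a wait field directly, it always asks a kvm_arch_vcpu_get_wait()-style helper, so an architecture is free to relocate the object. The structures and the ARCH_HAS_OWN_WAIT switch are illustrative assumptions, not the real rcuwait plumbing.

#include <stdio.h>

struct waitqueue { const char *owner; };

/* Define ARCH_HAS_OWN_WAIT to model an arch (like PPC here) that keeps the
 * wait object in its arch-private state instead of the generic vcpu. */
/* #define ARCH_HAS_OWN_WAIT 1 */

struct vcpu_arch {
#ifdef ARCH_HAS_OWN_WAIT
        struct waitqueue wait;
#endif
        int dummy;
};

struct vcpu {
#ifndef ARCH_HAS_OWN_WAIT
        struct waitqueue wait;
#endif
        struct vcpu_arch arch;
};

/* The only way common code is allowed to reach the wait object. */
static struct waitqueue *vcpu_get_wait(struct vcpu *v)
{
#ifdef ARCH_HAS_OWN_WAIT
        return &v->arch.wait;
#else
        return &v->wait;
#endif
}

static void wake_one(struct waitqueue *wq)
{
        printf("waking wait object at %p\n", (void *)wq);
}

int main(void)
{
        struct vcpu v = { 0 };

        /* async page-fault completion, vcpu_on_spin, etc. all go here. */
        wake_one(vcpu_get_wait(&v));
        return 0;
}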
Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kvm/powerpc.c | 3 ++- include/linux/kvm_host.h | 2 ++ virt/kvm/async_pf.c | 2 +- virt/kvm/kvm_main.c | 9 ++++++--- 5 files changed, 12 insertions(+), 5 deletions(-) (limited to 'virt') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index e4d23193eba7..6ec97eff9563 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -749,6 +749,7 @@ struct kvm_vcpu_arch { u8 irq_pending; /* Used by XIVE to signal pending guest irqs */ u32 last_inst; + struct rcuwait wait; struct rcuwait *waitp; struct kvmppc_vcore *vcore; int ret; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index f1233500f4dc..7de9ddbc6af1 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -753,7 +753,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (err) goto out_vcpu_uninit; - vcpu->arch.waitp = &vcpu->wait; + rcuwait_init(&vcpu->arch.wait); + vcpu->arch.waitp = &vcpu->arch.wait; kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id); return 0; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3eb7695aaa73..afacbfb2e482 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -314,7 +314,9 @@ struct kvm_vcpu { struct mutex mutex; struct kvm_run *run; +#ifndef __KVM_HAVE_ARCH_WQP struct rcuwait wait; +#endif struct pid __rcu *pid; int sigset_active; sigset_t sigset; diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index dd777688d14a..ccb35c22785e 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work) trace_kvm_async_pf_completed(addr, cr2_or_gpa); - rcuwait_wake_up(&vcpu->wait); + rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); mmput(mm); kvm_put_kvm(vcpu->kvm); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a26b069a6929..11db44f4110e 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -422,7 +422,9 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) vcpu->kvm = kvm; vcpu->vcpu_id = id; vcpu->pid = NULL; +#ifndef __KVM_HAVE_ARCH_WQP rcuwait_init(&vcpu->wait); +#endif kvm_async_pf_vcpu_init(vcpu); vcpu->pre_pcpu = -1; @@ -3284,6 +3286,7 @@ update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) */ void kvm_vcpu_block(struct kvm_vcpu *vcpu) { + struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); ktime_t start, cur, poll_end; bool waited = false; @@ -3322,7 +3325,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) } - prepare_to_rcuwait(&vcpu->wait); + prepare_to_rcuwait(wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); @@ -3332,7 +3335,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) waited = true; schedule(); } - finish_rcuwait(&vcpu->wait); + finish_rcuwait(wait); cur = ktime_get(); if (waited) { vcpu->stat.generic.halt_wait_ns += @@ -3544,7 +3547,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) continue; if (vcpu == me) continue; - if (rcuwait_active(&vcpu->wait) && + if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)) && !vcpu_dy_runnable(vcpu)) continue; if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && -- cgit v1.2.3-58-ga151 From 8df6a61c04038fa481a717fc86af38304aa600a3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:11:58 -0700 
Subject: KVM: Update halt-polling stats if and only if halt-polling was attempted Don't update halt-polling stats if halt-polling wasn't attempted. This is a nop as @poll_ns is guaranteed to be '0' (poll_end == start); in a future patch (to move the histogram stats into the helper), it will avoid to avoid a discrepancy in what is considered a "successful" halt-poll. No functional change intended. Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 11db44f4110e..1a15043ceecb 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3288,6 +3288,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) { struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); + bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; ktime_t start, cur, poll_end; bool waited = false; u64 block_ns; @@ -3295,7 +3296,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) kvm_arch_vcpu_blocking(vcpu); start = cur = poll_end = ktime_get(); - if (vcpu->halt_poll_ns && halt_poll_allowed) { + if (do_halt_poll) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); ++vcpu->stat.generic.halt_attempted_poll; @@ -3347,8 +3348,9 @@ out: kvm_arch_vcpu_unblocking(vcpu); block_ns = ktime_to_ns(cur) - ktime_to_ns(start); - update_halt_poll_stats( - vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); + if (do_halt_poll) + update_halt_poll_stats( + vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); if (halt_poll_allowed) { if (!vcpu_valid_wakeup(vcpu)) { -- cgit v1.2.3-58-ga151 From 29e72893cec3b0268e19e7857d10bf79843f94dc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:11:59 -0700 Subject: KVM: Refactor and document halt-polling stats update helper Add a comment to document that halt-polling is considered successful even if the polling loop itself didn't detect a wake event, i.e. if a wake event was detect in the final kvm_vcpu_check_block(). Invert the param to update helper so that the helper is a dumb function that is "told" whether or not polling was successful, as opposed to determining success based on blocking behavior. Opportunistically tweak the params to the update helper to reduce the line length for the call site so that it fits on a single line, and so that the prototype conforms to the more traditional kernel style. No functional change intended. 
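Taken together, the two halt-poll stats patches above reduce to a small pattern: only touch the stats when polling was actually attempted, and make the helper a dumb sink that is told whether polling succeeded. A self-contained approximation, with simplified field names and hard-coded timestamps, might look like this:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct halt_stats {
        uint64_t poll_success_ns;
        uint64_t poll_fail_ns;
};

/* The caller decides "success"; the helper only does the bookkeeping. */
static void update_halt_poll_stats(struct halt_stats *s, uint64_t start_ns,
                                   uint64_t end_ns, bool success)
{
        uint64_t poll_ns = end_ns - start_ns;

        if (success)
                s->poll_success_ns += poll_ns;
        else
                s->poll_fail_ns += poll_ns;
}

int main(void)
{
        struct halt_stats s = { 0 };
        bool do_halt_poll = true;   /* stats are skipped entirely if false */
        bool waited = false;        /* did the vCPU end up blocking? */

        if (do_halt_poll)
                update_halt_poll_stats(&s, 100, 250, !waited);

        printf("success_ns=%llu fail_ns=%llu\n",
               (unsigned long long)s.poll_success_ns,
               (unsigned long long)s.poll_fail_ns);
        return 0;
}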
Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1a15043ceecb..a7f9c313d642 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3272,13 +3272,15 @@ out: return ret; } -static inline void -update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) +static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, + ktime_t end, bool success) { - if (waited) - vcpu->stat.generic.halt_poll_fail_ns += poll_ns; - else + u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); + + if (success) vcpu->stat.generic.halt_poll_success_ns += poll_ns; + else + vcpu->stat.generic.halt_poll_fail_ns += poll_ns; } /* @@ -3348,9 +3350,13 @@ out: kvm_arch_vcpu_unblocking(vcpu); block_ns = ktime_to_ns(cur) - ktime_to_ns(start); + /* + * Note, halt-polling is considered successful so long as the vCPU was + * never actually scheduled out, i.e. even if the wake event arrived + * after of the halt-polling loop itself, but before the full wait. + */ if (do_halt_poll) - update_halt_poll_stats( - vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); + update_halt_poll_stats(vcpu, start, poll_end, !waited); if (halt_poll_allowed) { if (!vcpu_valid_wakeup(vcpu)) { -- cgit v1.2.3-58-ga151 From 30c9434717fd27e634a157dcdee286703b1f4891 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:00 -0700 Subject: KVM: Reconcile discrepancies in halt-polling stats Move the halt-polling "success" and histogram stats update into the dedicated helper to fix a discrepancy where the success/fail "time" stats consider polling successful so long as the wait is avoided, but the main "success" and histogram stats consider polling successful if and only if a wake event was detected by the halt-polling loop. Move halt_attempted_poll to the helper as well so that all the stats are updated in a single location. While it's a bit odd to update the stat well after the fact, practically speaking there's no meaningful advantage to updating before polling. Note, there is a functional change in addition to the success vs. fail change. The histogram updates previously called ktime_get() instead of using "cur". But that change is desirable as it means all the stats are now updated with the same polling time, and avoids the extra ktime_get(), which isn't expensive but isn't free either. 
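Building on the previous sketch, the reconciliation described above amounts to computing the elapsed poll time once, from timestamps the caller already holds, and feeding that single value to both the plain counter and the histogram. The bucketing and field names below are simplified assumptions, not the kernel's KVM_STATS_LOG_HIST_UPDATE():

#include <stdint.h>
#include <stdio.h>

#define HIST_BUCKETS 32

struct poll_stats {
        uint64_t success_ns;
        uint64_t success_hist[HIST_BUCKETS];
};

/* Roughly floor(log2(ns)), clamped to the last bucket. */
static unsigned int log2_bucket(uint64_t ns)
{
        unsigned int b = 0;

        while (ns > 1 && b < HIST_BUCKETS - 1) {
                ns >>= 1;
                b++;
        }
        return b;
}

static void record_successful_poll(struct poll_stats *s, uint64_t start_ns,
                                   uint64_t poll_end_ns)
{
        uint64_t poll_ns = poll_end_ns - start_ns;      /* computed exactly once */

        s->success_ns += poll_ns;
        s->success_hist[log2_bucket(poll_ns)]++;
}

int main(void)
{
        struct poll_stats s = { 0 };

        record_successful_poll(&s, 1000, 9000);         /* 8000 ns poll */
        printf("success_ns=%llu, hist bucket %u incremented\n",
               (unsigned long long)s.success_ns, log2_bucket(8000));
        return 0;
}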
Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-8-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a7f9c313d642..44158a4794d8 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3275,12 +3275,23 @@ out: static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, ktime_t end, bool success) { + struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); - if (success) - vcpu->stat.generic.halt_poll_success_ns += poll_ns; - else - vcpu->stat.generic.halt_poll_fail_ns += poll_ns; + ++vcpu->stat.generic.halt_attempted_poll; + + if (success) { + ++vcpu->stat.generic.halt_successful_poll; + + if (!vcpu_valid_wakeup(vcpu)) + ++vcpu->stat.generic.halt_poll_invalid; + + stats->halt_poll_success_ns += poll_ns; + KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns); + } else { + stats->halt_poll_fail_ns += poll_ns; + KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns); + } } /* @@ -3301,30 +3312,16 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) if (do_halt_poll) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); - ++vcpu->stat.generic.halt_attempted_poll; do { /* * This sets KVM_REQ_UNHALT if an interrupt * arrives. */ - if (kvm_vcpu_check_block(vcpu) < 0) { - ++vcpu->stat.generic.halt_successful_poll; - if (!vcpu_valid_wakeup(vcpu)) - ++vcpu->stat.generic.halt_poll_invalid; - - KVM_STATS_LOG_HIST_UPDATE( - vcpu->stat.generic.halt_poll_success_hist, - ktime_to_ns(ktime_get()) - - ktime_to_ns(start)); + if (kvm_vcpu_check_block(vcpu) < 0) goto out; - } cpu_relax(); poll_end = cur = ktime_get(); } while (kvm_vcpu_can_poll(cur, stop)); - - KVM_STATS_LOG_HIST_UPDATE( - vcpu->stat.generic.halt_poll_fail_hist, - ktime_to_ns(ktime_get()) - ktime_to_ns(start)); } -- cgit v1.2.3-58-ga151 From f6c60d081e2ccb4655fa90625b630c860a99d036 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:04 -0700 Subject: KVM: Don't block+unblock when halt-polling is successful Invoke the arch hooks for block+unblock if and only if KVM actually attempts to block the vCPU. The only non-nop implementation is on x86, specifically SVM's AVIC, and there is no need to put the AVIC prior to halt-polling; KVM x86's kvm_vcpu_has_events() will scour the full vIRR to find pending IRQs regardless of whether the AVIC is loaded/"running". The primary motivation is to allow future cleanup to split out "block" from "halt", but this is also likely a small performance boost on x86 SVM when halt-polling is successful. Adjust the post-block path to update "cur" after unblocking, i.e. include AVIC load time in halt_wait_ns and halt_wait_hist, so that the behavior is consistent. Moving just the pre-block arch hook would result in only the AVIC put latency being included in the halt_wait stats. There is no obvious evidence that one way or the other is correct, so just ensure KVM is consistent. Note, x86 has two separate paths for handling APICv with respect to vCPU blocking. VMX uses hooks in x86's vcpu_block(), while SVM uses the arch hooks in kvm_vcpu_block(). Prior to this path, the two paths were more or less functionally identical. That is very much not the case after this patch, as the hooks used by VMX _must_ fire before halt-polling. 
x86's entire mess will be cleaned up in future patches. Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-12-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 44158a4794d8..cc68d21a8e58 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3306,8 +3306,6 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) bool waited = false; u64 block_ns; - kvm_arch_vcpu_blocking(vcpu); - start = cur = poll_end = ktime_get(); if (do_halt_poll) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); @@ -3324,6 +3322,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) } while (kvm_vcpu_can_poll(cur, stop)); } + kvm_arch_vcpu_blocking(vcpu); prepare_to_rcuwait(wait); for (;;) { @@ -3336,6 +3335,9 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) schedule(); } finish_rcuwait(wait); + + kvm_arch_vcpu_unblocking(vcpu); + cur = ktime_get(); if (waited) { vcpu->stat.generic.halt_wait_ns += @@ -3344,7 +3346,6 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) ktime_to_ns(cur) - ktime_to_ns(poll_end)); } out: - kvm_arch_vcpu_unblocking(vcpu); block_ns = ktime_to_ns(cur) - ktime_to_ns(start); /* -- cgit v1.2.3-58-ga151 From 005467e06b16261ffdd7130ff0b4f0ebd627599a Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:02 -0700 Subject: KVM: Drop obsolete kvm_arch_vcpu_block_finish() Drop kvm_arch_vcpu_block_finish() now that all arch implementations are nops. No functional change intended. Acked-by: Christian Borntraeger Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-10-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/kvm_host.h | 1 - arch/mips/include/asm/kvm_host.h | 1 - arch/powerpc/include/asm/kvm_host.h | 1 - arch/riscv/include/asm/kvm_host.h | 1 - arch/s390/include/asm/kvm_host.h | 2 -- arch/s390/kvm/kvm-s390.c | 5 ----- arch/x86/include/asm/kvm_host.h | 2 -- virt/kvm/kvm_main.c | 1 - 8 files changed, 14 deletions(-) (limited to 'virt') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2a5f7f38006f..0e75277be5d5 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -717,7 +717,6 @@ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} void kvm_arm_init_debug(void); void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 696f6b009377..72b90d45a46e 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -897,7 +897,6 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB int kvm_arch_flush_remote_tlb(struct kvm *kvm); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 6ec97eff9563..6e92b2c7a938 100644 --- 
a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -865,6 +865,5 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_exit(void) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} #endif /* __POWERPC_KVM_HOST_H__ */ diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 2639b9ee48f9..907fafea787e 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -208,7 +208,6 @@ struct kvm_vcpu_arch { static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} #define KVM_ARCH_WANT_MMU_NOTIFIER diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index a604d51acfc8..a22c9266ea05 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -1010,6 +1010,4 @@ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} -void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu); - #endif diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8f577c76fcd3..dd099d352753 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5047,11 +5047,6 @@ static inline unsigned long nonhyp_mask(int i) return 0x0000ffffffffffffUL >> (nonhyp_fai << 4); } -void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) -{ - -} - static int __init kvm_s390_init(void) { int i; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index be1ad66cf39a..c07b30877de0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1931,8 +1931,6 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) static_call_cond(kvm_x86_vcpu_unblocking)(vcpu); } -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} - static inline int kvm_cpu_get_apicid(int mps_cpu) { #ifdef CONFIG_X86_LOCAL_APIC diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index cc68d21a8e58..53c58606e1e2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3376,7 +3376,6 @@ out: } trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); - kvm_arch_vcpu_block_finish(vcpu); } EXPORT_SYMBOL_GPL(kvm_vcpu_block); -- cgit v1.2.3-58-ga151 From 91b99ea7065786d0bff1c9281b002455dbaeb08b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:06 -0700 Subject: KVM: Rename kvm_vcpu_block() => kvm_vcpu_halt() Rename kvm_vcpu_block() to kvm_vcpu_halt() in preparation for splitting the actual "block" sequences into a separate helper (to be named kvm_vcpu_block()). x86 will use the standalone block-only path to handle non-halt cases where the vCPU is not runnable. Rename block_ns to halt_ns to match the new function name. No functional change intended. 
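The rename, and the split that the following patches perform, can be pictured as two plain functions: a bare wait primitive and a halt wrapper that owns the polling policy. The sketch below is only a shape; wake-event handling, statistics, and the rcuwait machinery are reduced to stand-in fields and prints.

#include <stdbool.h>
#include <stdio.h>

struct vcpu {
        unsigned int halt_poll_ns;      /* 0 disables polling */
        bool wake_event_pending;
};

/* Bare "wait until a wake event" primitive: no polling, no halt policy.
 * The loop body is a stand-in for prepare_to_wait()/schedule(). */
static bool vcpu_block(struct vcpu *v)
{
        bool waited = false;

        while (!v->wake_event_pending) {
                waited = true;
                v->wake_event_pending = true;   /* pretend a wakeup arrived */
        }
        return waited;
}

/* Halt emulation: optionally busy-poll first, then fall back to blocking.
 * Only this wrapper would grow/shrink halt_poll_ns and update halt stats. */
static void vcpu_halt(struct vcpu *v)
{
        bool do_halt_poll = v->halt_poll_ns != 0;

        if (do_halt_poll && v->wake_event_pending) {
                printf("halt-poll hit: no scheduling out needed\n");
                return;
        }
        printf("blocked: waited=%d\n", vcpu_block(v));
}

int main(void)
{
        struct vcpu v = { .halt_poll_ns = 200000, .wake_event_pending = false };

        vcpu_halt(&v);  /* non-halt wait states could call vcpu_block() directly */
        return 0;
}

The point of the split is that callers with non-halt wait conditions (e.g. x86's Wait-For-SIPI) can use the bare primitive without inheriting halt-polling policy.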
Reviewed-by: David Matlack Reviewed-by: Christian Borntraeger Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-14-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arch_timer.c | 2 +- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/handle_exit.c | 2 +- arch/arm64/kvm/psci.c | 2 +- arch/mips/kvm/emulate.c | 2 +- arch/powerpc/kvm/book3s_pr.c | 2 +- arch/powerpc/kvm/book3s_pr_papr.c | 2 +- arch/powerpc/kvm/booke.c | 2 +- arch/powerpc/kvm/powerpc.c | 2 +- arch/riscv/kvm/vcpu_exit.c | 2 +- arch/s390/kvm/interrupt.c | 2 +- arch/x86/kvm/x86.c | 11 +++++++++-- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 20 +++++++++----------- 14 files changed, 30 insertions(+), 25 deletions(-) (limited to 'virt') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index d6f4114f1d11..3aeaa79ad4a2 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -467,7 +467,7 @@ out: } /* - * Schedule the background timer before calling kvm_vcpu_block, so that this + * Schedule the background timer before calling kvm_vcpu_halt, so that this * thread is removed from its waitqueue and made runnable when there's a timer * interrupt to handle. */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ced54a3a3db0..77ecc11d67ae 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -681,7 +681,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu) vgic_v4_put(vcpu, true); preempt_enable(); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); preempt_disable(); diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 4794563a506b..6d0baf71aa67 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -82,7 +82,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu) * * WFE: Yield the CPU and come back to this vcpu when the scheduler * decides to. - * WFI: Simply call kvm_vcpu_block(), which will halt execution of + * WFI: Simply call kvm_vcpu_halt(), which will halt execution of * world-switches and schedule other host processes until there is an * incoming IRQ or FIQ to the VM. */ diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index ed675fce8fb7..ad6c9ef32928 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -46,7 +46,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu) * specification (ARM DEN 0022A). This means all suspend states * for KVM will preserve the register state. 
*/ - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); return PSCI_RET_SUCCESS; diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 22e745e49b0a..b494d8d39290 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -952,7 +952,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) if (!vcpu->arch.pending_exceptions) { kvm_vz_lose_htimer(vcpu); vcpu->arch.wait = 1; - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); /* * We we are runnable, then definitely go off to user space to diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 30426e8c8cf6..34a801c3604a 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -492,7 +492,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) if (msr & MSR_POW) { if (!vcpu->arch.pending_exceptions) { - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); vcpu->stat.generic.halt_wakeup++; diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index ac14239f3424..1f10e7dfcdd0 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c @@ -376,7 +376,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) return kvmppc_h_pr_stuff_tce(vcpu); case H_CEDE: kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); vcpu->stat.generic.halt_wakeup++; return EMULATE_DONE; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53b4c9597c30..06c5830a93f9 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -718,7 +718,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) if (vcpu->arch.shared->msr & MSR_WE) { local_irq_enable(); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); hard_irq_disable(); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 7de9ddbc6af1..2ad0ccd202d5 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -236,7 +236,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) break; case EV_HCALL_TOKEN(EV_IDLE): r = EV_SUCCESS; - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); break; default: diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c index 7f2d742ae4c6..571f319e995a 100644 --- a/arch/riscv/kvm/vcpu_exit.c +++ b/arch/riscv/kvm/vcpu_exit.c @@ -146,7 +146,7 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu, vcpu->stat.wfi_exit_stat++; if (!kvm_arch_vcpu_runnable(vcpu)) { srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 3c8246fa208b..dbabd6f2404e 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1335,7 +1335,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime); no_timer: srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); vcpu->valid_wakeup = false; __unset_cpu_idle(vcpu); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 43cabc747318..e3dd76f251e9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8727,6 +8727,13 @@ void kvm_arch_exit(void) 
static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) { + /* + * The vCPU has halted, e.g. executed HLT. Update the run state if the + * local APIC is in-kernel, the run loop will detect the non-runnable + * state and halt the vCPU. Exit to userspace if the local APIC is + * managed by userspace, in which case userspace is responsible for + * handling wake events. + */ ++vcpu->stat.halt_exits; if (lapic_in_kernel(vcpu)) { vcpu->arch.mp_state = state; @@ -9999,7 +10006,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) if (!kvm_arch_vcpu_runnable(vcpu) && (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_x86_ops.post_block) @@ -10196,7 +10203,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) r = -EINTR; goto out; } - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); if (kvm_apic_accept_events(vcpu) < 0) { r = 0; goto out; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index afacbfb2e482..ea3c22d55d56 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1102,7 +1102,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_sigset_activate(struct kvm_vcpu *vcpu); void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); -void kvm_vcpu_block(struct kvm_vcpu *vcpu); +void kvm_vcpu_halt(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 53c58606e1e2..0d301c95fa1a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3294,17 +3294,14 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, } } -/* - * The vCPU has executed a HLT instruction with in-kernel mode enabled. - */ -void kvm_vcpu_block(struct kvm_vcpu *vcpu) +void kvm_vcpu_halt(struct kvm_vcpu *vcpu) { struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; ktime_t start, cur, poll_end; bool waited = false; - u64 block_ns; + u64 halt_ns; start = cur = poll_end = ktime_get(); if (do_halt_poll) { @@ -3346,7 +3343,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) ktime_to_ns(cur) - ktime_to_ns(poll_end)); } out: - block_ns = ktime_to_ns(cur) - ktime_to_ns(start); + /* The total time the vCPU was "halted", including polling time. 
*/ + halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); /* * Note, halt-polling is considered successful so long as the vCPU was @@ -3360,24 +3358,24 @@ out: if (!vcpu_valid_wakeup(vcpu)) { shrink_halt_poll_ns(vcpu); } else if (vcpu->kvm->max_halt_poll_ns) { - if (block_ns <= vcpu->halt_poll_ns) + if (halt_ns <= vcpu->halt_poll_ns) ; /* we had a long block, shrink polling */ else if (vcpu->halt_poll_ns && - block_ns > vcpu->kvm->max_halt_poll_ns) + halt_ns > vcpu->kvm->max_halt_poll_ns) shrink_halt_poll_ns(vcpu); /* we had a short halt and our poll time is too small */ else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && - block_ns < vcpu->kvm->max_halt_poll_ns) + halt_ns < vcpu->kvm->max_halt_poll_ns) grow_halt_poll_ns(vcpu); } else { vcpu->halt_poll_ns = 0; } } - trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); + trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); } -EXPORT_SYMBOL_GPL(kvm_vcpu_block); +EXPORT_SYMBOL_GPL(kvm_vcpu_halt); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) { -- cgit v1.2.3-58-ga151 From fac4268894394213127e43856f41d10f29131e69 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:07 -0700 Subject: KVM: Split out a kvm_vcpu_block() helper from kvm_vcpu_halt() Factor out the "block" part of kvm_vcpu_halt() so that x86 can emulate non-halt wait/sleep/block conditions that should not be subjected to halt-polling. No functional change intended. Reviewed-by: Christian Borntraeger Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-15-seanjc@google.com> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 52 +++++++++++++++++++++++++++++++++--------------- 2 files changed, 37 insertions(+), 16 deletions(-) (limited to 'virt') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ea3c22d55d56..bd13c5b5bd1d 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1103,6 +1103,7 @@ void kvm_sigset_activate(struct kvm_vcpu *vcpu); void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); void kvm_vcpu_halt(struct kvm_vcpu *vcpu); +bool kvm_vcpu_block(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 0d301c95fa1a..370b95ad5f03 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3272,6 +3272,35 @@ out: return ret; } +/* + * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is + * pending. This is mostly used when halting a vCPU, but may also be used + * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI. + */ +bool kvm_vcpu_block(struct kvm_vcpu *vcpu) +{ + struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); + bool waited = false; + + kvm_arch_vcpu_blocking(vcpu); + + prepare_to_rcuwait(wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + if (kvm_vcpu_check_block(vcpu) < 0) + break; + + waited = true; + schedule(); + } + finish_rcuwait(wait); + + kvm_arch_vcpu_unblocking(vcpu); + + return waited; +} + static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, ktime_t end, bool success) { @@ -3294,9 +3323,14 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, } } +/* + * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... 
If halt + * polling is enabled, busy wait for a short time before blocking to avoid the + * expensive block+unblock sequence if a wake event arrives soon after the vCPU + * is halted. + */ void kvm_vcpu_halt(struct kvm_vcpu *vcpu) { - struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; ktime_t start, cur, poll_end; @@ -3319,21 +3353,7 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu) } while (kvm_vcpu_can_poll(cur, stop)); } - kvm_arch_vcpu_blocking(vcpu); - - prepare_to_rcuwait(wait); - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - - if (kvm_vcpu_check_block(vcpu) < 0) - break; - - waited = true; - schedule(); - } - finish_rcuwait(wait); - - kvm_arch_vcpu_unblocking(vcpu); + waited = kvm_vcpu_block(vcpu); cur = ktime_get(); if (waited) { -- cgit v1.2.3-58-ga151 From c3858335c711569b82a234a560dc19247e8f3fcc Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Fri, 8 Oct 2021 19:12:08 -0700 Subject: KVM: stats: Add stat to detect if vcpu is currently blocking Add a "blocking" stat that userspace can use to detect the case where a vCPU is not being run because of an vCPU/guest action, e.g. HLT or WFS on x86, WFI on arm64, etc... Current guest/host/halt stats don't show this well, e.g. if a guest halts for a long period of time then the vCPU could could appear pathologically blocked due to a host condition, when in reality the vCPU has been put into a not-runnable state by the guest. Originally-by: Cannon Matthews Suggested-by: Sean Christopherson Reviewed-by: David Matlack Signed-off-by: Jing Zhang [sean: renamed stat to "blocking", massaged changelog] Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-16-seanjc@google.com> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 3 ++- include/linux/kvm_types.h | 1 + virt/kvm/kvm_main.c | 4 ++++ 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index bd13c5b5bd1d..dc7740cafea7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1587,7 +1587,8 @@ struct _kvm_stats_desc { STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \ HALT_POLL_HIST_COUNT), \ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \ - HALT_POLL_HIST_COUNT) + HALT_POLL_HIST_COUNT), \ + STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking) extern struct dentry *kvm_debugfs_dir; diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 234eab059839..888ef12862c9 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -87,6 +87,7 @@ struct kvm_vcpu_stat_generic { u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT]; u64 halt_wait_hist[HALT_POLL_HIST_COUNT]; + u64 blocking; }; #define KVM_STATS_NAME_SIZE 48 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 370b95ad5f03..2630db6e8cb5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3282,6 +3282,8 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu) struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool waited = false; + vcpu->stat.generic.blocking = 1; + kvm_arch_vcpu_blocking(vcpu); prepare_to_rcuwait(wait); @@ -3298,6 +3300,8 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu) kvm_arch_vcpu_unblocking(vcpu); + vcpu->stat.generic.blocking = 0; + return waited; } -- cgit v1.2.3-58-ga151 From 109a98260b533722d1190dcfa18447dd39fee5ff Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 
2021 19:12:09 -0700 Subject: KVM: Don't redo ktime_get() when calculating halt-polling stop/deadline Calculate the halt-polling "stop" time using "start" instead of redoing ktime_get(). In practice, the numbers involved are in the noise (e.g., in the happy case where hardware correctly predicts do_halt_poll and there are no interrupts, "start" is probably only a few cycles old) and either approach is perfectly ok. But it's more precise to count any extra latency toward the halt-polling time. Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-17-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2630db6e8cb5..97bde32082d0 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3343,7 +3343,7 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu) start = cur = poll_end = ktime_get(); if (do_halt_poll) { - ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); + ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); do { /* -- cgit v1.2.3-58-ga151 From d92a5d1c6c757f659ffb9c2c2e65fcf3d571c14e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:12 -0700 Subject: KVM: Add helpers to wake/query blocking vCPU Add helpers to wake and query a blocking vCPU. In addition to providing nice names, the helpers reduce the probability of KVM neglecting to use kvm_arch_vcpu_get_wait(). No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-20-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arch_timer.c | 3 +-- arch/arm64/kvm/arm.c | 2 +- arch/x86/kvm/lapic.c | 2 +- include/linux/kvm_host.h | 14 ++++++++++++++ virt/kvm/async_pf.c | 2 +- virt/kvm/kvm_main.c | 8 ++------ 6 files changed, 20 insertions(+), 11 deletions(-) (limited to 'virt') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 3aeaa79ad4a2..6e542e2eae32 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -649,7 +649,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct timer_map map; - struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); if (unlikely(!timer->enabled)) return; @@ -672,7 +671,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) if (map.emul_ptimer) soft_timer_cancel(&map.emul_ptimer->hrtimer); - if (rcuwait_active(wait)) + if (kvm_vcpu_is_blocking(vcpu)) kvm_timer_blocking(vcpu); /* diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 77ecc11d67ae..14106a7c75b5 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -631,7 +631,7 @@ void kvm_arm_resume_guest(struct kvm *kvm) kvm_for_each_vcpu(i, vcpu, kvm) { vcpu->arch.pause = false; - rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); + __kvm_vcpu_wake_up(vcpu); } } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 451e80306b51..bbac8477b3ec 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1931,7 +1931,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) /* If the preempt notifier has already run, it also called apic_timer_expired */ if (!apic->lapic_timer.hv_timer_in_use) goto out; - WARN_ON(rcuwait_active(&vcpu->wait)); + WARN_ON(kvm_vcpu_is_blocking(vcpu)); apic_timer_expired(apic, false); cancel_hv_timer(apic); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index dc7740cafea7..f8ed799e8674 100644 --- 
a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1286,6 +1286,20 @@ static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) #endif } +/* + * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns + * true if the vCPU was blocking and was awakened, false otherwise. + */ +static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) +{ + return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); +} + +static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu) +{ + return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)); +} + #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED /* * returns true if the virtual interrupt controller is initialized and diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index ccb35c22785e..9bfe1d6f6529 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work) trace_kvm_async_pf_completed(addr, cr2_or_gpa); - rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); + __kvm_vcpu_wake_up(vcpu); mmput(mm); kvm_put_kvm(vcpu->kvm); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 97bde32082d0..f3acff708bf5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3403,10 +3403,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) { - struct rcuwait *waitp; - - waitp = kvm_arch_vcpu_get_wait(vcpu); - if (rcuwait_wake_up(waitp)) { + if (__kvm_vcpu_wake_up(vcpu)) { WRITE_ONCE(vcpu->ready, true); ++vcpu->stat.generic.halt_wakeup; return true; @@ -3574,8 +3571,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) continue; if (vcpu == me) continue; - if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)) && - !vcpu_dy_runnable(vcpu)) + if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) continue; if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && !kvm_arch_dy_has_pending_interrupt(vcpu) && -- cgit v1.2.3-58-ga151 From dc70ec217cec504e6f8fee8fd91bf5c118af05f2 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 21 Nov 2021 12:54:40 +0000 Subject: KVM: Introduce CONFIG_HAVE_KVM_DIRTY_RING I'd like to make the build include dirty_ring.c based on whether the arch wants it or not. That's a whole lot simpler if there's a config symbol instead of doing it implicitly on KVM_DIRTY_LOG_PAGE_OFFSET being set to something non-zero. 
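As a rough sketch of the pattern this change enables (not code from the patch itself): once an architecture selects HAVE_KVM_DIRTY_RING, common code can test the Kconfig symbol instead of the KVM_DIRTY_LOG_PAGE_OFFSET constant. The helper name below is hypothetical; the #ifdef and the size expression mirror the KVM_CAP_DIRTY_LOG_RING hunk in the diff that follows.

/*
 * Hypothetical helper, for illustration only: how much dirty-ring data the
 * generic KVM_CAP_DIRTY_LOG_RING capability would advertise, gated on the
 * new Kconfig symbol rather than on KVM_DIRTY_LOG_PAGE_OFFSET.
 */
static inline long kvm_example_dirty_ring_cap(void)
{
#ifdef CONFIG_HAVE_KVM_DIRTY_RING
	return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
#else
	return 0;	/* dirty_ring.o is not built for this architecture */
#endif
}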
Signed-off-by: David Woodhouse Message-Id: <20211121125451.9489-2-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Kconfig | 1 + include/linux/kvm_dirty_ring.h | 8 ++++---- virt/kvm/Kconfig | 3 +++ virt/kvm/kvm_main.c | 4 ++-- 4 files changed, 10 insertions(+), 6 deletions(-) (limited to 'virt') diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 7618bef0a4a9..03b2ce34e7f4 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -27,6 +27,7 @@ config KVM select MMU_NOTIFIER select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD + select HAVE_KVM_DIRTY_RING select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS select HAVE_KVM_IRQ_ROUTING diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h index 120e5e90fa1d..4da8d4a4140b 100644 --- a/include/linux/kvm_dirty_ring.h +++ b/include/linux/kvm_dirty_ring.h @@ -27,9 +27,9 @@ struct kvm_dirty_ring { int index; }; -#if (KVM_DIRTY_LOG_PAGE_OFFSET == 0) +#ifndef CONFIG_HAVE_KVM_DIRTY_RING /* - * If KVM_DIRTY_LOG_PAGE_OFFSET not defined, kvm_dirty_ring.o should + * If CONFIG_HAVE_HVM_DIRTY_RING not defined, kvm_dirty_ring.o should * not be included as well, so define these nop functions for the arch. */ static inline u32 kvm_dirty_ring_get_rsvd_entries(void) @@ -74,7 +74,7 @@ static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring) return true; } -#else /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */ +#else /* CONFIG_HAVE_KVM_DIRTY_RING */ u32 kvm_dirty_ring_get_rsvd_entries(void); int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size); @@ -98,6 +98,6 @@ struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset); void kvm_dirty_ring_free(struct kvm_dirty_ring *ring); bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring); -#endif /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */ +#endif /* CONFIG_HAVE_KVM_DIRTY_RING */ #endif /* KVM_DIRTY_RING_H */ diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 62b39149b8c8..97cf5413ac25 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -13,6 +13,9 @@ config HAVE_KVM_IRQFD config HAVE_KVM_IRQ_ROUTING bool +config HAVE_KVM_DIRTY_RING + bool + config HAVE_KVM_EVENTFD bool select EVENTFD diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f3acff708bf5..b0f7e6eb00ff 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3600,7 +3600,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) { -#if KVM_DIRTY_LOG_PAGE_OFFSET > 0 +#ifdef CONFIG_HAVE_KVM_DIRTY_RING return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + kvm->dirty_ring_size / PAGE_SIZE); @@ -4305,7 +4305,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) case KVM_CAP_NR_MEMSLOTS: return KVM_USER_MEM_SLOTS; case KVM_CAP_DIRTY_LOG_RING: -#if KVM_DIRTY_LOG_PAGE_OFFSET > 0 +#ifdef CONFIG_HAVE_KVM_DIRTY_RING return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); #else return 0; -- cgit v1.2.3-58-ga151 From 6f2cdbdba43e4afad8df1ab06797c83e3af4a3dc Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 21 Nov 2021 12:54:41 +0000 Subject: KVM: Add Makefile.kvm for common files, use it for x86 Splitting kvm_main.c out into smaller and better-organized files is slightly non-trivial when it involves editing a bunch of per-arch KVM makefiles. Provide virt/kvm/Makefile.kvm for them to include. 
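For illustration only (a hedged sketch, not part of this series): after the conversion, a per-arch makefile is expected to shrink to an include of the shared object list plus its own sources, along the lines of the hypothetical arch below. The real x86 hunk follows in the diff.

# Hypothetical arch/foo/kvm/Makefile after switching to the shared list.
# Common objects come from virt/kvm/Makefile.kvm; only arch code stays here.
include $(srctree)/virt/kvm/Makefile.kvm

kvm-y += foo.o foo_mmu.o
obj-$(CONFIG_KVM) += kvm.o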
Signed-off-by: David Woodhouse Acked-by: Marc Zyngier Message-Id: <20211121125451.9489-3-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Makefile | 7 +------ virt/kvm/Makefile.kvm | 13 +++++++++++++ 2 files changed, 14 insertions(+), 6 deletions(-) create mode 100644 virt/kvm/Makefile.kvm (limited to 'virt') diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 75dfd27b6e8a..30f244b64523 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -7,12 +7,7 @@ ifeq ($(CONFIG_FRAME_POINTER),y) OBJECT_FILES_NON_STANDARD_vmenter.o := y endif -KVM := ../../../virt/kvm - -kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ - $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \ - $(KVM)/dirty_ring.o $(KVM)/binary_stats.o -kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o +include $(srctree)/virt/kvm/Makefile.kvm kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm new file mode 100644 index 000000000000..ffdcad3cc97a --- /dev/null +++ b/virt/kvm/Makefile.kvm @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Kernel-based Virtual Machine module +# + +KVM ?= ../../../virt/kvm + +kvm-y := $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o +kvm-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o +kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o +kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o +kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o +kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o -- cgit v1.2.3-58-ga151 From 2efd61a608b0039911924d2e5d7028eb37496e85 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 10 Dec 2021 16:36:20 +0000 Subject: KVM: Warn if mark_page_dirty() is called without an active vCPU The various kvm_write_guest() and mark_page_dirty() functions must only ever be called in the context of an active vCPU, because if dirty ring tracking is enabled it may simply oops when kvm_get_running_vcpu() returns NULL for the vcpu and then kvm_dirty_ring_get() dereferences it. This oops was reported by "butt3rflyh4ck" in https://lore.kernel.org/kvm/CAFcO6XOmoS7EacN_n6v4Txk7xL7iqRa2gABg3F7E3Naf5uG94g@mail.gmail.com/ That actual bug will be fixed under separate cover but this warning should help to prevent new ones from being added. 
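Boiled down, the check being added is the following abridged sketch of the mark_page_dirty_in_slot() hunk shown below, with the unchanged dirty-tracking body elided:

void mark_page_dirty_in_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *memslot, gfn_t gfn)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	/*
	 * Dirty-ring tracking is per vCPU, so anything that dirties guest
	 * memory must run on a vCPU thread of this VM; warn and bail out
	 * instead of dereferencing a NULL or foreign vcpu later on.
	 */
	if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm))
		return;

	/* ... push to vcpu->dirty_ring or set the dirty bitmap bit ... */
}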
Signed-off-by: David Woodhouse Message-Id: <20211210163625.2886-2-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- include/linux/kvm_dirty_ring.h | 6 ------ virt/kvm/dirty_ring.c | 9 --------- virt/kvm/kvm_main.c | 7 ++++++- 3 files changed, 6 insertions(+), 16 deletions(-) (limited to 'virt') diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h index 4da8d4a4140b..906f899813dc 100644 --- a/include/linux/kvm_dirty_ring.h +++ b/include/linux/kvm_dirty_ring.h @@ -43,11 +43,6 @@ static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, return 0; } -static inline struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm) -{ - return NULL; -} - static inline int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring) { @@ -78,7 +73,6 @@ static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring) u32 kvm_dirty_ring_get_rsvd_entries(void); int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size); -struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm); /* * called with kvm->slots_lock held, returns the number of diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index 88f4683198ea..8e9874760fb3 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -36,15 +36,6 @@ static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring) return kvm_dirty_ring_used(ring) >= ring->size; } -struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); - - WARN_ON_ONCE(vcpu->kvm != kvm); - - return &vcpu->dirty_ring; -} - static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask) { struct kvm_memory_slot *memslot; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b0f7e6eb00ff..af5b4427b139 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3155,12 +3155,17 @@ void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn) { + struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); + + if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm)) + return; + if (memslot && kvm_slot_dirty_track_enabled(memslot)) { unsigned long rel_gfn = gfn - memslot->base_gfn; u32 slot = (memslot->as_id << 16) | memslot->id; if (kvm->dirty_ring_size) - kvm_dirty_ring_push(kvm_dirty_ring_get(kvm), + kvm_dirty_ring_push(&vcpu->dirty_ring, slot, rel_gfn); else set_bit_le(rel_gfn, memslot->dirty_bitmap); -- cgit v1.2.3-58-ga151 From 982ed0de4753ed6e71dbd40f82a5a066baf133ed Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 10 Dec 2021 16:36:21 +0000 Subject: KVM: Reinstate gfn_to_pfn_cache with invalidation support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This can be used in two modes. There is an atomic mode where the cached mapping is accessed while holding the rwlock, and a mode where the physical address is used by a vCPU in guest mode. For the latter case, an invalidation will wake the vCPU with the new KVM_REQ_GPC_INVALIDATE, and the architecture will need to refresh any caches it still needs to access before entering guest mode again. Only one vCPU can be targeted by the wake requests; it's simple enough to make it wake all vCPUs or even a mask but I don't see a use case for that additional complexity right now. Invalidation happens from the invalidate_range_start MMU notifier, which needs to be able to sleep in order to wake the vCPU and wait for it. 
This means that revalidation potentially needs to "wait" for the MMU operation to complete and the invalidate_range_end notifier to be invoked. Like the vCPU when it takes a page fault in that period, we just spin — fixing that in a future patch by implementing an actual *wait* may be another part of shaving this particularly hirsute yak. As noted in the comments in the function itself, the only case where the invalidate_range_start notifier is expected to be called *without* being able to sleep is when the OOM reaper is killing the process. In that case, we expect the vCPU threads already to have exited, and thus there will be nothing to wake, and no reason to wait. So we clear the KVM_REQUEST_WAIT bit and send the request anyway, then complain loudly if there actually *was* anything to wake up. Signed-off-by: David Woodhouse Message-Id: <20211210163625.2886-3-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Kconfig | 1 + include/linux/kvm_host.h | 103 ++++++++++++++ include/linux/kvm_types.h | 18 +++ virt/kvm/Kconfig | 3 + virt/kvm/Makefile.kvm | 1 + virt/kvm/dirty_ring.c | 2 +- virt/kvm/kvm_main.c | 12 +- virt/kvm/kvm_mm.h | 44 ++++++ virt/kvm/mmu_lock.h | 23 ---- virt/kvm/pfncache.c | 337 ++++++++++++++++++++++++++++++++++++++++++++++ 10 files changed, 517 insertions(+), 27 deletions(-) create mode 100644 virt/kvm/kvm_mm.h delete mode 100644 virt/kvm/mmu_lock.h create mode 100644 virt/kvm/pfncache.c (limited to 'virt') diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 03b2ce34e7f4..ebc8ce9ec917 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -26,6 +26,7 @@ config KVM select PREEMPT_NOTIFIERS select MMU_NOTIFIER select HAVE_KVM_IRQCHIP + select HAVE_KVM_PFNCACHE select HAVE_KVM_IRQFD select HAVE_KVM_DIRTY_RING select IRQ_BYPASS_MANAGER diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f9bbcf519280..9bbb1f1d9e48 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -155,6 +155,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 #define KVM_REQ_VM_DEAD (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ @@ -593,6 +594,10 @@ struct kvm { unsigned long mn_active_invalidate_count; struct rcuwait mn_memslots_update_rcuwait; + /* For management / invalidation of gfn_to_pfn_caches */ + spinlock_t gpc_lock; + struct list_head gpc_list; + /* * created_vcpus is protected by kvm->lock, and is incremented * at the beginning of KVM_CREATE_VCPU. online_vcpus is only @@ -1099,6 +1104,104 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len); void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); +/** + * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a + * given guest physical address. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * @vcpu: vCPU to be used for marking pages dirty and to be woken on + * invalidation. + * @guest_uses_pa: indicates that the resulting host physical PFN is used while + * @vcpu is IN_GUEST_MODE so invalidations should wake it. + * @kernel_map: requests a kernel virtual mapping (kmap / memremap). + * @gpa: guest physical address to map. + * @len: sanity check; the range being access must fit a single page. + * @dirty: mark the cache dirty immediately. 
+ * + * @return: 0 for success. + * -EINVAL for a mapping which would cross a page boundary. + * -EFAULT for an untranslatable guest physical address. + * + * This primes a gfn_to_pfn_cache and links it into the @kvm's list for + * invalidations to be processed. Invalidation callbacks to @vcpu using + * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM + * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check() + * to ensure that the cache is valid before accessing the target page. + */ +int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + struct kvm_vcpu *vcpu, bool guest_uses_pa, + bool kernel_map, gpa_t gpa, unsigned long len, + bool dirty); + +/** + * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * @gpa: current guest physical address to map. + * @len: sanity check; the range being access must fit a single page. + * @dirty: mark the cache dirty immediately. + * + * @return: %true if the cache is still valid and the address matches. + * %false if the cache is not valid. + * + * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock + * while calling this function, and then continue to hold the lock until the + * access is complete. + * + * Callers in IN_GUEST_MODE may do so without locking, although they should + * still hold a read lock on kvm->scru for the memslot checks. + */ +bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + gpa_t gpa, unsigned long len); + +/** + * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * @gpa: updated guest physical address to map. + * @len: sanity check; the range being access must fit a single page. + * @dirty: mark the cache dirty immediately. + * + * @return: 0 for success. + * -EINVAL for a mapping which would cross a page boundary. + * -EFAULT for an untranslatable guest physical address. + * + * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful + * returm from this function does not mean the page can be immediately + * accessed because it may have raced with an invalidation. Callers must + * still lock and check the cache status, as this function does not return + * with the lock still held to permit access. + */ +int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + gpa_t gpa, unsigned long len, bool dirty); + +/** + * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * + * This unmaps the referenced page and marks it dirty, if appropriate. The + * cache is left in the invalid state but at least the mapping from GPA to + * userspace HVA will remain cached and can be reused on a subsequent + * refresh. + */ +void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc); + +/** + * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * + * This removes a cache from the @kvm's list to be processed on MMU notifier + * invocation. 
+ */ +void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc); + void kvm_sigset_activate(struct kvm_vcpu *vcpu); void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 888ef12862c9..dceac12c1ce5 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -19,6 +19,7 @@ struct kvm_memslots; enum kvm_mr_change; #include +#include #include @@ -53,6 +54,23 @@ struct gfn_to_hva_cache { struct kvm_memory_slot *memslot; }; +struct gfn_to_pfn_cache { + u64 generation; + gpa_t gpa; + unsigned long uhva; + struct kvm_memory_slot *memslot; + struct kvm_vcpu *vcpu; + struct list_head list; + rwlock_t lock; + void *khva; + kvm_pfn_t pfn; + bool active; + bool valid; + bool dirty; + bool kernel_map; + bool guest_uses_pa; +}; + #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE /* * Memory caches are used to preallocate memory ahead of various MMU flows, diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 97cf5413ac25..f4834c20e4a6 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -4,6 +4,9 @@ config HAVE_KVM bool +config HAVE_KVM_PFNCACHE + bool + config HAVE_KVM_IRQCHIP bool diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm index ffdcad3cc97a..2c27d5d0c367 100644 --- a/virt/kvm/Makefile.kvm +++ b/virt/kvm/Makefile.kvm @@ -11,3 +11,4 @@ kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o +kvm-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index 8e9874760fb3..222ecc81d7df 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -9,7 +9,7 @@ #include #include #include -#include "mmu_lock.h" +#include "kvm_mm.h" int __weak kvm_cpu_dirty_log_size(void) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index af5b4427b139..6e8e9d36f382 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -59,7 +59,7 @@ #include "coalesced_mmio.h" #include "async_pf.h" -#include "mmu_lock.h" +#include "kvm_mm.h" #include "vfio.h" #define CREATE_TRACE_POINTS @@ -711,6 +711,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, kvm->mn_active_invalidate_count++; spin_unlock(&kvm->mn_invalidate_lock); + gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end, + hva_range.may_block); + __kvm_handle_hva_range(kvm, &hva_range); return 0; @@ -1071,6 +1074,9 @@ static struct kvm *kvm_create_vm(unsigned long type) rcuwait_init(&kvm->mn_memslots_update_rcuwait); xa_init(&kvm->vcpu_array); + INIT_LIST_HEAD(&kvm->gpc_list); + spin_lock_init(&kvm->gpc_lock); + INIT_LIST_HEAD(&kvm->devices); BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); @@ -2539,8 +2545,8 @@ out: * 2): @write_fault = false && @writable, @writable will tell the caller * whether the mapping is writable. 
*/ -static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, - bool write_fault, bool *writable) +kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, + bool write_fault, bool *writable) { struct vm_area_struct *vma; kvm_pfn_t pfn = 0; diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h new file mode 100644 index 000000000000..34ca40823260 --- /dev/null +++ b/virt/kvm/kvm_mm.h @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#ifndef __KVM_MM_H__ +#define __KVM_MM_H__ 1 + +/* + * Architectures can choose whether to use an rwlock or spinlock + * for the mmu_lock. These macros, for use in common code + * only, avoids using #ifdefs in places that must deal with + * multiple architectures. + */ + +#ifdef KVM_HAVE_MMU_RWLOCK +#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) +#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) +#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) +#define KVM_MMU_READ_LOCK(kvm) read_lock(&(kvm)->mmu_lock) +#define KVM_MMU_READ_UNLOCK(kvm) read_unlock(&(kvm)->mmu_lock) +#else +#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) +#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) +#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) +#define KVM_MMU_READ_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) +#define KVM_MMU_READ_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) +#endif /* KVM_HAVE_MMU_RWLOCK */ + +kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, + bool write_fault, bool *writable); + +#ifdef CONFIG_HAVE_KVM_PFNCACHE +void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, + unsigned long start, + unsigned long end, + bool may_block); +#else +static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, + unsigned long start, + unsigned long end, + bool may_block) +{ +} +#endif /* HAVE_KVM_PFNCACHE */ + +#endif /* __KVM_MM_H__ */ diff --git a/virt/kvm/mmu_lock.h b/virt/kvm/mmu_lock.h deleted file mode 100644 index 9e1308f9734c..000000000000 --- a/virt/kvm/mmu_lock.h +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only - -#ifndef KVM_MMU_LOCK_H -#define KVM_MMU_LOCK_H 1 - -/* - * Architectures can choose whether to use an rwlock or spinlock - * for the mmu_lock. These macros, for use in common code - * only, avoids using #ifdefs in places that must deal with - * multiple architectures. - */ - -#ifdef KVM_HAVE_MMU_RWLOCK -#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) -#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) -#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) -#else -#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) -#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) -#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) -#endif /* KVM_HAVE_MMU_RWLOCK */ - -#endif diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c new file mode 100644 index 000000000000..ce878f4be4da --- /dev/null +++ b/virt/kvm/pfncache.c @@ -0,0 +1,337 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Kernel-based Virtual Machine driver for Linux + * + * This module enables kernel and guest-mode vCPU access to guest physical + * memory with suitable invalidation mechanisms. + * + * Copyright © 2021 Amazon.com, Inc. or its affiliates. + * + * Authors: + * David Woodhouse + */ + +#include +#include +#include +#include +#include + +#include "kvm_mm.h" + +/* + * MMU notifier 'invalidate_range_start' hook. 
+ */ +void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, + unsigned long end, bool may_block) +{ + DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); + struct gfn_to_pfn_cache *gpc; + bool wake_vcpus = false; + + spin_lock(&kvm->gpc_lock); + list_for_each_entry(gpc, &kvm->gpc_list, list) { + write_lock_irq(&gpc->lock); + + /* Only a single page so no need to care about length */ + if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && + gpc->uhva >= start && gpc->uhva < end) { + gpc->valid = false; + + /* + * If a guest vCPU could be using the physical address, + * it needs to be woken. + */ + if (gpc->guest_uses_pa) { + if (!wake_vcpus) { + wake_vcpus = true; + bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); + } + __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap); + } + + /* + * We cannot call mark_page_dirty() from here because + * this physical CPU might not have an active vCPU + * with which to do the KVM dirty tracking. + * + * Neither is there any point in telling the kernel MM + * that the underlying page is dirty. A vCPU in guest + * mode might still be writing to it up to the point + * where we wake them a few lines further down anyway. + * + * So all the dirty marking happens on the unmap. + */ + } + write_unlock_irq(&gpc->lock); + } + spin_unlock(&kvm->gpc_lock); + + if (wake_vcpus) { + unsigned int req = KVM_REQ_GPC_INVALIDATE; + bool called; + + /* + * If the OOM reaper is active, then all vCPUs should have + * been stopped already, so perform the request without + * KVM_REQUEST_WAIT and be sad if any needed to be woken. + */ + if (!may_block) + req &= ~KVM_REQUEST_WAIT; + + called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap); + + WARN_ON_ONCE(called && !may_block); + } +} + +bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + gpa_t gpa, unsigned long len) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + + if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE) + return false; + + if (gpc->gpa != gpa || gpc->generation != slots->generation || + kvm_is_error_hva(gpc->uhva)) + return false; + + if (!gpc->valid) + return false; + + return true; +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check); + +static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, + gpa_t gpa, bool dirty) +{ + /* Unmap the old page if it was mapped before, and release it */ + if (!is_error_noslot_pfn(pfn)) { + if (khva) { + if (pfn_valid(pfn)) + kunmap(pfn_to_page(pfn)); +#ifdef CONFIG_HAS_IOMEM + else + memunmap(khva); +#endif + } + + kvm_release_pfn(pfn, dirty); + if (dirty) + mark_page_dirty(kvm, gpa); + } +} + +static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva) +{ + unsigned long mmu_seq; + kvm_pfn_t new_pfn; + int retry; + + do { + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + /* We always request a writeable mapping */ + new_pfn = hva_to_pfn(uhva, false, NULL, true, NULL); + if (is_error_noslot_pfn(new_pfn)) + break; + + KVM_MMU_READ_LOCK(kvm); + retry = mmu_notifier_retry_hva(kvm, mmu_seq, uhva); + KVM_MMU_READ_UNLOCK(kvm); + if (!retry) + break; + + cond_resched(); + } while (1); + + return new_pfn; +} + +int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + gpa_t gpa, unsigned long len, bool dirty) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + unsigned long page_offset = gpa & ~PAGE_MASK; + kvm_pfn_t old_pfn, new_pfn; + unsigned long old_uhva; + gpa_t old_gpa; + void *old_khva; + bool old_valid, old_dirty; + int ret = 0; + + /* + * If must fit within a single page. 
The 'len' argument is + * only to enforce that. + */ + if (page_offset + len > PAGE_SIZE) + return -EINVAL; + + write_lock_irq(&gpc->lock); + + old_gpa = gpc->gpa; + old_pfn = gpc->pfn; + old_khva = gpc->khva - offset_in_page(gpc->khva); + old_uhva = gpc->uhva; + old_valid = gpc->valid; + old_dirty = gpc->dirty; + + /* If the userspace HVA is invalid, refresh that first */ + if (gpc->gpa != gpa || gpc->generation != slots->generation || + kvm_is_error_hva(gpc->uhva)) { + gfn_t gfn = gpa_to_gfn(gpa); + + gpc->dirty = false; + gpc->gpa = gpa; + gpc->generation = slots->generation; + gpc->memslot = __gfn_to_memslot(slots, gfn); + gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); + + if (kvm_is_error_hva(gpc->uhva)) { + ret = -EFAULT; + goto out; + } + + gpc->uhva += page_offset; + } + + /* + * If the userspace HVA changed or the PFN was already invalid, + * drop the lock and do the HVA to PFN lookup again. + */ + if (!old_valid || old_uhva != gpc->uhva) { + unsigned long uhva = gpc->uhva; + void *new_khva = NULL; + + /* Placeholders for "hva is valid but not yet mapped" */ + gpc->pfn = KVM_PFN_ERR_FAULT; + gpc->khva = NULL; + gpc->valid = true; + + write_unlock_irq(&gpc->lock); + + new_pfn = hva_to_pfn_retry(kvm, uhva); + if (is_error_noslot_pfn(new_pfn)) { + ret = -EFAULT; + goto map_done; + } + + if (gpc->kernel_map) { + if (new_pfn == old_pfn) { + new_khva = old_khva; + old_pfn = KVM_PFN_ERR_FAULT; + old_khva = NULL; + } else if (pfn_valid(new_pfn)) { + new_khva = kmap(pfn_to_page(new_pfn)); +#ifdef CONFIG_HAS_IOMEM + } else { + new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB); +#endif + } + if (new_khva) + new_khva += page_offset; + else + ret = -EFAULT; + } + + map_done: + write_lock_irq(&gpc->lock); + if (ret) { + gpc->valid = false; + gpc->pfn = KVM_PFN_ERR_FAULT; + gpc->khva = NULL; + } else { + /* At this point, gpc->valid may already have been cleared */ + gpc->pfn = new_pfn; + gpc->khva = new_khva; + } + } else { + /* If the HVA→PFN mapping was already valid, don't unmap it. */ + old_pfn = KVM_PFN_ERR_FAULT; + old_khva = NULL; + } + + out: + if (ret) + gpc->dirty = false; + else + gpc->dirty = dirty; + + write_unlock_irq(&gpc->lock); + + __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty); + + return ret; +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh); + +void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) +{ + void *old_khva; + kvm_pfn_t old_pfn; + bool old_dirty; + gpa_t old_gpa; + + write_lock_irq(&gpc->lock); + + gpc->valid = false; + + old_khva = gpc->khva - offset_in_page(gpc->khva); + old_dirty = gpc->dirty; + old_gpa = gpc->gpa; + old_pfn = gpc->pfn; + + /* + * We can leave the GPA → uHVA map cache intact but the PFN + * lookup will need to be redone even for the same page. 
+ */ + gpc->khva = NULL; + gpc->pfn = KVM_PFN_ERR_FAULT; + + write_unlock_irq(&gpc->lock); + + __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty); +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap); + + +int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + struct kvm_vcpu *vcpu, bool guest_uses_pa, + bool kernel_map, gpa_t gpa, unsigned long len, + bool dirty) +{ + if (!gpc->active) { + rwlock_init(&gpc->lock); + + gpc->khva = NULL; + gpc->pfn = KVM_PFN_ERR_FAULT; + gpc->uhva = KVM_HVA_ERR_BAD; + gpc->vcpu = vcpu; + gpc->kernel_map = kernel_map; + gpc->guest_uses_pa = guest_uses_pa; + gpc->valid = false; + gpc->active = true; + + spin_lock(&kvm->gpc_lock); + list_add(&gpc->list, &kvm->gpc_list); + spin_unlock(&kvm->gpc_lock); + } + return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, dirty); +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init); + +void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) +{ + if (gpc->active) { + spin_lock(&kvm->gpc_lock); + list_del(&gpc->list); + spin_unlock(&kvm->gpc_lock); + + kvm_gfn_to_pfn_cache_unmap(kvm, gpc); + gpc->active = false; + } +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_destroy); -- cgit v1.2.3-58-ga151
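To tie the new API together, a consumer is expected to follow an init / check-under-read-lock / refresh-on-miss pattern roughly like the sketch below. This is derived from the kernel-doc comments above rather than taken from the series; the example_* functions, the u64 field being written, and the choice of guest_uses_pa=false with kernel_map=true are all assumptions, and real users layer their own locking and dirty-tracking policy on top.

/*
 * Usage sketch only. 'shared' stands in for a cache that would normally be
 * embedded in an arch or vCPU structure; the guest page at 'gpa' holds a
 * hypothetical u64 the host wants to update.
 */
static struct gfn_to_pfn_cache shared;

static int example_setup(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	/* Kernel mapping wanted; the PFN is not used while in guest mode. */
	return kvm_gfn_to_pfn_cache_init(vcpu->kvm, &shared, vcpu,
					 false, true, gpa, sizeof(u64), false);
}

static int example_write(struct kvm *kvm, gpa_t gpa, u64 val)
{
	int ret;

	read_lock(&shared.lock);
	while (!kvm_gfn_to_pfn_cache_check(kvm, &shared, gpa, sizeof(u64))) {
		read_unlock(&shared.lock);

		/*
		 * A successful refresh does not return with the lock held,
		 * so validity has to be re-checked before the access.
		 */
		ret = kvm_gfn_to_pfn_cache_refresh(kvm, &shared, gpa,
						   sizeof(u64), true);
		if (ret)
			return ret;

		read_lock(&shared.lock);
	}

	*(u64 *)shared.khva = val;
	read_unlock(&shared.lock);
	return 0;
}

static void example_teardown(struct kvm *kvm)
{
	kvm_gfn_to_pfn_cache_destroy(kvm, &shared);
}

Accessors that run in IN_GUEST_MODE would instead consume the physical address and rely on the KVM_REQ_GPC_INVALIDATE wakeup described in the kernel-doc above.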