From b51012deb390528d89d426f328d84618683f5d73 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 22 Jan 2016 11:39:22 +0100 Subject: KVM: x86: introduce do_shl32_div32 This is similar to the existing div_frac function, but it returns the remainder too. Unlike div_frac, it can be used to implement long division, e.g. (a << 64) / b for 32-bit a and b. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 10 ++-------- arch/x86/kvm/x86.h | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4244c2baf57d..5b937fdebc66 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1196,14 +1196,8 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) static uint32_t div_frac(uint32_t dividend, uint32_t divisor) { - uint32_t quotient, remainder; - - /* Don't try to replace with do_div(), this one calculates - * "(dividend << 32) / divisor" */ - __asm__ ( "divl %4" - : "=a" (quotient), "=d" (remainder) - : "0" (0), "1" (dividend), "r" (divisor) ); - return quotient; + do_shl32_div32(dividend, divisor); + return dividend; } static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index f2afa5fe48a6..34f416427143 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -192,4 +192,19 @@ extern unsigned int min_timer_period_us; extern unsigned int lapic_timer_advance_ns; extern struct static_key kvm_no_apic_vcpu; + +/* Same "calling convention" as do_div: + * - divide (n << 32) by base + * - put result in n + * - return remainder + */ +#define do_shl32_div32(n, base) \ + ({ \ + u32 __quot, __rem; \ + asm("divl %2" : "=a" (__quot), "=d" (__rem) \ + : "rm" (base), "0" (0), "1" ((u32) n)); \ + n = __quot; \ + __rem; \ + }) + #endif -- cgit v1.2.3-58-ga151 From 23a1c2579b575b228a6c685dfe93f296d3d5e0e1 Mon Sep 17 00:00:00 2001 From: Feng Wu Date: Mon, 25 Jan 2016 16:53:32 +0800 Subject: KVM: Recover IRTE to remapped mode if the interrupt is not single-destination MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the interrupt is not single destination any more, we need to change back IRTE to remapped mode explicitly. Signed-off-by: Feng Wu Reviewed-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e2951b6edbbc..a4b4aa4cdc54 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10764,8 +10764,21 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, */ kvm_set_msi_irq(e, &irq); - if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) + if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) { + /* + * Make sure the IRTE is in remapped mode if + * we don't handle it in posted mode. 
+ */ + ret = irq_set_vcpu_affinity(host_irq, NULL); + if (ret < 0) { + printk(KERN_INFO + "failed to back to remapped mode, irq: %u\n", + host_irq); + goto out; + } + continue; + } vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; -- cgit v1.2.3-58-ga151 From 520040146a0af36f7875ec06b58f44b19a0edf53 Mon Sep 17 00:00:00 2001 From: Feng Wu Date: Mon, 25 Jan 2016 16:53:33 +0800 Subject: KVM: x86: Use vector-hashing to deliver lowest-priority interrupts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use vector-hashing to deliver lowest-priority interrupts, As an example, modern Intel CPUs in server platform use this method to handle lowest-priority interrupts. Signed-off-by: Feng Wu Reviewed-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/irq_comm.c | 25 +++++++++++++++++---- arch/x86/kvm/lapic.c | 50 ++++++++++++++++++++++++++++++++++++++--- arch/x86/kvm/lapic.h | 2 ++ arch/x86/kvm/x86.c | 9 ++++++++ arch/x86/kvm/x86.h | 1 + 6 files changed, 82 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 44adbb819041..7b5459982433 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -754,6 +754,8 @@ struct kvm_arch { bool irqchip_split; u8 nr_reserved_ioapic_pins; + + bool disabled_lapic_found; }; struct kvm_vm_stat { diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 8fc89efb5250..37217363887d 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -34,6 +34,7 @@ #include "lapic.h" #include "hyperv.h" +#include "x86.h" static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, @@ -57,6 +58,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, { int i, r = -1; struct kvm_vcpu *vcpu, *lowest = NULL; + unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]; + unsigned int dest_vcpus = 0; if (irq->dest_mode == 0 && irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) { @@ -67,6 +70,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map)) return r; + memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap)); + kvm_for_each_vcpu(i, vcpu, kvm) { if (!kvm_apic_present(vcpu)) continue; @@ -80,13 +85,25 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, r = 0; r += kvm_apic_set_irq(vcpu, irq, dest_map); } else if (kvm_lapic_enabled(vcpu)) { - if (!lowest) - lowest = vcpu; - else if (kvm_apic_compare_prio(vcpu, lowest) < 0) - lowest = vcpu; + if (!kvm_vector_hashing_enabled()) { + if (!lowest) + lowest = vcpu; + else if (kvm_apic_compare_prio(vcpu, lowest) < 0) + lowest = vcpu; + } else { + __set_bit(i, dest_vcpu_bitmap); + dest_vcpus++; + } } } + if (dest_vcpus != 0) { + int idx = kvm_vector_to_index(irq->vector, dest_vcpus, + dest_vcpu_bitmap, KVM_MAX_VCPUS); + + lowest = kvm_get_vcpu(kvm, idx); + } + if (lowest) r = kvm_apic_set_irq(lowest, irq, dest_map); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 36591faed13b..1a4ca1d05fe9 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -675,6 +675,22 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, } } +int kvm_vector_to_index(u32 vector, u32 dest_vcpus, + const unsigned long *bitmap, u32 bitmap_size) +{ + u32 mod; + int i, idx = -1; + + mod = vector % 
dest_vcpus; + + for (i = 0; i <= mod; i++) { + idx = find_next_bit(bitmap, bitmap_size, idx + 1); + BUG_ON(idx == bitmap_size); + } + + return idx; +} + bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map) { @@ -727,21 +743,49 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, dst = map->logical_map[cid]; - if (kvm_lowest_prio_delivery(irq)) { + if (!kvm_lowest_prio_delivery(irq)) + goto set_irq; + + if (!kvm_vector_hashing_enabled()) { int l = -1; for_each_set_bit(i, &bitmap, 16) { if (!dst[i]) continue; if (l < 0) l = i; - else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0) + else if (kvm_apic_compare_prio(dst[i]->vcpu, + dst[l]->vcpu) < 0) l = i; } - bitmap = (l >= 0) ? 1 << l : 0; + } else { + int idx; + unsigned int dest_vcpus; + + dest_vcpus = hweight16(bitmap); + if (dest_vcpus == 0) + goto out; + + idx = kvm_vector_to_index(irq->vector, + dest_vcpus, &bitmap, 16); + + /* + * We may find a hardware disabled LAPIC here, if that + * is the case, print out a error message once for each + * guest and return. + */ + if (!dst[idx] && !kvm->arch.disabled_lapic_found) { + kvm->arch.disabled_lapic_found = true; + printk(KERN_INFO + "Disabled LAPIC found during irq injection\n"); + goto out; + } + + bitmap = (idx >= 0) ? 1 << idx : 0; } } +set_irq: for_each_set_bit(i, &bitmap, 16) { if (!dst[i]) continue; diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 41bdb35b4b67..afccf4099b00 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -175,4 +175,6 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu); bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_vcpu **dest_vcpu); +int kvm_vector_to_index(u32 vector, u32 dest_vcpus, + const unsigned long *bitmap, u32 bitmap_size); #endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5b937fdebc66..aafbcf9f9776 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -123,6 +123,9 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); unsigned int __read_mostly lapic_timer_advance_ns = 0; module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR); +static bool __read_mostly vector_hashing = true; +module_param(vector_hashing, bool, S_IRUGO); + static bool __read_mostly backwards_tsc_observed = false; #define KVM_NR_SHARED_MSRS 16 @@ -8364,6 +8367,12 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set); } +bool kvm_vector_hashing_enabled(void) +{ + return vector_hashing; +} +EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled); + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 34f416427143..007940faa5c6 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -179,6 +179,7 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data); int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num); +bool kvm_vector_hashing_enabled(void); #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ -- cgit v1.2.3-58-ga151 From 6228a0da805792c2f25b32e9b926d0810a6648ab Mon Sep 17 00:00:00 2001 From: Feng Wu Date: Mon, 25 Jan 2016 16:53:34 +0800 Subject: KVM: x86: Add lowest-priority support for 
vt-d posted-interrupts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use vector-hashing to deliver lowest-priority interrupts for VT-d posted-interrupts. This patch extends kvm_intr_is_single_vcpu() to support lowest-priority handling. Signed-off-by: Feng Wu Reviewed-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 56 +++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 1a4ca1d05fe9..1520d1acd0ad 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -798,6 +798,20 @@ out: return ret; } +/* + * This routine tries to handler interrupts in posted mode, here is how + * it deals with different cases: + * - For single-destination interrupts, handle it in posted mode + * - Else if vector hashing is enabled and it is a lowest-priority + * interrupt, handle it in posted mode and use the following mechanism + * to find the destinaiton vCPU. + * 1. For lowest-priority interrupts, store all the possible + * destination vCPUs in an array. + * 2. Use "guest vector % max number of destination vCPUs" to find + * the right destination vCPU in the array for the lowest-priority + * interrupt. + * - Otherwise, use remapped mode to inject the interrupt. + */ bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_vcpu **dest_vcpu) { @@ -839,16 +853,44 @@ bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq, if (cid >= ARRAY_SIZE(map->logical_map)) goto out; - for_each_set_bit(i, &bitmap, 16) { - dst = map->logical_map[cid][i]; - if (++r == 2) + if (kvm_vector_hashing_enabled() && + kvm_lowest_prio_delivery(irq)) { + int idx; + unsigned int dest_vcpus; + + dest_vcpus = hweight16(bitmap); + if (dest_vcpus == 0) goto out; - } - if (dst && kvm_apic_present(dst->vcpu)) + idx = kvm_vector_to_index(irq->vector, dest_vcpus, + &bitmap, 16); + + /* + * We may find a hardware disabled LAPIC here, if that + * is the case, print out a error message once for each + * guest and return + */ + dst = map->logical_map[cid][idx]; + if (!dst && !kvm->arch.disabled_lapic_found) { + kvm->arch.disabled_lapic_found = true; + printk(KERN_INFO + "Disabled LAPIC found during irq injection\n"); + goto out; + } + *dest_vcpu = dst->vcpu; - else - goto out; + } else { + for_each_set_bit(i, &bitmap, 16) { + dst = map->logical_map[cid][i]; + if (++r == 2) + goto out; + } + + if (dst && kvm_apic_present(dst->vcpu)) + *dest_vcpu = dst->vcpu; + else + goto out; + } } ret = true; -- cgit v1.2.3-58-ga151 From b6ce978067e75187d3c30f59b60d390a29374fab Mon Sep 17 00:00:00 2001 From: Feng Wu Date: Mon, 25 Jan 2016 16:53:35 +0800 Subject: KVM/VMX: Add host irq information in trace event when updating IRTE for posted interrupts Add host irq information in trace event, so we can better understand which irq is in posted mode. Signed-off-by: Feng Wu Reviewed-by: Radim Krcmar Signed-off-by: Paolo Bonzini --- arch/x86/kvm/trace.h | 12 ++++++++---- arch/x86/kvm/vmx.c | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index ad9f6a23f139..2f1ea2f61e1f 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -996,11 +996,13 @@ TRACE_EVENT(kvm_enter_smm, * Tracepoint for VT-d posted-interrupts. 
*/ TRACE_EVENT(kvm_pi_irte_update, - TP_PROTO(unsigned int vcpu_id, unsigned int gsi, - unsigned int gvec, u64 pi_desc_addr, bool set), - TP_ARGS(vcpu_id, gsi, gvec, pi_desc_addr, set), + TP_PROTO(unsigned int host_irq, unsigned int vcpu_id, + unsigned int gsi, unsigned int gvec, + u64 pi_desc_addr, bool set), + TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set), TP_STRUCT__entry( + __field( unsigned int, host_irq ) __field( unsigned int, vcpu_id ) __field( unsigned int, gsi ) __field( unsigned int, gvec ) @@ -1009,6 +1011,7 @@ TRACE_EVENT(kvm_pi_irte_update, ), TP_fast_assign( + __entry->host_irq = host_irq; __entry->vcpu_id = vcpu_id; __entry->gsi = gsi; __entry->gvec = gvec; @@ -1016,9 +1019,10 @@ TRACE_EVENT(kvm_pi_irte_update, __entry->set = set; ), - TP_printk("VT-d PI is %s for this irq, vcpu %u, gsi: 0x%x, " + TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, " "gvec: 0x%x, pi_desc_addr: 0x%llx", __entry->set ? "enabled and being updated" : "disabled", + __entry->host_irq, __entry->vcpu_id, __entry->gsi, __entry->gvec, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a4b4aa4cdc54..164eb9e1678b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10783,7 +10783,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; - trace_kvm_pi_irte_update(vcpu->vcpu_id, e->gsi, + trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); if (set) -- cgit v1.2.3-58-ga151 From f8543d6a977a1bdb37eb13ad81ef2874526209b0 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 8 Jan 2016 13:42:24 +0100 Subject: KVM: APIC: remove unnecessary double checks on APIC existence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Usually the in-kernel APIC's existence is checked in the caller. Do not bother checking it again in lapic.c. Reviewed-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 1520d1acd0ad..b1029051f664 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -475,18 +475,12 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) { - int highest_irr; - /* This may race with setting of irr in __apic_accept_irq() and * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq * will cause vmexit immediately and the value will be recalculated * on the next vmentry. 
*/ - if (!kvm_vcpu_has_lapic(vcpu)) - return 0; - highest_irr = apic_find_highest_irr(vcpu->arch.apic); - - return highest_irr; + return apic_find_highest_irr(vcpu->arch.apic); } static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, @@ -1601,8 +1595,7 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) { - if (kvm_vcpu_has_lapic(vcpu)) - apic_reg_write(vcpu->arch.apic, APIC_EOI, 0); + apic_reg_write(vcpu->arch.apic, APIC_EOI, 0); } EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi); @@ -1676,9 +1669,6 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!kvm_vcpu_has_lapic(vcpu)) - return; - apic_set_tpr(apic, ((cr8 & 0x0f) << 4) | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4)); } @@ -1687,9 +1677,6 @@ u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) { u64 tpr; - if (!kvm_vcpu_has_lapic(vcpu)) - return 0; - tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI); return (tpr & 0xf0) >> 4; @@ -1912,7 +1899,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) struct kvm_lapic *apic = vcpu->arch.apic; int highest_irr; - if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic)) + if (!apic_enabled(apic)) return -1; apic_update_ppr(apic); -- cgit v1.2.3-58-ga151 From 1e3161b4147caf2045ac4aae3d71fae6ac1a1d65 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 8 Jan 2016 13:41:16 +0100 Subject: KVM: x86: consolidate "has lapic" checks into irq.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Do for kvm_cpu_has_pending_timer and kvm_inject_pending_timer_irqs what the other irq.c routines have been doing. Reviewed-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/irq.c | 9 ++++++--- arch/x86/kvm/lapic.c | 6 +----- 2 files changed, 7 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 3982b479bb5f..95fcc7b13866 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -33,7 +33,10 @@ */ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { - return apic_has_pending_timer(vcpu); + if (lapic_in_kernel(vcpu)) + return apic_has_pending_timer(vcpu); + + return 0; } EXPORT_SYMBOL(kvm_cpu_has_pending_timer); @@ -137,8 +140,8 @@ EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) { - kvm_inject_apic_timer_irqs(vcpu); - /* TODO: PIT, RTC etc. 
*/ + if (lapic_in_kernel(vcpu)) + kvm_inject_apic_timer_irqs(vcpu); } EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index b1029051f664..57e3f27bdadb 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1801,8 +1801,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) && - apic_lvt_enabled(apic, APIC_LVTT)) + if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT)) return atomic_read(&apic->lapic_timer.pending); return 0; @@ -1927,9 +1926,6 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!kvm_vcpu_has_lapic(vcpu)) - return; - if (atomic_read(&apic->lapic_timer.pending) > 0) { kvm_apic_local_deliver(apic, APIC_LVTT); if (apic_lvtt_tscdeadline(apic)) -- cgit v1.2.3-58-ga151 From bce87cce88c71957c56479809db8316a836ec8b1 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 8 Jan 2016 13:48:51 +0100 Subject: KVM: x86: consolidate different ways to test for in-kernel LAPIC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Different pieces of code checked for vcpu->arch.apic being (non-)NULL, or used kvm_vcpu_has_lapic (more optimized) or lapic_in_kernel. Replace everything with lapic_in_kernel's name and kvm_vcpu_has_lapic's implementation. Reviewed-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/irq.h | 8 -------- arch/x86/kvm/lapic.c | 16 ++++++++-------- arch/x86/kvm/lapic.h | 8 ++++---- arch/x86/kvm/pmu.c | 2 +- arch/x86/kvm/x86.c | 17 +++++++++-------- 5 files changed, 22 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index ae5c78f2337d..61ebdc13a29a 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -109,14 +109,6 @@ static inline int irqchip_in_kernel(struct kvm *kvm) return ret; } -static inline int lapic_in_kernel(struct kvm_vcpu *vcpu) -{ - /* Same as irqchip_in_kernel(vcpu->kvm), but with less - * pointer chasing and no unnecessary memory barriers. 
- */ - return vcpu->arch.apic != NULL; -} - void kvm_pic_reset(struct kvm_kpic_state *s); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 57e3f27bdadb..1482a581a83c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -281,7 +281,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu) struct kvm_cpuid_entry2 *feat; u32 v = APIC_VERSION; - if (!kvm_vcpu_has_lapic(vcpu)) + if (!lapic_in_kernel(vcpu)) return; feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); @@ -1319,7 +1319,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu) struct kvm_lapic *apic = vcpu->arch.apic; u64 guest_tsc, tsc_deadline; - if (!kvm_vcpu_has_lapic(vcpu)) + if (!lapic_in_kernel(vcpu)) return; if (apic->lapic_timer.expired_tscdeadline == 0) @@ -1645,7 +1645,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || + if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return 0; @@ -1656,7 +1656,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || + if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return; @@ -2001,7 +2001,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) { struct hrtimer *timer; - if (!kvm_vcpu_has_lapic(vcpu)) + if (!lapic_in_kernel(vcpu)) return; timer = &vcpu->arch.apic->lapic_timer.timer; @@ -2174,7 +2174,7 @@ int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!kvm_vcpu_has_lapic(vcpu)) + if (!lapic_in_kernel(vcpu)) return 1; /* if this is ICR write vector before command */ @@ -2188,7 +2188,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) struct kvm_lapic *apic = vcpu->arch.apic; u32 low, high = 0; - if (!kvm_vcpu_has_lapic(vcpu)) + if (!lapic_in_kernel(vcpu)) return 1; if (apic_reg_read(apic, reg, 4, &low)) @@ -2220,7 +2220,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) u8 sipi_vector; unsigned long pe; - if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) + if (!lapic_in_kernel(vcpu) || !apic->pending_events) return; /* diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index afccf4099b00..59610099af04 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -103,7 +103,7 @@ static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off) extern struct static_key kvm_no_apic_vcpu; -static inline bool kvm_vcpu_has_lapic(struct kvm_vcpu *vcpu) +static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu) { if (static_key_false(&kvm_no_apic_vcpu)) return vcpu->arch.apic; @@ -130,7 +130,7 @@ static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic) static inline bool kvm_apic_present(struct kvm_vcpu *vcpu) { - return kvm_vcpu_has_lapic(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic); + return lapic_in_kernel(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic); } static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu) @@ -150,7 +150,7 @@ static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu) static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu) { - return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events; + return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events; } static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq) @@ -161,7 +161,7 @@ static inline 
bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq) static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu) { - return kvm_vcpu_has_lapic(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); + return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); } static inline int kvm_apic_id(struct kvm_lapic *apic) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 31aa2c85dc97..06ce377dcbc9 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -257,7 +257,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu) { - if (vcpu->arch.apic) + if (lapic_in_kernel(vcpu)) kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index aafbcf9f9776..ee3e990d519a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2984,7 +2984,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && - kvm_vcpu_has_lapic(vcpu)) + lapic_in_kernel(vcpu)) vcpu->arch.apic->sipi_vector = events->sipi_vector; if (events->flags & KVM_VCPUEVENT_VALID_SMM) { @@ -2997,7 +2997,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; else vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; - if (kvm_vcpu_has_lapic(vcpu)) { + if (lapic_in_kernel(vcpu)) { if (events->smi.latched_init) set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); else @@ -3237,7 +3237,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, switch (ioctl) { case KVM_GET_LAPIC: { r = -EINVAL; - if (!vcpu->arch.apic) + if (!lapic_in_kernel(vcpu)) goto out; u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); @@ -3255,7 +3255,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, } case KVM_SET_LAPIC: { r = -EINVAL; - if (!vcpu->arch.apic) + if (!lapic_in_kernel(vcpu)) goto out; u.lapic = memdup_user(argp, sizeof(*u.lapic)); if (IS_ERR(u.lapic)) @@ -4090,7 +4090,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, do { n = min(len, 8); - if (!(vcpu->arch.apic && + if (!(lapic_in_kernel(vcpu) && !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) break; @@ -4110,7 +4110,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) do { n = min(len, 8); - if (!(vcpu->arch.apic && + if (!(lapic_in_kernel(vcpu) && !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) @@ -6007,7 +6007,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) if (!kvm_x86_ops->update_cr8_intercept) return; - if (!vcpu->arch.apic) + if (!lapic_in_kernel(vcpu)) return; if (vcpu->arch.apicv_active) @@ -7035,7 +7035,7 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - if (!kvm_vcpu_has_lapic(vcpu) && + if (!lapic_in_kernel(vcpu) && mp_state->mp_state != KVM_MP_STATE_RUNNABLE) return -EINVAL; @@ -7590,6 +7590,7 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) } struct static_key kvm_no_apic_vcpu __read_mostly; +EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu); int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { -- cgit v1.2.3-58-ga151 From f6aa6dc44948739be39c20bbcbbe8cff5d77fe18 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 15 Jan 2016 14:11:46 +0100 
Subject: KVM: s390: allow sync of fp registers via vregs If we have MACHINE_HAS_VX, the floating point registers are stored in the vector register format, even if the guest isn't enabled for vector registers. So we can allow KVM_SYNC_VRS as soon as MACHINE_HAS_VX is available. This can in turn be used by user space to support floating point registers via struct kvm_run when the machine has vector registers.
Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch')
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 4af21c771f9b..8fcdf9836321 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c
@@ -1414,7 +1414,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) KVM_SYNC_PFAULT; if (test_kvm_facility(vcpu->kvm, 64)) vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; - if (test_kvm_facility(vcpu->kvm, 129)) + /* fprs can be synchronized via vrs, even if the guest has no vx. With + * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format. + */ + if (MACHINE_HAS_VX) vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; if (kvm_is_ucontrol(vcpu->kvm))
-- cgit v1.2.3-58-ga151
From 6fd8e67dd83437118cf46a4a9c224142004c3d51 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 18 Jan 2016 14:46:34 +0100
Subject: KVM: s390: sync of fp registers via kvm_run As we already store the floating point registers in the vector save area in floating point register format when we don't have MACHINE_HAS_VX, we can directly expose them to user space using a new sync flag. The floating point registers will be valid when KVM_SYNC_FPRS is set. The fpc will also be valid when KVM_SYNC_FPRS is set. Either KVM_SYNC_FPRS or KVM_SYNC_VRS will be enabled, never both.

Let's also change two positions where we access vrs, making the code easier to read and one comment superfluous.
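
A minimal user-space sketch of how these two sync flags might be consumed (this usage is an assumption, not part of the patch; it presumes a <linux/kvm.h> that already carries the KVM_SYNC_FPRS/KVM_SYNC_VRS flags and the vrs/fprs union, and that floating point register i overlays the leftmost doubleword of vector register i):

/* Hypothetical user-space helper: pull the guest FP state out of
 * struct kvm_run after KVM_RUN, whichever sync flag the kernel set.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>

static void get_guest_fprs(struct kvm_run *run, uint64_t fprs[16], uint32_t *fpc)
{
        if (run->kvm_valid_regs & KVM_SYNC_FPRS) {
                /* machine without vector registers: fprs are synced directly */
                memcpy(fprs, run->s.regs.fprs, sizeof(run->s.regs.fprs));
        } else if (run->kvm_valid_regs & KVM_SYNC_VRS) {
                /* assumption: fp register i is the leftmost doubleword of
                 * vector register i */
                for (int i = 0; i < 16; i++)
                        fprs[i] = run->s.regs.vrs[i][0];
        }
        *fpc = run->s.regs.fpc; /* valid with either flag */
}
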
Suggested-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 8 ++++++-- arch/s390/kvm/kvm-s390.c | 12 +++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index fe84bd5fe7ce..347fe5afa419 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -154,6 +154,7 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_PFAULT (1UL << 5) #define KVM_SYNC_VRS (1UL << 6) #define KVM_SYNC_RICCB (1UL << 7) +#define KVM_SYNC_FPRS (1UL << 8) /* definition of registers in kvm_run */ struct kvm_sync_regs { __u64 prefix; /* prefix register */ @@ -168,9 +169,12 @@ struct kvm_sync_regs { __u64 pft; /* pfault token [PFAULT] */ __u64 pfs; /* pfault select [PFAULT] */ __u64 pfc; /* pfault compare [PFAULT] */ - __u64 vrs[32][2]; /* vector registers */ + union { + __u64 vrs[32][2]; /* vector registers (KVM_SYNC_VRS) */ + __u64 fprs[16]; /* fp registers (KVM_SYNC_FPRS) */ + }; __u8 reserved[512]; /* for future vector expansion */ - __u32 fpc; /* only valid with vector registers */ + __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ __u8 padding[52]; /* riccb needs to be 64byte aligned */ __u8 riccb[64]; /* runtime instrumentation controls block */ }; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8fcdf9836321..2270fe4c8b71 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1419,6 +1419,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) */ if (MACHINE_HAS_VX) vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; + else + vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; if (kvm_is_ucontrol(vcpu->kvm)) return __kvm_ucontrol_vcpu_init(vcpu); @@ -1433,10 +1435,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; - /* Depending on MACHINE_HAS_VX, data stored to vrs either - * has vector register or floating point register format. - */ - current->thread.fpu.regs = vcpu->run->s.regs.vrs; + if (MACHINE_HAS_VX) + current->thread.fpu.regs = vcpu->run->s.regs.vrs; + else + current->thread.fpu.regs = vcpu->run->s.regs.fprs; current->thread.fpu.fpc = vcpu->run->s.regs.fpc; if (test_fp_ctl(current->thread.fpu.fpc)) /* User space provided an invalid FPC, let's clear it */ @@ -2389,7 +2391,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) fprs, 128); } else { rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, - vcpu->run->s.regs.vrs, 128); + vcpu->run->s.regs.fprs, 128); } rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA, vcpu->run->s.regs.gprs, 128); -- cgit v1.2.3-58-ga151 From 0e8bc06a2fbb4d6b688baa8e2416cd07f9453595 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 4 Nov 2015 13:47:58 +0100 Subject: KVM: s390: PSW forwarding / rewinding / ilc rework We have some confusion about ilc vs. ilen in our current code. So let's correctly use the term ilen when dealing with (ilc << 1). Program irq injection didn't take care of the correct ilc in case of irqs triggered by EXECUTE functions, let's provide one function kvm_s390_get_ilen() to take care of all that. Also, manually specifying in intercept handlers the size of the instruction (and sometimes overwriting that value for EXECUTE internally) doesn't make too much sense. 
So also provide the functions: - kvm_s390_retry_instr to retry the currently intercepted instruction - kvm_s390_rewind_psw to rewind the PSW without internal overwrites - kvm_s390_forward_psw to forward the PSW Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/intercept.c | 31 +++++++++++++++++++++++-------- arch/s390/kvm/interrupt.c | 28 ++++++---------------------- arch/s390/kvm/kvm-s390.c | 2 +- arch/s390/kvm/kvm-s390.h | 17 ++++++++++++++++- arch/s390/kvm/priv.c | 8 ++++---- 5 files changed, 50 insertions(+), 36 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index d53c10753c46..7f992e02124a 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -38,17 +38,32 @@ static const intercept_handler_t instruction_handlers[256] = { [0xeb] = kvm_s390_handle_eb, }; -void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc) +u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu) { struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; + u8 ilen = 0; - /* Use the length of the EXECUTE instruction if necessary */ - if (sie_block->icptstatus & 1) { - ilc = (sie_block->icptstatus >> 4) & 0x6; - if (!ilc) - ilc = 4; + switch (vcpu->arch.sie_block->icptcode) { + case ICPT_INST: + case ICPT_INSTPROGI: + case ICPT_OPEREXC: + case ICPT_PARTEXEC: + case ICPT_IOINST: + /* instruction only stored for these icptcodes */ + ilen = insn_length(vcpu->arch.sie_block->ipa >> 8); + /* Use the length of the EXECUTE instruction if necessary */ + if (sie_block->icptstatus & 1) { + ilen = (sie_block->icptstatus >> 4) & 0x6; + if (!ilen) + ilen = 4; + } + break; + case ICPT_PROGI: + /* bit 1+2 of pgmilc are the ilc, so we directly get ilen */ + ilen = vcpu->arch.sie_block->pgmilc & 0x6; + break; } - sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc); + return ilen; } static int handle_noop(struct kvm_vcpu *vcpu) @@ -318,7 +333,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) if (rc != 0) return rc; - kvm_s390_rewind_psw(vcpu, 4); + kvm_s390_retry_instr(vcpu); return 0; } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index f88ca72c3a77..daa4fdbcc91c 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -335,23 +335,6 @@ static void set_intercept_indicators(struct kvm_vcpu *vcpu) set_intercept_indicators_stop(vcpu); } -static u16 get_ilc(struct kvm_vcpu *vcpu) -{ - switch (vcpu->arch.sie_block->icptcode) { - case ICPT_INST: - case ICPT_INSTPROGI: - case ICPT_OPEREXC: - case ICPT_PARTEXEC: - case ICPT_IOINST: - /* last instruction only stored for these icptcodes */ - return insn_length(vcpu->arch.sie_block->ipa >> 8); - case ICPT_PROGI: - return vcpu->arch.sie_block->pgmilc; - default: - return 0; - } -} - static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; @@ -588,7 +571,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_pgm_info pgm_info; int rc = 0, nullifying = false; - u16 ilc = get_ilc(vcpu); + u16 ilen = kvm_s390_get_ilen(vcpu); spin_lock(&li->lock); pgm_info = li->irq.pgm; @@ -596,8 +579,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) memset(&li->irq.pgm, 0, sizeof(pgm_info)); spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d", - pgm_info.code, ilc); + VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, 
ilen:%d", + pgm_info.code, ilen); vcpu->stat.deliver_program_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, pgm_info.code, 0); @@ -682,9 +665,10 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) } if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST) - kvm_s390_rewind_psw(vcpu, ilc); + kvm_s390_rewind_psw(vcpu, ilen); - rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); + /* bit 1+2 of the target are the ilc, so we can directly use ilen */ + rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC); rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea, (u64 *) __LC_LAST_BREAK); rc |= put_guest_lc(vcpu, pgm_info.code, diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2270fe4c8b71..cd84a3eeb214 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2181,7 +2181,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - psw->addr = __rewind_psw(*psw, -insn_length(opcode)); + kvm_s390_forward_psw(vcpu, insn_length(opcode)); return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index df1abada1f36..1c756c7dd0c2 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -19,6 +19,7 @@ #include #include #include +#include typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); @@ -212,8 +213,22 @@ int kvm_s390_reinject_io_int(struct kvm *kvm, int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); /* implemented in intercept.c */ -void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc); +u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu); int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); +static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen) +{ + struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; + + sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen); +} +static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen) +{ + kvm_s390_rewind_psw(vcpu, -ilen); +} +static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu) +{ + kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu)); +} /* implemented in priv.c */ int is_valid_psw(psw_t *psw); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index ed74e86d9b9e..d58cbe9813db 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -173,7 +173,7 @@ static int handle_skey(struct kvm_vcpu *vcpu) if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - kvm_s390_rewind_psw(vcpu, 4); + kvm_s390_retry_instr(vcpu); VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); return 0; } @@ -184,7 +184,7 @@ static int handle_ipte_interlock(struct kvm_vcpu *vcpu) if (psw_bits(vcpu->arch.sie_block->gpsw).p) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); - kvm_s390_rewind_psw(vcpu, 4); + kvm_s390_retry_instr(vcpu); VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); return 0; } @@ -759,8 +759,8 @@ static int handle_essa(struct kvm_vcpu *vcpu) if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - /* Rewind PSW to repeat the ESSA instruction */ - kvm_s390_rewind_psw(vcpu, 4); + /* Retry the ESSA instruction */ + kvm_s390_retry_instr(vcpu); vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ cbrlo 
= phys_to_virt(vcpu->arch.sie_block->cbrlo); down_read(&gmap->mm->mmap_sem); -- cgit v1.2.3-58-ga151 From 634790b82759c98ee57c80966d859083fa2fcd8c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 4 Nov 2015 16:33:33 +0100 Subject: KVM: s390: migration / injection of prog irq ilc We have to migrate the program irq ilc and someday we will have to specify the ilc without KVM trying to autodetect the value. Let's reuse one of the spare fields in our program irq that should always be set to 0 by user space. Because we also want to make use of 0 ilcs ("not available"), we need a validity indicator. If no valid ilc is given, we try to autodetect the ilc via the current icptcode and icptstatus + parameter and store the valid ilc in the irq structure. This has a nice effect: QEMU's making use of KVM_S390_IRQ / KVM_S390_SET_IRQ_STATE / KVM_S390_GET_IRQ_STATE for migration will directly migrate the ilc without any changes. Please note that we use bit 0 as validity and bit 1,2 for the ilc, so by applying the ilc mask we directly get the ilen which is usually what we work with. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 12 +++++++++++- include/uapi/linux/kvm.h | 7 ++++++- 2 files changed, 17 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index daa4fdbcc91c..e594a7830022 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -571,7 +571,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_pgm_info pgm_info; int rc = 0, nullifying = false; - u16 ilen = kvm_s390_get_ilen(vcpu); + u16 ilen; spin_lock(&li->lock); pgm_info = li->irq.pgm; @@ -579,6 +579,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) memset(&li->irq.pgm, 0, sizeof(pgm_info)); spin_unlock(&li->lock); + ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK; VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d", pgm_info.code, ilen); vcpu->stat.deliver_program_int++; @@ -1043,8 +1044,16 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, irq->u.pgm.code, 0); + if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) { + /* auto detection if no valid ILC was given */ + irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK; + irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu); + irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID; + } + if (irq->u.pgm.code == PGM_PER) { li->irq.pgm.code |= PGM_PER; + li->irq.pgm.flags = irq->u.pgm.flags; /* only modify PER related information */ li->irq.pgm.per_address = irq->u.pgm.per_address; li->irq.pgm.per_code = irq->u.pgm.per_code; @@ -1053,6 +1062,7 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) } else if (!(irq->u.pgm.code & PGM_PER)) { li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) | irq->u.pgm.code; + li->irq.pgm.flags = irq->u.pgm.flags; /* only modify non-PER information */ li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code; li->irq.pgm.mon_code = irq->u.pgm.mon_code; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 9da905157cee..4e20a40bb10f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -541,7 +541,12 @@ struct kvm_s390_pgm_info { __u8 exc_access_id; __u8 per_access_id; __u8 op_access_id; - __u8 pad[3]; +#define KVM_S390_PGM_FLAGS_ILC_VALID 
0x01 +#define KVM_S390_PGM_FLAGS_ILC_0 0x02 +#define KVM_S390_PGM_FLAGS_ILC_1 0x04 +#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06 + __u8 flags; + __u8 pad[2]; }; struct kvm_s390_prefix_info { -- cgit v1.2.3-58-ga151 From 92c9632119b67f3e201240f6813cd0343bfb0141 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 16 Nov 2015 15:42:11 +0100 Subject: KVM: s390: gaccess: introduce access modes We will need special handling when fetching instructions, so let's introduce new guest access modes GACC_FETCH and GACC_STORE instead of a write flag. An additional patch will then introduce GACC_IFETCH. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 46 +++++++++++++++++++++++----------------------- arch/s390/kvm/gaccess.h | 17 +++++++++++------ arch/s390/kvm/intercept.c | 4 ++-- arch/s390/kvm/kvm-s390.c | 6 ++++-- arch/s390/kvm/priv.c | 5 +++-- 5 files changed, 43 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index d30db40437dc..c72ad9157414 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu) } static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, - int write) + enum gacc_mode mode) { union alet alet; struct ale ale; @@ -454,7 +454,7 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, } } - if (ale.fo == 1 && write) + if (ale.fo == 1 && mode == GACC_STORE) return PGM_PROTECTION; asce->val = aste.asce; @@ -477,7 +477,7 @@ enum { }; static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, - ar_t ar, int write) + ar_t ar, enum gacc_mode mode) { int rc; psw_t *psw = &vcpu->arch.sie_block->gpsw; @@ -486,7 +486,7 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, memset(pgm, 0, sizeof(*pgm)); tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; - tec_bits->fsi = write ? FSI_STORE : FSI_FETCH; + tec_bits->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH; tec_bits->as = psw_bits(*psw).as; if (!psw_bits(*psw).t) { @@ -506,7 +506,7 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, asce->val = vcpu->arch.sie_block->gcr[13]; return 0; case PSW_AS_ACCREG: - rc = ar_translation(vcpu, asce, ar, write); + rc = ar_translation(vcpu, asce, ar, mode); switch (rc) { case PGM_ALEN_TRANSLATION: case PGM_ALE_SEQUENCE: @@ -538,7 +538,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) * @gva: guest virtual address * @gpa: points to where guest physical (absolute) address should be stored * @asce: effective asce - * @write: indicates if access is a write access + * @mode: indicates the access mode to be used * * Translate a guest virtual address into a guest absolute address by means * of dynamic address translation as specified by the architecture. 
@@ -554,7 +554,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) */ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, unsigned long *gpa, const union asce asce, - int write) + enum gacc_mode mode) { union vaddress vaddr = {.addr = gva}; union raddress raddr = {.addr = gva}; @@ -699,7 +699,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, real_address: raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr); absolute_address: - if (write && dat_protection) + if (mode == GACC_STORE && dat_protection) return PGM_PROTECTION; if (kvm_is_error_gpa(vcpu->kvm, raddr.addr)) return PGM_ADDRESSING; @@ -728,7 +728,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu, static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, unsigned long *pages, unsigned long nr_pages, - const union asce asce, int write) + const union asce asce, enum gacc_mode mode) { struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; psw_t *psw = &vcpu->arch.sie_block->gpsw; @@ -740,13 +740,13 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, while (nr_pages) { ga = kvm_s390_logical_to_effective(vcpu, ga); tec_bits->addr = ga >> PAGE_SHIFT; - if (write && lap_enabled && is_low_address(ga)) { + if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) { pgm->code = PGM_PROTECTION; return pgm->code; } ga &= PAGE_MASK; if (psw_bits(*psw).t) { - rc = guest_translate(vcpu, ga, pages, asce, write); + rc = guest_translate(vcpu, ga, pages, asce, mode); if (rc < 0) return rc; if (rc == PGM_PROTECTION) @@ -768,7 +768,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, } int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, - unsigned long len, int write) + unsigned long len, enum gacc_mode mode) { psw_t *psw = &vcpu->arch.sie_block->gpsw; unsigned long _len, nr_pages, gpa, idx; @@ -780,7 +780,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, if (!len) return 0; - rc = get_vcpu_asce(vcpu, &asce, ar, write); + rc = get_vcpu_asce(vcpu, &asce, ar, mode); if (rc) return rc; nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; @@ -792,11 +792,11 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, need_ipte_lock = psw_bits(*psw).t && !asce.r; if (need_ipte_lock) ipte_lock(vcpu); - rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); + rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, mode); for (idx = 0; idx < nr_pages && !rc; idx++) { gpa = *(pages + idx) + (ga & ~PAGE_MASK); _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); - if (write) + if (mode == GACC_STORE) rc = kvm_write_guest(vcpu->kvm, gpa, data, _len); else rc = kvm_read_guest(vcpu->kvm, gpa, data, _len); @@ -812,7 +812,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, } int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, - void *data, unsigned long len, int write) + void *data, unsigned long len, enum gacc_mode mode) { unsigned long _len, gpa; int rc = 0; @@ -820,7 +820,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, while (len && !rc) { gpa = kvm_s390_real_to_abs(vcpu, gra); _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); - if (write) + if (mode) rc = write_guest_abs(vcpu, gpa, data, _len); else rc = read_guest_abs(vcpu, gpa, data, _len); @@ -841,7 +841,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, * has to take care of this. 
*/ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, - unsigned long *gpa, int write) + unsigned long *gpa, enum gacc_mode mode) { struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; psw_t *psw = &vcpu->arch.sie_block->gpsw; @@ -851,19 +851,19 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, gva = kvm_s390_logical_to_effective(vcpu, gva); tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code; - rc = get_vcpu_asce(vcpu, &asce, ar, write); + rc = get_vcpu_asce(vcpu, &asce, ar, mode); tec->addr = gva >> PAGE_SHIFT; if (rc) return rc; if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) { - if (write) { + if (mode == GACC_STORE) { rc = pgm->code = PGM_PROTECTION; return rc; } } if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */ - rc = guest_translate(vcpu, gva, gpa, asce, write); + rc = guest_translate(vcpu, gva, gpa, asce, mode); if (rc > 0) { if (rc == PGM_PROTECTION) tec->b61 = 1; @@ -883,7 +883,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, * check_gva_range - test a range of guest virtual addresses for accessibility */ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, - unsigned long length, int is_write) + unsigned long length, enum gacc_mode mode) { unsigned long gpa; unsigned long currlen; @@ -892,7 +892,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, ipte_lock(vcpu); while (length > 0 && !rc) { currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE)); - rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write); + rc = guest_translate_address(vcpu, gva, ar, &gpa, mode); gva += currlen; length -= currlen; } diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index ef03726cc661..2a6f8bfd22f8 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -155,16 +155,21 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, return kvm_read_guest(vcpu->kvm, gpa, data, len); } +enum gacc_mode { + GACC_FETCH, + GACC_STORE, +}; + int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, - ar_t ar, unsigned long *gpa, int write); + ar_t ar, unsigned long *gpa, enum gacc_mode mode); int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, - unsigned long length, int is_write); + unsigned long length, enum gacc_mode mode); int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, - unsigned long len, int write); + unsigned long len, enum gacc_mode mode); int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, - void *data, unsigned long len, int write); + void *data, unsigned long len, enum gacc_mode mode); /** * write_guest - copy data from kernel space to guest space @@ -215,7 +220,7 @@ static inline __must_check int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len) { - return access_guest(vcpu, ga, ar, data, len, 1); + return access_guest(vcpu, ga, ar, data, len, GACC_STORE); } /** @@ -235,7 +240,7 @@ static inline __must_check int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len) { - return access_guest(vcpu, ga, ar, data, len, 0); + return access_guest(vcpu, ga, ar, data, len, GACC_FETCH); } /** diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 7f992e02124a..44bb923a6482 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -317,7 +317,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) /* Make sure that the 
source is paged-in */ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2], - reg2, &srcaddr, 0); + reg2, &srcaddr, GACC_FETCH); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0); @@ -326,7 +326,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) /* Make sure that the destination is paged-in */ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1], - reg1, &dstaddr, 1); + reg1, &dstaddr, GACC_STORE); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index cd84a3eeb214..85e169b8e90d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2610,7 +2610,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, switch (mop->op) { case KVM_S390_MEMOP_LOGICAL_READ: if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { - r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false); + r = check_gva_range(vcpu, mop->gaddr, mop->ar, + mop->size, GACC_FETCH); break; } r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); @@ -2621,7 +2622,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, break; case KVM_S390_MEMOP_LOGICAL_WRITE: if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { - r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true); + r = check_gva_range(vcpu, mop->gaddr, mop->ar, + mop->size, GACC_STORE); break; } if (copy_from_user(tmpbuf, uaddr, mop->size)) { diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d58cbe9813db..add990945986 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -981,11 +981,12 @@ static int handle_tprot(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) ipte_lock(vcpu); - ret = guest_translate_address(vcpu, address1, ar, &gpa, 1); + ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE); if (ret == PGM_PROTECTION) { /* Write protected? Try again with read-only... */ cc = 1; - ret = guest_translate_address(vcpu, address1, ar, &gpa, 0); + ret = guest_translate_address(vcpu, address1, ar, &gpa, + GACC_FETCH); } if (ret) { if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { -- cgit v1.2.3-58-ga151 From 34346b9a9388b28c3b4a865f8b4f98863f05df81 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 16 Nov 2015 15:48:59 +0100 Subject: KVM: s390: gaccess: implement instruction fetching mode When an instruction is to be fetched, special handling applies to secondary-space mode and access-register mode. The instruction is to be fetched from primary space. We can easily support this by selecting the right asce for translation. Access registers will never be used during translation, so don't include them in the interface. As we only want to read from the current PSW address for now, let's also hide that detail. 
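
The address-space selection described here is compact enough to sketch on its own; a minimal stand-alone illustration of the rule (not the kernel code, which applies it to psw.as inside get_vcpu_asce() in the diff that follows):

/*
 * Sketch of the instruction-fetch rule described above: secondary-space
 * and access-register mode fetch the instruction from primary space,
 * while home-space mode keeps using home space.
 */
enum as_mode { AS_PRIMARY, AS_ACCREG, AS_SECONDARY, AS_HOME };

static enum as_mode insn_fetch_as(enum as_mode cur)
{
        return cur == AS_HOME ? AS_HOME : AS_PRIMARY;
}
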
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 11 +++++++---- arch/s390/kvm/gaccess.h | 21 +++++++++++++++++++++ 2 files changed, 28 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index c72ad9157414..66938d283b77 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -480,22 +480,25 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, enum gacc_mode mode) { int rc; - psw_t *psw = &vcpu->arch.sie_block->gpsw; + struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw); struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; struct trans_exc_code_bits *tec_bits; memset(pgm, 0, sizeof(*pgm)); tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; tec_bits->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH; - tec_bits->as = psw_bits(*psw).as; + tec_bits->as = psw.as; - if (!psw_bits(*psw).t) { + if (!psw.t) { asce->val = 0; asce->r = 1; return 0; } - switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { + if (mode == GACC_IFETCH) + psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY; + + switch (psw.as) { case PSW_AS_PRIMARY: asce->val = vcpu->arch.sie_block->gcr[1]; return 0; diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 2a6f8bfd22f8..df0a79dd8159 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -158,6 +158,7 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, enum gacc_mode { GACC_FETCH, GACC_STORE, + GACC_IFETCH, }; int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, @@ -243,6 +244,26 @@ int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, return access_guest(vcpu, ga, ar, data, len, GACC_FETCH); } +/** + * read_guest_instr - copy instruction data from guest space to kernel space + * @vcpu: virtual cpu + * @data: destination address in kernel space + * @len: number of bytes to copy + * + * Copy @len bytes from the current psw address (guest space) to @data (kernel + * space). + * + * The behaviour of read_guest_instr is identical to read_guest, except that + * instruction data will be read from primary space when in home-space or + * address-space mode. + */ +static inline __must_check +int read_guest_instr(struct kvm_vcpu *vcpu, void *data, unsigned long len) +{ + return access_guest(vcpu, vcpu->arch.sie_block->gpsw.addr, 0, data, len, + GACC_IFETCH); +} + /** * write_guest_abs - copy data from kernel space to guest space absolute * @vcpu: virtual cpu -- cgit v1.2.3-58-ga151 From 659773227506f7b971cfd52e1aa146c896405187 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 16 Nov 2015 16:17:45 +0100 Subject: KVM: s390: read the correct opcode on SIE faults Let's use our fresh new function read_guest_instr() to access guest storage via the correct addressing schema. 
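
In the diff that follows, a single opcode byte is enough because the instruction length is encoded in its two most significant bits; a hedged sketch of that rule (an assumption about what insn_length() computes, shown for illustration only):

/* Hypothetical helper mirroring the architected length encoding:
 * top two bits 00 -> 2 bytes, 01 or 10 -> 4 bytes, 11 -> 6 bytes.
 */
static unsigned int ilen_from_opcode(unsigned char opcode)
{
        switch (opcode >> 6) {
        case 0:
                return 2;
        case 3:
                return 6;
        default:
                return 4;
        }
}
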
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 85e169b8e90d..aa51a8d5179f 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2163,7 +2163,6 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) { - psw_t *psw = &vcpu->arch.sie_block->gpsw; u8 opcode; int rc; @@ -2178,7 +2177,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) * to look up the current opcode to get the length of the instruction * to be able to forward the PSW. */ - rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); + rc = read_guest_instr(vcpu, &opcode, 1); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); kvm_s390_forward_psw(vcpu, insn_length(opcode)); -- cgit v1.2.3-58-ga151 From f6af84e7e7eb0558d5ad3882007956eb5b748ffd Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 4 Nov 2015 16:22:19 +0100 Subject: KVM: s390: clean up prog irq injection on prog irq icpts __extract_prog_irq() is used only once for getting the program check data in one place. Let's combine it with an injection function to avoid a memset and to prevent misuse on injection by simplifying the interface to only have the VCPU as parameter. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/intercept.c | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 44bb923a6482..6b4e5b5ff06c 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -136,11 +136,11 @@ static int handle_instruction(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } -static void __extract_prog_irq(struct kvm_vcpu *vcpu, - struct kvm_s390_pgm_info *pgm_info) +static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu) { - memset(pgm_info, 0, sizeof(struct kvm_s390_pgm_info)); - pgm_info->code = vcpu->arch.sie_block->iprcc; + struct kvm_s390_pgm_info pgm_info = { + .code = vcpu->arch.sie_block->iprcc, + }; switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) { case PGM_AFX_TRANSLATION: @@ -153,7 +153,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu, case PGM_PRIMARY_AUTHORITY: case PGM_SECONDARY_AUTHORITY: case PGM_SPACE_SWITCH: - pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; + pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc; break; case PGM_ALEN_TRANSLATION: case PGM_ALE_SEQUENCE: @@ -161,7 +161,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu, case PGM_ASTE_SEQUENCE: case PGM_ASTE_VALIDITY: case PGM_EXTENDED_AUTHORITY: - pgm_info->exc_access_id = vcpu->arch.sie_block->eai; + pgm_info.exc_access_id = vcpu->arch.sie_block->eai; break; case PGM_ASCE_TYPE: case PGM_PAGE_TRANSLATION: @@ -169,32 +169,33 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu, case PGM_REGION_SECOND_TRANS: case PGM_REGION_THIRD_TRANS: case PGM_SEGMENT_TRANSLATION: - pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; - pgm_info->exc_access_id = vcpu->arch.sie_block->eai; - pgm_info->op_access_id = vcpu->arch.sie_block->oai; + pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc; + pgm_info.exc_access_id = vcpu->arch.sie_block->eai; + pgm_info.op_access_id = vcpu->arch.sie_block->oai; break; case PGM_MONITOR: - 
pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn; - pgm_info->mon_code = vcpu->arch.sie_block->tecmc; + pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn; + pgm_info.mon_code = vcpu->arch.sie_block->tecmc; break; case PGM_VECTOR_PROCESSING: case PGM_DATA: - pgm_info->data_exc_code = vcpu->arch.sie_block->dxc; + pgm_info.data_exc_code = vcpu->arch.sie_block->dxc; break; case PGM_PROTECTION: - pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc; - pgm_info->exc_access_id = vcpu->arch.sie_block->eai; + pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc; + pgm_info.exc_access_id = vcpu->arch.sie_block->eai; break; default: break; } if (vcpu->arch.sie_block->iprcc & PGM_PER) { - pgm_info->per_code = vcpu->arch.sie_block->perc; - pgm_info->per_atmid = vcpu->arch.sie_block->peratmid; - pgm_info->per_address = vcpu->arch.sie_block->peraddr; - pgm_info->per_access_id = vcpu->arch.sie_block->peraid; + pgm_info.per_code = vcpu->arch.sie_block->perc; + pgm_info.per_atmid = vcpu->arch.sie_block->peratmid; + pgm_info.per_address = vcpu->arch.sie_block->peraddr; + pgm_info.per_access_id = vcpu->arch.sie_block->peraid; } + return kvm_s390_inject_prog_irq(vcpu, &pgm_info); } /* @@ -223,7 +224,6 @@ static int handle_itdb(struct kvm_vcpu *vcpu) static int handle_prog(struct kvm_vcpu *vcpu) { - struct kvm_s390_pgm_info pgm_info; psw_t psw; int rc; @@ -249,8 +249,7 @@ static int handle_prog(struct kvm_vcpu *vcpu) if (rc) return rc; - __extract_prog_irq(vcpu, &pgm_info); - return kvm_s390_inject_prog_irq(vcpu, &pgm_info); + return inject_prog_on_prog_intercept(vcpu); } /** -- cgit v1.2.3-58-ga151 From eaa4f41642f096f1e10c15a2b172d79199e893ff Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 4 Nov 2015 16:46:55 +0100 Subject: KVM: s390: irq delivery should not rely on icptcode Program irq injection during program irq intercepts is the last candidates that injects nullifying irqs and relies on delivery to do the right thing. As we should not rely on the icptcode during any delivery (because that value will not be migrated), let's add a flag, telling prog IRQ delivery to not rewind the PSW in case of nullifying prog IRQs. 
Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/intercept.c | 2 ++ arch/s390/kvm/interrupt.c | 2 +- include/uapi/linux/kvm.h | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 6b4e5b5ff06c..2e6b54e4d3f9 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -140,6 +140,8 @@ static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu) { struct kvm_s390_pgm_info pgm_info = { .code = vcpu->arch.sie_block->iprcc, + /* the PSW has already been rewound */ + .flags = KVM_S390_PGM_FLAGS_NO_REWIND, }; switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) { diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index e594a7830022..87e2d1a89d74 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -665,7 +665,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) (u8 *) __LC_PER_ACCESS_ID); } - if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST) + if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND)) kvm_s390_rewind_psw(vcpu, ilen); /* bit 1+2 of the target are the ilc, so we can directly use ilen */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4e20a40bb10f..a2fe0ac1d61a 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -545,6 +545,7 @@ struct kvm_s390_pgm_info { #define KVM_S390_PGM_FLAGS_ILC_0 0x02 #define KVM_S390_PGM_FLAGS_ILC_1 0x04 #define KVM_S390_PGM_FLAGS_ILC_MASK 0x06 +#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08 __u8 flags; __u8 pad[2]; }; -- cgit v1.2.3-58-ga151 From 5631792053f094a8e2f01d5ddcc2550ad4da22f0 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 12 Jan 2016 17:37:58 +0100 Subject: KVM: s390: provide prog irq ilc on SIE faults On SIE faults, the ilc cannot be detected automatically, as the icptcode is 0. The ilc indicated in the program irq will always be 0. Therefore we have to manually specify the ilc in order to tell the guest which ilen was used when forwarding the PSW. 
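For illustration, the reason the instruction length can be used directly when building the flags: the ILC field occupies bits 1-2 of the flags byte (KVM_S390_PGM_FLAGS_ILC_0/ILC_1 above), and a valid ilen is 2, 4 or 6 bytes, which is exactly the ILC (ilen / 2) already shifted into those bit positions. A rough sketch, not kernel code (the helper name is invented and valid_bit stands in for the separate "ilc is valid" flag bit):

/* Encode an instruction length of 2, 4 or 6 bytes into pgm flags.
 * ilen already matches the ILC bit positions, so no shift is needed. */
static unsigned char pgm_flags_for_ilen(unsigned char ilen, unsigned char valid_bit)
{
	return ilen | valid_bit;
}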
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index aa51a8d5179f..12cec63eda27 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2163,7 +2163,10 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) { - u8 opcode; + struct kvm_s390_pgm_info pgm_info = { + .code = PGM_ADDRESSING, + }; + u8 opcode, ilen; int rc; VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); @@ -2180,9 +2183,10 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) rc = read_guest_instr(vcpu, &opcode, 1); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - kvm_s390_forward_psw(vcpu, insn_length(opcode)); - - return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + ilen = insn_length(opcode); + pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID; + kvm_s390_forward_psw(vcpu, ilen); + return kvm_s390_inject_prog_irq(vcpu, &pgm_info); } static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) -- cgit v1.2.3-58-ga151 From 9b0d721a07a2d92c79362dda8e6d896b2c107ce6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 12 Jan 2016 17:40:54 +0100 Subject: KVM: s390: instruction-fetching exceptions on SIE faults On instruction-fetch exceptions, we have to forward the PSW by any valid ilc and correctly use that ilc when injecting the irq. Injection will already take care of rewinding the PSW if we injected a nullifying program irq, so we don't need special handling prior to injection. Until now, autodetection would have guessed an ilc of 0. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 12cec63eda27..d0dcf73f36bc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2181,9 +2181,17 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) * to be able to forward the PSW. */ rc = read_guest_instr(vcpu, &opcode, 1); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); ilen = insn_length(opcode); + if (rc < 0) { + return rc; + } else if (rc) { + /* Instruction-Fetching Exceptions - we can't detect the ilen. + * Forward by arbitrary ilc, injection will take care of + * nullification if necessary. + */ + pgm_info = vcpu->arch.pgm; + ilen = 4; + } pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID; kvm_s390_forward_psw(vcpu, ilen); return kvm_s390_inject_prog_irq(vcpu, &pgm_info); -- cgit v1.2.3-58-ga151 From efa48163b8564573fbc28c0e84e4a278442e5fe1 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 14 Jan 2016 22:15:08 +0100 Subject: KVM: s390: remove old fragment of vector registers Since commit 9977e886cbbc ("s390/kernel: lazy restore fpu registers"), vregs in struct sie_page is unsed. We can safely remove the field and the definition. 
Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 8959ebb6d2c9..727e7f7b33fd 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -229,17 +229,11 @@ struct kvm_s390_itdb { __u8 data[256]; } __packed; -struct kvm_s390_vregs { - __vector128 vrs[32]; - __u8 reserved200[512]; /* for future vector expansion */ -} __packed; - struct sie_page { struct kvm_s390_sie_block sie_block; __u8 reserved200[1024]; /* 0x0200 */ struct kvm_s390_itdb itdb; /* 0x0600 */ - __u8 reserved700[1280]; /* 0x0700 */ - struct kvm_s390_vregs vregs; /* 0x0c00 */ + __u8 reserved700[2304]; /* 0x0700 */ } __packed; struct kvm_vcpu_stat { -- cgit v1.2.3-58-ga151 From ab99a1cc7a405fed5148cf6a6fc26eec75b8a7a7 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Wed, 3 Feb 2016 10:18:41 +0100 Subject: KVM: s390: do not take mmap_sem on dirty log query Dirty log query can take a long time for huge guests. Holding the mmap_sem for very long times can cause some unwanted latencies. Turns out that we do not need to hold the mmap semaphore. We hold the slots_lock for gfn->hva translation and walk the page tables with that address, so no need to look at the VMAs. KVM also holds a reference to the mm, which should prevent other things going away. During the walk we take the necessary ptl locks. Signed-off-by: Christian Borntraeger Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d0dcf73f36bc..d4bcd863b24a 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -274,7 +274,6 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm, unsigned long address; struct gmap *gmap = kvm->arch.gmap; - down_read(&gmap->mm->mmap_sem); /* Loop over all guest pages */ last_gfn = memslot->base_gfn + memslot->npages; for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) { @@ -283,7 +282,6 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm, if (gmap_test_and_clear_dirty(address, gmap)) mark_page_dirty(kvm, cur_gfn); } - up_read(&gmap->mm->mmap_sem); } /* Section: vm related */ -- cgit v1.2.3-58-ga151 From 70c88a00fbf65990b6268dabd305113460fe94c1 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 2 Feb 2016 15:15:56 +0100 Subject: KVM: s390: do not block CPU on dirty logging When doing dirty logging on huge guests (e.g.600GB) we sometimes get rcu stall timeouts with backtraces like [ 2753.194083] ([<0000000000112fb2>] show_trace+0x12a/0x130) [ 2753.194092] [<0000000000113024>] show_stack+0x6c/0xe8 [ 2753.194094] [<00000000001ee6a8>] rcu_pending+0x358/0xa48 [ 2753.194099] [<00000000001f20cc>] rcu_check_callbacks+0x84/0x168 [ 2753.194102] [<0000000000167654>] update_process_times+0x54/0x80 [ 2753.194107] [<00000000001bdb5c>] tick_sched_handle.isra.16+0x4c/0x60 [ 2753.194113] [<00000000001bdbd8>] tick_sched_timer+0x68/0x90 [ 2753.194115] [<0000000000182a88>] __run_hrtimer+0x88/0x1f8 [ 2753.194119] [<00000000001838ba>] hrtimer_interrupt+0x122/0x2b0 [ 2753.194121] [<000000000010d034>] do_extint+0x16c/0x170 [ 2753.194123] [<00000000005e206e>] ext_skip+0x38/0x3e [ 2753.194129] [<000000000012157c>] gmap_test_and_clear_dirty+0xcc/0x118 [ 2753.194134] ([<00000000001214ea>] 
gmap_test_and_clear_dirty+0x3a/0x118) [ 2753.194137] [<0000000000132da4>] kvm_vm_ioctl_get_dirty_log+0xd4/0x1b0 [ 2753.194143] [<000000000012ac12>] kvm_vm_ioctl+0x21a/0x548 [ 2753.194146] [<00000000002b57f6>] do_vfs_ioctl+0x30e/0x518 [ 2753.194149] [<00000000002b5a9c>] SyS_ioctl+0x9c/0xb0 [ 2753.194151] [<00000000005e1ae6>] sysc_tracego+0x14/0x1a [ 2753.194153] [<000003ffb75f3972>] 0x3ffb75f3972 We should do a cond_resched in here. Signed-off-by: Christian Borntraeger Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d4bcd863b24a..bb99ca28eb66 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -281,6 +281,7 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm, if (gmap_test_and_clear_dirty(address, gmap)) mark_page_dirty(kvm, cur_gfn); + cond_resched(); } } -- cgit v1.2.3-58-ga151 From 1763f8d09d522b3ac998229dcf038476e88b78fc Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Wed, 3 Feb 2016 11:12:34 +0100 Subject: KVM: s390: bail out early on fatal signal in dirty logging A KVM_GET_DIRTY_LOG ioctl might take a long time. This can result in fatal signals seemingly being ignored. Lets bail out during the dirty bit sync, if a fatal signal is pending. Signed-off-by: Christian Borntraeger Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index bb99ca28eb66..28bd5ea1b08f 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -281,6 +281,8 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm, if (gmap_test_and_clear_dirty(address, gmap)) mark_page_dirty(kvm, cur_gfn); + if (fatal_signal_pending(current)) + return; cond_resched(); } } -- cgit v1.2.3-58-ga151 From 178a787502123b01499c5a4617b94bb69ad49dd5 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Mon, 1 Feb 2016 11:14:15 +1100 Subject: vfio: Enable VFIO device for powerpc ec53500f "kvm: Add VFIO device" added a special KVM pseudo-device which is used to handle any necessary interactions between KVM and VFIO. Currently that device is built on x86 and ARM, but not powerpc, although powerpc does support both KVM and VFIO. This makes things awkward in userspace Currently qemu prints an alarming error message if you attempt to use VFIO and it can't initialize the KVM VFIO device. We don't want to remove the warning, because lack of the KVM VFIO device could mean coherency problems on x86. On powerpc, however, the error is harmless but looks disturbing, and a test based on host architecture in qemu would be ugly, and break if we do need the KVM VFIO device for something important in future. There's nothing preventing the KVM VFIO device from being built for powerpc, so this patch turns it on. It won't actually do anything, since we don't define any of the arch_*() hooks, but it will make qemu happy and we can extend it in future if we need to. 
Signed-off-by: David Gibson Reviewed-by: Eric Auger Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 0570eef83fba..7f7b6d86ac73 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -8,7 +8,7 @@ ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm KVM := ../../../virt/kvm common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ - $(KVM)/eventfd.o + $(KVM)/eventfd.o $(KVM)/vfio.o CFLAGS_e500_mmu.o := -I. CFLAGS_e500_mmu_host.o := -I. -- cgit v1.2.3-58-ga151 From e9ab1a1cafb7911df1550a285f2f733ea5920f55 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 15 Feb 2016 12:55:03 +1100 Subject: powerpc: Make vmalloc_to_phys() public This makes vmalloc_to_phys() public as there will be another user (KVM in-kernel VFIO acceleration) for it soon. As this new user can be compiled as a module, this exports the symbol. As a little optimization, this changes the helper to call vmalloc_to_pfn() instead of vmalloc_to_page() as the size of the struct page may not be power-of-two aligned which will make gcc use multiply instructions instead of shifts. Signed-off-by: Alexey Kardashevskiy Acked-by: Michael Ellerman Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/pgtable.h | 3 +++ arch/powerpc/mm/pgtable.c | 8 ++++++++ arch/powerpc/perf/hv-24x7.c | 8 -------- 3 files changed, 11 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index ac9fb114e25d..47897a30982d 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -78,6 +78,9 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, } return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift); } + +unsigned long vmalloc_to_phys(void *vmalloc_addr); + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PGTABLE_H */ diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 83dfd7925c72..de37ff445362 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -243,3 +243,11 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr) } #endif /* CONFIG_DEBUG_VM */ +unsigned long vmalloc_to_phys(void *va) +{ + unsigned long pfn = vmalloc_to_pfn(va); + + BUG_ON(!pfn); + return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va); +} +EXPORT_SYMBOL_GPL(vmalloc_to_phys); diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 9f9dfda9ed2c..3b09ecfd0aee 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -493,14 +493,6 @@ static size_t event_to_attr_ct(struct hv_24x7_event_data *event) } } -static unsigned long vmalloc_to_phys(void *v) -{ - struct page *p = vmalloc_to_page(v); - - BUG_ON(!p); - return page_to_phys(p) + offset_in_page(v); -} - /* */ struct event_uniq { struct rb_node node; -- cgit v1.2.3-58-ga151 From fcbb2ce672848481275c1f014ad44ccd1e43a7a2 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 15 Feb 2016 12:55:04 +1100 Subject: KVM: PPC: Rework H_PUT_TCE/H_GET_TCE handlers This reworks the existing H_PUT_TCE/H_GET_TCE handlers to have following patches applied nicer. This moves the ioba boundaries check to a helper and adds a check for least bits which have to be zeros. The patch is pretty mechanical (only check for least ioba bits is added) so no change in behaviour is expected. 
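For illustration, the boundary check is easiest to see with concrete numbers. A stand-alone sketch of what the new validation helper does (illustrative only; 4K IOMMU pages hard-coded, and the real helper returns H_SUCCESS/H_PARAMETER rather than 0/-1):

static int ioba_in_window(unsigned long ioba, unsigned long npages,
			  unsigned long window_size)
{
	unsigned long mask = (1UL << 12) - 1;	/* page-offset bits */
	unsigned long idx  = ioba >> 12;	/* first TCE index */
	unsigned long size = window_size >> 12;	/* number of TCEs in the window */

	if ((ioba & mask) || idx + npages > size || idx + npages < idx)
		return -1;
	return 0;
}

With a 1GB window, ioba 0x3000 and npages 1 passes; ioba 0x3004 fails the alignment check, and ioba 0x3ffff000 with npages 2 falls off the end of the window.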
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_vio_hv.c | 117 +++++++++++++++++++++++------------- 1 file changed, 75 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 89e96b3e0039..f29ba2c63e07 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -35,71 +35,104 @@ #include #include #include +#include #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) +/* + * Finds a TCE table descriptor by LIOBN. + * + * WARNING: This will be called in real or virtual mode on HV KVM and virtual + * mode on PR KVM + */ +static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu, + unsigned long liobn) +{ + struct kvm *kvm = vcpu->kvm; + struct kvmppc_spapr_tce_table *stt; + + list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) + if (stt->liobn == liobn) + return stt; + + return NULL; +} + +/* + * Validates IO address. + * + * WARNING: This will be called in real-mode on HV KVM and virtual + * mode on PR KVM + */ +static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, + unsigned long ioba, unsigned long npages) +{ + unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1; + unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K; + unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K; + + if ((ioba & mask) || (idx + npages > size) || (idx + npages < idx)) + return H_PARAMETER; + + return H_SUCCESS; +} + /* WARNING: This will be called in real-mode on HV KVM and virtual * mode on PR KVM */ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce) { - struct kvm *kvm = vcpu->kvm; - struct kvmppc_spapr_tce_table *stt; + struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn); + long ret; + unsigned long idx; + struct page *page; + u64 *tbl; /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ /* liobn, ioba, tce); */ - list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { - if (stt->liobn == liobn) { - unsigned long idx = ioba >> SPAPR_TCE_SHIFT; - struct page *page; - u64 *tbl; - - /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p window_size=0x%x\n", */ - /* liobn, stt, stt->window_size); */ - if (ioba >= stt->window_size) - return H_PARAMETER; - - page = stt->pages[idx / TCES_PER_PAGE]; - tbl = (u64 *)page_address(page); - - /* FIXME: Need to validate the TCE itself */ - /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */ - tbl[idx % TCES_PER_PAGE] = tce; - return H_SUCCESS; - } - } - - /* Didn't find the liobn, punt it to userspace */ - return H_TOO_HARD; + if (!stt) + return H_TOO_HARD; + + ret = kvmppc_ioba_validate(stt, ioba, 1); + if (ret != H_SUCCESS) + return ret; + + idx = ioba >> SPAPR_TCE_SHIFT; + page = stt->pages[idx / TCES_PER_PAGE]; + tbl = (u64 *)page_address(page); + + /* FIXME: Need to validate the TCE itself */ + /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */ + tbl[idx % TCES_PER_PAGE] = tce; + + return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba) { - struct kvm *kvm = vcpu->kvm; - struct kvmppc_spapr_tce_table *stt; + struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn); + long ret; + unsigned long idx; + struct page *page; + u64 *tbl; - list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { - if (stt->liobn == liobn) { - unsigned 
long idx = ioba >> SPAPR_TCE_SHIFT; - struct page *page; - u64 *tbl; + if (!stt) + return H_TOO_HARD; - if (ioba >= stt->window_size) - return H_PARAMETER; + ret = kvmppc_ioba_validate(stt, ioba, 1); + if (ret != H_SUCCESS) + return ret; - page = stt->pages[idx / TCES_PER_PAGE]; - tbl = (u64 *)page_address(page); + idx = ioba >> SPAPR_TCE_SHIFT; + page = stt->pages[idx / TCES_PER_PAGE]; + tbl = (u64 *)page_address(page); - vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; - return H_SUCCESS; - } - } + vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; - /* Didn't find the liobn, punt it to userspace */ - return H_TOO_HARD; + return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_get_tce); -- cgit v1.2.3-58-ga151 From 366baf28ee3fc22dea504a0bddf8edd1e9bcee70 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 15 Feb 2016 12:55:05 +1100 Subject: KVM: PPC: Use RCU for arch.spapr_tce_tables At the moment only spapr_tce_tables updates are protected against races but not lookups. This fixes missing protection by using RCU for the list. As lookups also happen in real mode, this uses list_for_each_entry_lockless() (which is expected not to access any vmalloc'd memory). This converts release_spapr_tce_table() to a RCU scheduled handler. Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kvm/book3s.c | 2 +- arch/powerpc/kvm/book3s_64_vio.c | 20 +++++++++++--------- arch/powerpc/kvm/book3s_64_vio_hv.c | 2 +- 4 files changed, 14 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 9d08d8cbed1a..ffdbc2dc18f9 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -183,6 +183,7 @@ struct kvmppc_spapr_tce_table { struct kvm *kvm; u64 liobn; u32 window_size; + struct rcu_head rcu; struct page *pages[0]; }; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 638c6d9be9e0..b34220d2aa42 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -807,7 +807,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) { #ifdef CONFIG_PPC64 - INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); + INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables); INIT_LIST_HEAD(&kvm->arch.rtas_tokens); #endif diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 54cf9bc94dad..9526c34c29c2 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -45,19 +45,16 @@ static long kvmppc_stt_npages(unsigned long window_size) * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; } -static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt) +static void release_spapr_tce_table(struct rcu_head *head) { - struct kvm *kvm = stt->kvm; + struct kvmppc_spapr_tce_table *stt = container_of(head, + struct kvmppc_spapr_tce_table, rcu); int i; - mutex_lock(&kvm->lock); - list_del(&stt->list); for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++) __free_page(stt->pages[i]); - kfree(stt); - mutex_unlock(&kvm->lock); - kvm_put_kvm(kvm); + kfree(stt); } static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf) @@ -88,7 +85,12 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) { struct kvmppc_spapr_tce_table *stt = filp->private_data; - release_spapr_tce_table(stt); + list_del_rcu(&stt->list); + + kvm_put_kvm(stt->kvm); + + call_rcu(&stt->rcu, release_spapr_tce_table); + return 0; } @@ -131,7 +133,7 @@ long 
kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, kvm_get_kvm(kvm); mutex_lock(&kvm->lock); - list_add(&stt->list, &kvm->arch.spapr_tce_tables); + list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables); mutex_unlock(&kvm->lock); diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index f29ba2c63e07..124d69246e11 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -51,7 +51,7 @@ static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu, struct kvm *kvm = vcpu->kvm; struct kvmppc_spapr_tce_table *stt; - list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) + list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list) if (stt->liobn == liobn) return stt; -- cgit v1.2.3-58-ga151 From f8626985c7c2485c423ce9f448028f81535b0ecc Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 15 Feb 2016 12:55:06 +1100 Subject: KVM: PPC: Account TCE-containing pages in locked_vm At the moment pages used for TCE tables (in addition to pages addressed by TCEs) are not counted in locked_vm counter so a malicious userspace tool can call ioctl(KVM_CREATE_SPAPR_TCE) as many times as RLIMIT_NOFILE and lock a lot of memory. This adds counting for pages used for TCE tables. This counts the number of pages required for a table plus pages for the kvmppc_spapr_tce_table struct (TCE table descriptor) itself. This changes release_spapr_tce_table() to store @npages on stack to avoid calling kvmppc_stt_npages() in the loop (tiny optimization, probably). This does not change the amount of used memory. Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_vio.c | 63 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 58 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 9526c34c29c2..1a1e14f8572f 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -39,19 +39,65 @@ #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) -static long kvmppc_stt_npages(unsigned long window_size) +static unsigned long kvmppc_tce_pages(unsigned long window_size) { return ALIGN((window_size >> SPAPR_TCE_SHIFT) * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; } +static unsigned long kvmppc_stt_pages(unsigned long tce_pages) +{ + unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) + + (tce_pages * sizeof(struct page *)); + + return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE; +} + +static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc) +{ + long ret = 0; + + if (!current || !current->mm) + return ret; /* process exited */ + + down_write(¤t->mm->mmap_sem); + + if (inc) { + unsigned long locked, lock_limit; + + locked = current->mm->locked_vm + stt_pages; + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) + ret = -ENOMEM; + else + current->mm->locked_vm += stt_pages; + } else { + if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm)) + stt_pages = current->mm->locked_vm; + + current->mm->locked_vm -= stt_pages; + } + + pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid, + inc ? '+' : '-', + stt_pages << PAGE_SHIFT, + current->mm->locked_vm << PAGE_SHIFT, + rlimit(RLIMIT_MEMLOCK), + ret ? 
" - exceeded" : ""); + + up_write(¤t->mm->mmap_sem); + + return ret; +} + static void release_spapr_tce_table(struct rcu_head *head) { struct kvmppc_spapr_tce_table *stt = container_of(head, struct kvmppc_spapr_tce_table, rcu); int i; + unsigned long npages = kvmppc_tce_pages(stt->window_size); - for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++) + for (i = 0; i < npages; i++) __free_page(stt->pages[i]); kfree(stt); @@ -62,7 +108,7 @@ static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data; struct page *page; - if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size)) + if (vmf->pgoff >= kvmppc_tce_pages(stt->window_size)) return VM_FAULT_SIGBUS; page = stt->pages[vmf->pgoff]; @@ -89,6 +135,8 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) kvm_put_kvm(stt->kvm); + kvmppc_account_memlimit( + kvmppc_stt_pages(kvmppc_tce_pages(stt->window_size)), false); call_rcu(&stt->rcu, release_spapr_tce_table); return 0; @@ -103,7 +151,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce *args) { struct kvmppc_spapr_tce_table *stt = NULL; - long npages; + unsigned long npages; int ret = -ENOMEM; int i; @@ -113,7 +161,12 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, return -EBUSY; } - npages = kvmppc_stt_npages(args->window_size); + npages = kvmppc_tce_pages(args->window_size); + ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true); + if (ret) { + stt = NULL; + goto fail; + } stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), GFP_KERNEL); -- cgit v1.2.3-58-ga151 From 462ee11e58c96b81707d98fb1d02a8a3e84290ce Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 15 Feb 2016 12:55:07 +1100 Subject: KVM: PPC: Replace SPAPR_TCE_SHIFT with IOMMU_PAGE_SHIFT_4K SPAPR_TCE_SHIFT is used in few places only and since IOMMU_PAGE_SHIFT_4K can be easily used instead, remove SPAPR_TCE_SHIFT. 
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_book3s_64.h | 2 -- arch/powerpc/kvm/book3s_64_vio.c | 3 ++- arch/powerpc/kvm/book3s_64_vio_hv.c | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 2aa79c864e91..7529aab068f5 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -33,8 +33,6 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu) } #endif -#define SPAPR_TCE_SHIFT 12 - #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */ #endif diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 1a1e14f8572f..84993d151db3 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -36,12 +36,13 @@ #include #include #include +#include #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) static unsigned long kvmppc_tce_pages(unsigned long window_size) { - return ALIGN((window_size >> SPAPR_TCE_SHIFT) + return ALIGN((window_size >> IOMMU_PAGE_SHIFT_4K) * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; } diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 124d69246e11..0ce4ffb2df12 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -99,7 +99,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - idx = ioba >> SPAPR_TCE_SHIFT; + idx = ioba >> IOMMU_PAGE_SHIFT_4K; page = stt->pages[idx / TCES_PER_PAGE]; tbl = (u64 *)page_address(page); @@ -127,7 +127,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - idx = ioba >> SPAPR_TCE_SHIFT; + idx = ioba >> IOMMU_PAGE_SHIFT_4K; page = stt->pages[idx / TCES_PER_PAGE]; tbl = (u64 *)page_address(page); -- cgit v1.2.3-58-ga151 From 5ee7af18642ce38c79b35927872f13d292cc3e27 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 15 Feb 2016 12:55:08 +1100 Subject: KVM: PPC: Move reusable bits of H_PUT_TCE handler to helpers Upcoming multi-tce support (H_PUT_TCE_INDIRECT/H_STUFF_TCE hypercalls) will validate TCE (not to have unexpected bits) and IO address (to be within the DMA window boundaries). This introduces helpers to validate TCE and IO address. The helpers are exported as they compile into vmlinux (to work in realmode) and will be used later by KVM kernel module in virtual mode. 
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 4 ++ arch/powerpc/kvm/book3s_64_vio_hv.c | 89 ++++++++++++++++++++++++++++++++----- 2 files changed, 83 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 2241d5357129..95139111a929 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -166,6 +166,10 @@ extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce *args); +extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, + unsigned long ioba, unsigned long npages); +extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt, + unsigned long tce); extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce); extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 0ce4ffb2df12..b608fdd0c6f6 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -36,6 +36,7 @@ #include #include #include +#include #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) @@ -64,7 +65,7 @@ static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu, * WARNING: This will be called in real-mode on HV KVM and virtual * mode on PR KVM */ -static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, +long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, unsigned long ioba, unsigned long npages) { unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1; @@ -76,6 +77,79 @@ static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, return H_SUCCESS; } +EXPORT_SYMBOL_GPL(kvmppc_ioba_validate); + +/* + * Validates TCE address. + * At the moment flags and page mask are validated. + * As the host kernel does not access those addresses (just puts them + * to the table and user space is supposed to process them), we can skip + * checking other things (such as TCE is a guest RAM address or the page + * was actually allocated). + * + * WARNING: This will be called in real-mode on HV KVM and virtual + * mode on PR KVM + */ +long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) +{ + unsigned long mask = + ~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ); + + if (tce & mask) + return H_PARAMETER; + + return H_SUCCESS; +} +EXPORT_SYMBOL_GPL(kvmppc_tce_validate); + +/* Note on the use of page_address() in real mode, + * + * It is safe to use page_address() in real mode on ppc64 because + * page_address() is always defined as lowmem_page_address() + * which returns __va(PFN_PHYS(page_to_pfn(page))) which is arithmetic + * operation and does not access page struct. + * + * Theoretically page_address() could be defined different + * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL + * would have to be enabled. + * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64, + * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only + * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP + * is not expected to be enabled on ppc32, page_address() + * is safe for ppc32 as well. 
+ * + * WARNING: This will be called in real-mode on HV KVM and virtual + * mode on PR KVM + */ +static u64 *kvmppc_page_address(struct page *page) +{ +#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL) +#error TODO: fix to avoid page_address() here +#endif + return (u64 *) page_address(page); +} + +/* + * Handles TCE requests for emulated devices. + * Puts guest TCE values to the table and expects user space to convert them. + * Called in both real and virtual modes. + * Cannot fail so kvmppc_tce_validate must be called before it. + * + * WARNING: This will be called in real-mode on HV KVM and virtual + * mode on PR KVM + */ +void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, + unsigned long idx, unsigned long tce) +{ + struct page *page; + u64 *tbl; + + page = stt->pages[idx / TCES_PER_PAGE]; + tbl = kvmppc_page_address(page); + + tbl[idx % TCES_PER_PAGE] = tce; +} +EXPORT_SYMBOL_GPL(kvmppc_tce_put); /* WARNING: This will be called in real-mode on HV KVM and virtual * mode on PR KVM @@ -85,9 +159,6 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, { struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn); long ret; - unsigned long idx; - struct page *page; - u64 *tbl; /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ /* liobn, ioba, tce); */ @@ -99,13 +170,11 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - idx = ioba >> IOMMU_PAGE_SHIFT_4K; - page = stt->pages[idx / TCES_PER_PAGE]; - tbl = (u64 *)page_address(page); + ret = kvmppc_tce_validate(stt, tce); + if (ret != H_SUCCESS) + return ret; - /* FIXME: Need to validate the TCE itself */ - /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */ - tbl[idx % TCES_PER_PAGE] = tce; + kvmppc_tce_put(stt, ioba >> IOMMU_PAGE_SHIFT_4K, tce); return H_SUCCESS; } -- cgit v1.2.3-58-ga151 From d3695aa4f452bc09c834a5010484f65fca37d87c Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 15 Feb 2016 12:55:09 +1100 Subject: KVM: PPC: Add support for multiple-TCE hcalls This adds real and virtual mode handlers for the H_PUT_TCE_INDIRECT and H_STUFF_TCE hypercalls for user space emulated devices such as IBMVIO devices or emulated PCI. These calls allow adding multiple entries (up to 512) into the TCE table in one call which saves time on transition between kernel and user space. The current implementation of kvmppc_h_stuff_tce() allows it to be executed in both real and virtual modes so there is one helper. The kvmppc_rm_h_put_tce_indirect() needs to translate the guest address to the host address and since the translation is different, there are 2 helpers - one for each mode. This implements the KVM_CAP_PPC_MULTITCE capability. When present, the kernel will try handling H_PUT_TCE_INDIRECT and H_STUFF_TCE if these are enabled by the userspace via KVM_CAP_PPC_ENABLE_HCALL. If they can not be handled by the kernel, they are passed on to the user space. The user space still has to have an implementation for these. Both HV and PR-syle KVM are supported. 
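For illustration, a hypothetical userspace sketch of the opt-in described above, using the per-VM KVM_ENABLE_CAP ioctl with KVM_CAP_PPC_ENABLE_HCALL (the hcall numbers are not exported to userspace by this series; 0x138/0x13c below are assumed from the real-mode hcall table slots touched by this patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define H_STUFF_TCE		0x138	/* assumed, matches hcall table slot */
#define H_PUT_TCE_INDIRECT	0x13c	/* assumed, matches hcall table slot */

/* Ask KVM to handle one sPAPR hcall in the kernel for this VM. */
static int enable_hcall(int vm_fd, uint64_t hcall)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_PPC_ENABLE_HCALL,
		.args = { hcall, 1 },	/* args[1] = 1 means enable */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

/* e.g. enable_hcall(vm_fd, H_PUT_TCE_INDIRECT); enable_hcall(vm_fd, H_STUFF_TCE); */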
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/api.txt | 25 ++++++ arch/powerpc/include/asm/kvm_ppc.h | 12 +++ arch/powerpc/kvm/book3s_64_vio.c | 60 ++++++++++++- arch/powerpc/kvm/book3s_64_vio_hv.c | 150 +++++++++++++++++++++++++++++++- arch/powerpc/kvm/book3s_hv.c | 26 +++++- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 +- arch/powerpc/kvm/book3s_pr_papr.c | 35 ++++++++ arch/powerpc/kvm/powerpc.c | 3 + 8 files changed, 306 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 07e4cdf02407..da3943586a2b 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -3035,6 +3035,31 @@ Returns: 0 on success, -1 on error Queues an SMI on the thread's vcpu. +4.97 KVM_CAP_PPC_MULTITCE + +Capability: KVM_CAP_PPC_MULTITCE +Architectures: ppc +Type: vm + +This capability means the kernel is capable of handling hypercalls +H_PUT_TCE_INDIRECT and H_STUFF_TCE without passing those into the user +space. This significantly accelerates DMA operations for PPC KVM guests. +User space should expect that its handlers for these hypercalls +are not going to be called if user space previously registered LIOBN +in KVM (via KVM_CREATE_SPAPR_TCE or similar calls). + +In order to enable H_PUT_TCE_INDIRECT and H_STUFF_TCE use in the guest, +user space might have to advertise it for the guest. For example, +IBM pSeries (sPAPR) guest starts using them if "hcall-multi-tce" is +present in the "ibm,hypertas-functions" device-tree property. + +The hypercalls mentioned above may or may not be processed successfully +in the kernel based fast path. If they can not be handled by the kernel, +they will get passed on to user space. So user space still has to have +an implementation for these despite the in kernel acceleration. + +This capability is always enabled. + 5. 
The kvm_run structure ------------------------ diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 95139111a929..4cadee590deb 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -166,12 +166,24 @@ extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce *args); +extern struct kvmppc_spapr_tce_table *kvmppc_find_table( + struct kvm_vcpu *vcpu, unsigned long liobn); extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, unsigned long ioba, unsigned long npages); extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt, unsigned long tce); +extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, + unsigned long *ua, unsigned long **prmap); +extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt, + unsigned long idx, unsigned long tce); extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce); +extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, + unsigned long liobn, unsigned long ioba, + unsigned long tce_list, unsigned long npages); +extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, + unsigned long liobn, unsigned long ioba, + unsigned long tce_value, unsigned long npages); extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba); extern struct page *kvm_alloc_hpt(unsigned long nr_pages); diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 84993d151db3..94c8e7e9b58c 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -14,6 +14,7 @@ * * Copyright 2010 Paul Mackerras, IBM Corp. 
* Copyright 2011 David Gibson, IBM Corporation + * Copyright 2016 Alexey Kardashevskiy, IBM Corporation */ #include @@ -37,8 +38,7 @@ #include #include #include - -#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) +#include static unsigned long kvmppc_tce_pages(unsigned long window_size) { @@ -204,3 +204,59 @@ fail: } return ret; } + +long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, + unsigned long liobn, unsigned long ioba, + unsigned long tce_list, unsigned long npages) +{ + struct kvmppc_spapr_tce_table *stt; + long i, ret = H_SUCCESS, idx; + unsigned long entry, ua = 0; + u64 __user *tces, tce; + + stt = kvmppc_find_table(vcpu, liobn); + if (!stt) + return H_TOO_HARD; + + entry = ioba >> IOMMU_PAGE_SHIFT_4K; + /* + * SPAPR spec says that the maximum size of the list is 512 TCEs + * so the whole table fits in 4K page + */ + if (npages > 512) + return H_PARAMETER; + + if (tce_list & (SZ_4K - 1)) + return H_PARAMETER; + + ret = kvmppc_ioba_validate(stt, ioba, npages); + if (ret != H_SUCCESS) + return ret; + + idx = srcu_read_lock(&vcpu->kvm->srcu); + if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) { + ret = H_TOO_HARD; + goto unlock_exit; + } + tces = (u64 __user *) ua; + + for (i = 0; i < npages; ++i) { + if (get_user(tce, tces + i)) { + ret = H_TOO_HARD; + goto unlock_exit; + } + tce = be64_to_cpu(tce); + + ret = kvmppc_tce_validate(stt, tce); + if (ret != H_SUCCESS) + goto unlock_exit; + + kvmppc_tce_put(stt, entry + i, tce); + } + +unlock_exit: + srcu_read_unlock(&vcpu->kvm->srcu, idx); + + return ret; +} +EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect); diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index b608fdd0c6f6..0486aa2329ee 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -14,6 +14,7 @@ * * Copyright 2010 Paul Mackerras, IBM Corp. * Copyright 2011 David Gibson, IBM Corporation + * Copyright 2016 Alexey Kardashevskiy, IBM Corporation */ #include @@ -30,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -37,6 +39,7 @@ #include #include #include +#include #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) @@ -46,7 +49,7 @@ * WARNING: This will be called in real or virtual mode on HV KVM and virtual * mode on PR KVM */ -static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu, +struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu, unsigned long liobn) { struct kvm *kvm = vcpu->kvm; @@ -58,6 +61,7 @@ static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu, return NULL; } +EXPORT_SYMBOL_GPL(kvmppc_find_table); /* * Validates IO address. 
@@ -151,9 +155,29 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, } EXPORT_SYMBOL_GPL(kvmppc_tce_put); -/* WARNING: This will be called in real-mode on HV KVM and virtual - * mode on PR KVM - */ +long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, + unsigned long *ua, unsigned long **prmap) +{ + unsigned long gfn = gpa >> PAGE_SHIFT; + struct kvm_memory_slot *memslot; + + memslot = search_memslots(kvm_memslots(kvm), gfn); + if (!memslot) + return -EINVAL; + + *ua = __gfn_to_hva_memslot(memslot, gfn) | + (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); + +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + if (prmap) + *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; +#endif + + return 0; +} +EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua); + +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce) { @@ -180,6 +204,122 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, } EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); +static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, + unsigned long ua, unsigned long *phpa) +{ + pte_t *ptep, pte; + unsigned shift = 0; + + ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift); + if (!ptep || !pte_present(*ptep)) + return -ENXIO; + pte = *ptep; + + if (!shift) + shift = PAGE_SHIFT; + + /* Avoid handling anything potentially complicated in realmode */ + if (shift > PAGE_SHIFT) + return -EAGAIN; + + if (!pte_young(pte)) + return -EAGAIN; + + *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) | + (ua & ~PAGE_MASK); + + return 0; +} + +long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, + unsigned long liobn, unsigned long ioba, + unsigned long tce_list, unsigned long npages) +{ + struct kvmppc_spapr_tce_table *stt; + long i, ret = H_SUCCESS; + unsigned long tces, entry, ua = 0; + unsigned long *rmap = NULL; + + stt = kvmppc_find_table(vcpu, liobn); + if (!stt) + return H_TOO_HARD; + + entry = ioba >> IOMMU_PAGE_SHIFT_4K; + /* + * The spec says that the maximum size of the list is 512 TCEs + * so the whole table addressed resides in 4K page + */ + if (npages > 512) + return H_PARAMETER; + + if (tce_list & (SZ_4K - 1)) + return H_PARAMETER; + + ret = kvmppc_ioba_validate(stt, ioba, npages); + if (ret != H_SUCCESS) + return ret; + + if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) + return H_TOO_HARD; + + rmap = (void *) vmalloc_to_phys(rmap); + + /* + * Synchronize with the MMU notifier callbacks in + * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.). + * While we have the rmap lock, code running on other CPUs + * cannot finish unmapping the host real page that backs + * this guest real page, so we are OK to access the host + * real page. 
+ */ + lock_rmap(rmap); + if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) { + ret = H_TOO_HARD; + goto unlock_exit; + } + + for (i = 0; i < npages; ++i) { + unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); + + ret = kvmppc_tce_validate(stt, tce); + if (ret != H_SUCCESS) + goto unlock_exit; + + kvmppc_tce_put(stt, entry + i, tce); + } + +unlock_exit: + unlock_rmap(rmap); + + return ret; +} + +long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, + unsigned long liobn, unsigned long ioba, + unsigned long tce_value, unsigned long npages) +{ + struct kvmppc_spapr_tce_table *stt; + long i, ret; + + stt = kvmppc_find_table(vcpu, liobn); + if (!stt) + return H_TOO_HARD; + + ret = kvmppc_ioba_validate(stt, ioba, npages); + if (ret != H_SUCCESS) + return ret; + + /* Check permission bits only to allow userspace poison TCE for debug */ + if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ)) + return H_PARAMETER; + + for (i = 0; i < npages; ++i, ioba += IOMMU_PAGE_SIZE_4K) + kvmppc_tce_put(stt, ioba >> IOMMU_PAGE_SHIFT_4K, tce_value); + + return H_SUCCESS; +} +EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce); + long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba) { @@ -205,3 +345,5 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_get_tce); + +#endif /* KVM_BOOK3S_HV_POSSIBLE */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index baeddb06811d..33b491e6f666 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -768,7 +768,31 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (kvmppc_xics_enabled(vcpu)) { ret = kvmppc_xics_hcall(vcpu, req); break; - } /* fallthrough */ + } + return RESUME_HOST; + case H_PUT_TCE: + ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; + case H_PUT_TCE_INDIRECT: + ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6), + kvmppc_get_gpr(vcpu, 7)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; + case H_STUFF_TCE: + ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6), + kvmppc_get_gpr(vcpu, 7)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; default: return RESUME_HOST; } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 6ee26de9a1de..ed16182a008b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2006,8 +2006,8 @@ hcall_real_table: .long 0 /* 0x12c */ .long 0 /* 0x130 */ .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table - .long 0 /* 0x138 */ - .long 0 /* 0x13c */ + .long DOTSYM(kvmppc_h_stuff_tce) - hcall_real_table + .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table .long 0 /* 0x140 */ .long 0 /* 0x144 */ .long 0 /* 0x148 */ diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index f2c75a1e0536..02176fd52f84 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c @@ -280,6 +280,37 @@ static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu) return EMULATE_DONE; } +static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu) +{ + unsigned long liobn = kvmppc_get_gpr(vcpu, 4); + unsigned long ioba = kvmppc_get_gpr(vcpu, 5); + unsigned long tce = kvmppc_get_gpr(vcpu, 6); + unsigned long npages = kvmppc_get_gpr(vcpu, 7); + 
long rc; + + rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba, + tce, npages); + if (rc == H_TOO_HARD) + return EMULATE_FAIL; + kvmppc_set_gpr(vcpu, 3, rc); + return EMULATE_DONE; +} + +static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu) +{ + unsigned long liobn = kvmppc_get_gpr(vcpu, 4); + unsigned long ioba = kvmppc_get_gpr(vcpu, 5); + unsigned long tce_value = kvmppc_get_gpr(vcpu, 6); + unsigned long npages = kvmppc_get_gpr(vcpu, 7); + long rc; + + rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages); + if (rc == H_TOO_HARD) + return EMULATE_FAIL; + kvmppc_set_gpr(vcpu, 3, rc); + return EMULATE_DONE; +} + static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) { long rc = kvmppc_xics_hcall(vcpu, cmd); @@ -306,6 +337,10 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) return kvmppc_h_pr_bulk_remove(vcpu); case H_PUT_TCE: return kvmppc_h_pr_put_tce(vcpu); + case H_PUT_TCE_INDIRECT: + return kvmppc_h_pr_put_tce_indirect(vcpu); + case H_STUFF_TCE: + return kvmppc_h_pr_stuff_tce(vcpu); case H_CEDE: kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); kvm_vcpu_block(vcpu); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index a3b182dcb823..69f897da782d 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -569,6 +569,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_PPC_GET_SMMU_INFO: r = 1; break; + case KVM_CAP_SPAPR_MULTITCE: + r = 1; + break; #endif default: r = 0; -- cgit v1.2.3-58-ga151 From 5bb16016ce111f3f4bc68bb109f7e5be6bc14ad7 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Tue, 9 Feb 2016 20:14:21 +0100 Subject: KVM: VMX: Factor out is_exception_n helper There is quite some common code in all these is_() helpers. Factor it out before adding even more of them. 
Signed-off-by: Jan Kiszka Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 164eb9e1678b..7852092b82ae 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -961,25 +961,26 @@ static const u32 vmx_msr_index[] = { MSR_EFER, MSR_TSC_AUX, MSR_STAR, }; -static inline bool is_page_fault(u32 intr_info) +static inline bool is_exception_n(u32 intr_info, u8 vector) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == - (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); + (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK); +} + +static inline bool is_page_fault(u32 intr_info) +{ + return is_exception_n(intr_info, PF_VECTOR); } static inline bool is_no_device(u32 intr_info) { - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | - INTR_INFO_VALID_MASK)) == - (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); + return is_exception_n(intr_info, NM_VECTOR); } static inline bool is_invalid_opcode(u32 intr_info) { - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | - INTR_INFO_VALID_MASK)) == - (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); + return is_exception_n(intr_info, UD_VECTOR); } static inline bool is_external_interrupt(u32 intr_info) -- cgit v1.2.3-58-ga151 From 6f05485d3a161caebc0fc7a73d641fa42a0dd263 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Tue, 9 Feb 2016 20:15:18 +0100 Subject: KVM: VMX: Fix guest debugging while in L2 When we take a #DB or #BP vmexit while in guest mode, we first of all need to check if there is ongoing guest debugging that might be interested in the event. Currently, we unconditionally leave L2 and inject the event into L1 if it is intercepting the exceptions. That breaks things marvelously. Signed-off-by: Jan Kiszka Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7852092b82ae..cb501d306416 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -968,6 +968,16 @@ static inline bool is_exception_n(u32 intr_info, u8 vector) (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK); } +static inline bool is_debug(u32 intr_info) +{ + return is_exception_n(intr_info, DB_VECTOR); +} + +static inline bool is_breakpoint(u32 intr_info) +{ + return is_exception_n(intr_info, BP_VECTOR); +} + static inline bool is_page_fault(u32 intr_info) { return is_exception_n(intr_info, PF_VECTOR); @@ -7753,6 +7763,13 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) else if (is_no_device(intr_info) && !(vmcs12->guest_cr0 & X86_CR0_TS)) return false; + else if (is_debug(intr_info) && + vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + return false; + else if (is_breakpoint(intr_info) && + vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) + return false; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); case EXIT_REASON_EXTERNAL_INTERRUPT: -- cgit v1.2.3-58-ga151 From 4941b8cb3746f09bb102f7a5d64d878e96a0c6cd Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 8 Feb 2016 14:51:12 +0100 Subject: KVM: x86: rename argument to kvm_set_tsc_khz This refers to the desired (scaled) frequency, which is called user_tsc_khz in the rest of the file. 
Reviewed-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ee3e990d519a..16bbe6df679b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1290,23 +1290,23 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) return 0; } -static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) +static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) { u32 thresh_lo, thresh_hi; int use_scaling = 0; /* tsc_khz can be zero if TSC calibration fails */ - if (this_tsc_khz == 0) { + if (user_tsc_khz == 0) { /* set tsc_scaling_ratio to a safe value */ vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; return -1; } /* Compute a scale to convert nanoseconds in TSC cycles */ - kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, + kvm_get_time_scale(user_tsc_khz, NSEC_PER_SEC / 1000, &vcpu->arch.virtual_tsc_shift, &vcpu->arch.virtual_tsc_mult); - vcpu->arch.virtual_tsc_khz = this_tsc_khz; + vcpu->arch.virtual_tsc_khz = user_tsc_khz; /* * Compute the variation in TSC rate which is acceptable @@ -1316,11 +1316,11 @@ static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) */ thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); - if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) { - pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi); + if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) { + pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi); use_scaling = 1; } - return set_tsc_khz(vcpu, this_tsc_khz, use_scaling); + return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) -- cgit v1.2.3-58-ga151 From 78db6a5037965429c04d708281f35a6e5562d31b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 8 Feb 2016 14:51:40 +0100 Subject: KVM: x86: rewrite handling of scaled TSC for kvmclock This is the same as before: kvm_scale_tsc(tgt_tsc_khz) = tgt_tsc_khz * ratio = tgt_tsc_khz * user_tsc_khz / tsc_khz (see set_tsc_khz) = user_tsc_khz (see kvm_guest_time_update) = vcpu->arch.virtual_tsc_khz (see kvm_set_tsc_khz) However, computing it through kvm_scale_tsc will make it possible to include the NTP correction in tgt_tsc_khz. 
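To make that identity concrete, here is a rough user-space sketch of the scaling arithmetic (a fixed-point ratio with 48 fractional bits is assumed, matching the VMX case; the helper names and example numbers are made up for illustration and this is not the kernel's kvm_scale_tsc):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 48

    /* ratio = user_tsc_khz / tsc_khz as a fixed-point fraction */
    static uint64_t make_ratio(uint64_t user_tsc_khz, uint64_t tsc_khz)
    {
            return (uint64_t)(((unsigned __int128)user_tsc_khz << FRAC_BITS) /
                              tsc_khz);
    }

    static uint64_t scale(uint64_t value, uint64_t ratio)
    {
            return (uint64_t)(((unsigned __int128)value * ratio) >> FRAC_BITS);
    }

    int main(void)
    {
            uint64_t tsc_khz = 3000000;       /* host TSC runs at 3.0 GHz  */
            uint64_t user_tsc_khz = 1500000;  /* guest was given 1.5 GHz   */
            uint64_t ratio = make_ratio(user_tsc_khz, tsc_khz);

            /* scaling the host rate by the ratio recovers the guest rate */
            printf("%llu\n", (unsigned long long)scale(tsc_khz, ratio));
            return 0;                         /* prints 1500000 */
    }

Routing tgt_tsc_khz through a scaling step of this shape is what later allows an NTP-corrected frequency to be folded into the same computation.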
Reviewed-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 16bbe6df679b..01d22b37556c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1713,7 +1713,7 @@ static void kvm_gen_update_masterclock(struct kvm *kvm) static int kvm_guest_time_update(struct kvm_vcpu *v) { - unsigned long flags, this_tsc_khz, tgt_tsc_khz; + unsigned long flags, tgt_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; s64 kernel_ns; @@ -1739,8 +1739,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); - this_tsc_khz = __this_cpu_read(cpu_tsc_khz); - if (unlikely(this_tsc_khz == 0)) { + tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz); + if (unlikely(tgt_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); return 1; @@ -1775,13 +1775,14 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) if (!vcpu->pv_time_enabled) return 0; - if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { - tgt_tsc_khz = kvm_has_tsc_control ? - vcpu->virtual_tsc_khz : this_tsc_khz; + if (kvm_has_tsc_control) + tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz); + + if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC / 1000, tgt_tsc_khz, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); - vcpu->hw_tsc_khz = this_tsc_khz; + vcpu->hw_tsc_khz = tgt_tsc_khz; } /* With all the info we got, fill in the values */ -- cgit v1.2.3-58-ga151 From 4efd805fca5590af181f89ad122b8695c63b5f2c Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Fri, 12 Feb 2016 15:00:15 +0100 Subject: KVM: x86: fix *NULL on invalid low-prio irq MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Smatch noticed a NULL dereference in kvm_intr_is_single_vcpu_fast that happens if VM already warned about invalid lowest-priority interrupt. Create a function for common code while fixing it. Fixes: 6228a0da8057 ("KVM: x86: Add lowest-priority support for vt-d posted-interrupts") Reported-by: Dan Carpenter Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 1482a581a83c..cf74404230ca 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -685,6 +685,15 @@ int kvm_vector_to_index(u32 vector, u32 dest_vcpus, return idx; } +static void kvm_apic_disabled_lapic_found(struct kvm *kvm) +{ + if (!kvm->arch.disabled_lapic_found) { + kvm->arch.disabled_lapic_found = true; + printk(KERN_INFO + "Disabled LAPIC found during irq injection\n"); + } +} + bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map) { @@ -763,15 +772,8 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, idx = kvm_vector_to_index(irq->vector, dest_vcpus, &bitmap, 16); - /* - * We may find a hardware disabled LAPIC here, if that - * is the case, print out a error message once for each - * guest and return. 
- */ - if (!dst[idx] && !kvm->arch.disabled_lapic_found) { - kvm->arch.disabled_lapic_found = true; - printk(KERN_INFO - "Disabled LAPIC found during irq injection\n"); + if (!dst[idx]) { + kvm_apic_disabled_lapic_found(kvm); goto out; } @@ -859,16 +861,9 @@ bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq, idx = kvm_vector_to_index(irq->vector, dest_vcpus, &bitmap, 16); - /* - * We may find a hardware disabled LAPIC here, if that - * is the case, print out a error message once for each - * guest and return - */ dst = map->logical_map[cid][idx]; - if (!dst && !kvm->arch.disabled_lapic_found) { - kvm->arch.disabled_lapic_found = true; - printk(KERN_INFO - "Disabled LAPIC found during irq injection\n"); + if (!dst) { + kvm_apic_disabled_lapic_found(kvm); goto out; } -- cgit v1.2.3-58-ga151 From 4e422bdd2f849d98fffccbc3295c2f0996097fb3 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 10 Feb 2016 17:50:23 +0100 Subject: KVM: x86: fix missed hardware breakpoints Sometimes when setting a breakpoint a process doesn't stop on it. This is because the debug registers are not loaded correctly on VCPU load. The following simple reproducer from Oleg Nesterov tries using debug registers in both the host and the guest, for example by running "./bp 0 1" on the host and "./bp 14 15" under QEMU. #include #include #include #include #include #include #include #include #include #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) unsigned long encode_dr7(int drnum, int enable, unsigned int type, unsigned int len) { unsigned long dr7; dr7 = ((len | type) & 0xf) << (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE); if (enable) dr7 |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE)); return dr7; } int write_dr(int pid, int dr, unsigned long val) { return ptrace(PTRACE_POKEUSER, pid, offsetof (struct user, u_debugreg[dr]), val); } void set_bp(pid_t pid, void *addr) { unsigned long dr7; assert(write_dr(pid, 0, (long)addr) == 0); dr7 = encode_dr7(0, 1, DR_RW_EXECUTE, DR_LEN_1); assert(write_dr(pid, 7, dr7) == 0); } void *get_rip(int pid) { return (void*)ptrace(PTRACE_PEEKUSER, pid, offsetof(struct user, regs.rip), 0); } void test(int nr) { void *bp_addr = &&label + nr, *bp_hit; int pid; printf("test bp %d\n", nr); assert(nr < 16); // see 16 asm nops below pid = fork(); if (!pid) { assert(ptrace(PTRACE_TRACEME, 0,0,0) == 0); kill(getpid(), SIGSTOP); for (;;) { label: asm ( "nop; nop; nop; nop;" "nop; nop; nop; nop;" "nop; nop; nop; nop;" "nop; nop; nop; nop;" ); } } assert(pid == wait(NULL)); set_bp(pid, bp_addr); for (;;) { assert(ptrace(PTRACE_CONT, pid, 0, 0) == 0); assert(pid == wait(NULL)); bp_hit = get_rip(pid); if (bp_hit != bp_addr) fprintf(stderr, "ERR!! 
hit wrong bp %ld != %d\n", bp_hit - &&label, nr); } } int main(int argc, const char *argv[]) { while (--argc) { int nr = atoi(*++argv); if (!fork()) test(nr); } while (wait(NULL) > 0) ; return 0; } Cc: stable@vger.kernel.org Suggested-by: Nadadv Amit Reported-by: Andrey Wagin Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 01d22b37556c..94ef72dce299 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2750,6 +2750,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) -- cgit v1.2.3-58-ga151 From 8ed6d76781dd01451546d402a2136b6389861d81 Mon Sep 17 00:00:00 2001 From: Andrey Smetanin Date: Thu, 11 Feb 2016 16:44:57 +0300 Subject: kvm/x86: Rename Hyper-V long spin wait hypercall Rename HV_X64_HV_NOTIFY_LONG_SPIN_WAIT by HVCALL_NOTIFY_LONG_SPIN_WAIT, so the name is more consistent with the other hypercalls. Signed-off-by: Andrey Smetanin Reviewed-by: Roman Kagan CC: Gleb Natapov CC: Paolo Bonzini CC: Joerg Roedel CC: "K. Y. Srinivasan" CC: Haiyang Zhang CC: Roman Kagan CC: Denis V. Lunev CC: qemu-devel@nongnu.org [Change name, Andrey used HV_X64_HCALL_NOTIFY_LONG_SPIN_WAIT. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/uapi/asm/hyperv.h | 2 +- arch/x86/kvm/hyperv.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index 7956412d09bd..5699e1c94693 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -226,7 +226,7 @@ (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) /* Declare the various hypercall operations. */ -#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008 +#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 #define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001 #define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12 diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index c58ba67175ac..31222442ac57 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1084,7 +1084,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); switch (code) { - case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: + case HVCALL_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); break; default: -- cgit v1.2.3-58-ga151 From 18f098618aa031f4c8a907c550fcd6785280c977 Mon Sep 17 00:00:00 2001 From: Andrey Smetanin Date: Thu, 11 Feb 2016 16:44:58 +0300 Subject: drivers/hv: Move VMBus hypercall codes into Hyper-V UAPI header VMBus hypercall codes inside Hyper-V UAPI header will be used by QEMU to implement VMBus host devices support. Signed-off-by: Andrey Smetanin Acked-by: K. Y. Srinivasan Reviewed-by: Roman Kagan CC: Gleb Natapov CC: Paolo Bonzini CC: Joerg Roedel CC: "K. Y. Srinivasan" CC: Haiyang Zhang CC: Roman Kagan CC: Denis V. Lunev CC: qemu-devel@nongnu.org [Do not rename the constant at the same time as moving it, as that would cause semantic conflicts with the Hyper-V tree. 
- Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/uapi/asm/hyperv.h | 2 ++ drivers/hv/hyperv_vmbus.h | 6 ------ 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index 5699e1c94693..9b1a91834ac8 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -227,6 +227,8 @@ /* Declare the various hypercall operations. */ #define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 +#define HVCALL_POST_MESSAGE 0x005c +#define HVCALL_SIGNAL_EVENT 0x005d #define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001 #define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 4ebc796b4f33..2f8c0f40930b 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -256,12 +256,6 @@ struct hv_monitor_page { u8 rsvdz4[1984]; }; -/* Declare the various hypercall operations. */ -enum hv_call_code { - HVCALL_POST_MESSAGE = 0x005c, - HVCALL_SIGNAL_EVENT = 0x005d, -}; - /* Definition of the hv_post_message hypercall input structure. */ struct hv_input_post_message { union hv_connection_id connectionid; -- cgit v1.2.3-58-ga151 From 0d9c055eaaf41bebb0e6b095fff447523121fad3 Mon Sep 17 00:00:00 2001 From: Andrey Smetanin Date: Thu, 11 Feb 2016 16:44:59 +0300 Subject: kvm/x86: Pass return code of kvm_emulate_hypercall Pass the return code from kvm_emulate_hypercall on to the caller, in order to allow it to indicate to the userspace that the hypercall has to be handled there. Also adjust all the existing code paths to return 1 to make sure the hypercall isn't passed to the userspace without setting kvm_run appropriately. Signed-off-by: Andrey Smetanin Reviewed-by: Roman Kagan CC: Gleb Natapov CC: Paolo Bonzini CC: Joerg Roedel CC: "K. Y. Srinivasan" CC: Haiyang Zhang CC: Roman Kagan CC: Denis V. 
Lunev CC: qemu-devel@nongnu.org Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 2 +- arch/x86/kvm/svm.c | 3 +-- arch/x86/kvm/vmx.c | 3 +-- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 31222442ac57..599b06733f02 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1055,7 +1055,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) */ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); - return 0; + return 1; } longmode = is_64_bit_mode(vcpu); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index c13a64b7d789..95070386d599 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1858,8 +1858,7 @@ static int halt_interception(struct vcpu_svm *svm) static int vmmcall_interception(struct vcpu_svm *svm) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; - kvm_emulate_hypercall(&svm->vcpu); - return 1; + return kvm_emulate_hypercall(&svm->vcpu); } static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cb501d306416..9f08037ef14b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5758,8 +5758,7 @@ static int handle_halt(struct kvm_vcpu *vcpu) static int handle_vmcall(struct kvm_vcpu *vcpu) { - kvm_emulate_hypercall(vcpu); - return 1; + return kvm_emulate_hypercall(vcpu); } static int handle_invd(struct kvm_vcpu *vcpu) -- cgit v1.2.3-58-ga151 From b2fdc2570a6c4b1fe950c11a2e9ce949f5190765 Mon Sep 17 00:00:00 2001 From: Andrey Smetanin Date: Thu, 11 Feb 2016 16:45:00 +0300 Subject: kvm/x86: Reject Hyper-V hypercall continuation Currently we do not support Hyper-V hypercall continuation so reject it. Signed-off-by: Andrey Smetanin Reviewed-by: Roman Kagan CC: Gleb Natapov CC: Paolo Bonzini CC: Joerg Roedel CC: "K. Y. Srinivasan" CC: Haiyang Zhang CC: Roman Kagan CC: Denis V. Lunev CC: qemu-devel@nongnu.org Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 599b06733f02..e8af5978762b 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1083,6 +1083,12 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); + /* Hypercall continuation is not supported yet */ + if (rep_cnt || rep_idx) { + res = HV_STATUS_INVALID_HYPERCALL_CODE; + goto set_result; + } + switch (code) { case HVCALL_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); @@ -1092,6 +1098,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) break; } +set_result: ret = res | (((u64)rep_done & 0xfff) << 32); if (longmode) { kvm_register_write(vcpu, VCPU_REGS_RAX, ret); -- cgit v1.2.3-58-ga151 From 83326e43f27e9a8a501427a0060f8af519a39bb2 Mon Sep 17 00:00:00 2001 From: Andrey Smetanin Date: Thu, 11 Feb 2016 16:45:01 +0300 Subject: kvm/x86: Hyper-V VMBus hypercall userspace exit The patch implements KVM_EXIT_HYPERV userspace exit functionality for Hyper-V VMBus hypercalls: HV_X64_HCALL_POST_MESSAGE, HV_X64_HCALL_SIGNAL_EVENT. Changes v3: * use vcpu->arch.complete_userspace_io to setup hypercall result Changes v2: * use KVM_EXIT_HYPERV for hypercalls Signed-off-by: Andrey Smetanin Reviewed-by: Roman Kagan CC: Gleb Natapov CC: Paolo Bonzini CC: Joerg Roedel CC: "K. Y. Srinivasan" CC: Haiyang Zhang CC: Roman Kagan CC: Denis V. 
Lunev CC: qemu-devel@nongnu.org Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/api.txt | 6 ++++++ arch/x86/kvm/hyperv.c | 39 ++++++++++++++++++++++++++++++++------- include/uapi/linux/kvm.h | 6 ++++++ 3 files changed, 44 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 07e4cdf02407..4a661e555c09 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -3339,6 +3339,7 @@ EOI was received. struct kvm_hyperv_exit { #define KVM_EXIT_HYPERV_SYNIC 1 +#define KVM_EXIT_HYPERV_HCALL 2 __u32 type; union { struct { @@ -3347,6 +3348,11 @@ EOI was received. __u64 evt_page; __u64 msg_page; } synic; + struct { + __u64 input; + __u64 result; + __u64 params[2]; + } hcall; } u; }; /* KVM_EXIT_HYPERV */ diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index e8af5978762b..5ff3485acb60 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1043,6 +1043,27 @@ bool kvm_hv_hypercall_enabled(struct kvm *kvm) return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; } +static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) +{ + bool longmode; + + longmode = is_64_bit_mode(vcpu); + if (longmode) + kvm_register_write(vcpu, VCPU_REGS_RAX, result); + else { + kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32); + kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff); + } +} + +static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result); + return 1; +} + int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { u64 param, ingpa, outgpa, ret; @@ -1093,6 +1114,16 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) case HVCALL_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); break; + case HVCALL_POST_MESSAGE: + case HVCALL_SIGNAL_EVENT: + vcpu->run->exit_reason = KVM_EXIT_HYPERV; + vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; + vcpu->run->hyperv.u.hcall.input = param; + vcpu->run->hyperv.u.hcall.params[0] = ingpa; + vcpu->run->hyperv.u.hcall.params[1] = outgpa; + vcpu->arch.complete_userspace_io = + kvm_hv_hypercall_complete_userspace; + return 0; default: res = HV_STATUS_INVALID_HYPERCALL_CODE; break; @@ -1100,12 +1131,6 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) set_result: ret = res | (((u64)rep_done & 0xfff) << 32); - if (longmode) { - kvm_register_write(vcpu, VCPU_REGS_RAX, ret); - } else { - kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); - kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); - } - + kvm_hv_hypercall_set_result(vcpu, ret); return 1; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a2fe0ac1d61a..82581b6e944d 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -157,6 +157,7 @@ struct kvm_s390_skeys { struct kvm_hyperv_exit { #define KVM_EXIT_HYPERV_SYNIC 1 +#define KVM_EXIT_HYPERV_HCALL 2 __u32 type; union { struct { @@ -165,6 +166,11 @@ struct kvm_hyperv_exit { __u64 evt_page; __u64 msg_page; } synic; + struct { + __u64 input; + __u64 result; + __u64 params[2]; + } hcall; } u; }; -- cgit v1.2.3-58-ga151 From 3ae13faac40011e51234989d938fb70f4f0150d0 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 8 Feb 2016 15:11:15 +0100 Subject: KVM: x86: pass kvm_get_time_scale arguments in hertz Prepare for improving the precision in the next patch. 
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 94ef72dce299..2fb92c0af803 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1203,7 +1203,7 @@ static uint32_t div_frac(uint32_t dividend, uint32_t divisor) return dividend; } -static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, +static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz, s8 *pshift, u32 *pmultiplier) { uint64_t scaled64; @@ -1211,8 +1211,8 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, uint64_t tps64; uint32_t tps32; - tps64 = base_khz * 1000LL; - scaled64 = scaled_khz * 1000LL; + tps64 = base_hz; + scaled64 = scaled_hz; while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { tps64 >>= 1; shift--; @@ -1230,8 +1230,8 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, *pshift = shift; *pmultiplier = div_frac(scaled64, tps32); - pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n", - __func__, base_khz, scaled_khz, shift, *pmultiplier); + pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n", + __func__, base_hz, scaled_hz, shift, *pmultiplier); } #ifdef CONFIG_X86_64 @@ -1303,7 +1303,7 @@ static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) } /* Compute a scale to convert nanoseconds in TSC cycles */ - kvm_get_time_scale(user_tsc_khz, NSEC_PER_SEC / 1000, + kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC, &vcpu->arch.virtual_tsc_shift, &vcpu->arch.virtual_tsc_mult); vcpu->arch.virtual_tsc_khz = user_tsc_khz; @@ -1779,7 +1779,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz); if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { - kvm_get_time_scale(NSEC_PER_SEC / 1000, tgt_tsc_khz, + kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); vcpu->hw_tsc_khz = tgt_tsc_khz; -- cgit v1.2.3-58-ga151 From ded5874946baa15b56b531f638f9c706266deb7b Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Mon, 22 Feb 2016 17:23:40 +0900 Subject: KVM: x86: MMU: Consolidate quickly_check_mmio_pf() and is_mmio_page_fault() These two have only slight differences: - whether 'addr' is of type u64 or of type gva_t - whether they have 'direct' parameter or not Concerning the former, quickly_check_mmio_pf()'s u64 is better because 'addr' needs to be able to have both a guest physical address and a guest virtual address. The latter is just a stylistic issue as we can always calculate the mode from the 'vcpu' as is_mmio_page_fault() does. This patch keeps the parameter to make the following patch cleaner. In addition, the patch renames the function to mmio_info_in_cache() to make it clear what it actually checks for. 
Signed-off-by: Takuya Yoshikawa Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 95a955de5964..a28b734774ac 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3273,7 +3273,7 @@ static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); } -static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) +static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) { if (direct) return vcpu_match_mmio_gpa(vcpu, addr); @@ -3332,7 +3332,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) u64 spte; bool reserved; - if (quickly_check_mmio_pf(vcpu, addr, direct)) + if (mmio_info_in_cache(vcpu, addr, direct)) return RET_MMIO_PF_EMULATE; reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); @@ -4354,19 +4354,12 @@ static void make_mmu_pages_available(struct kvm_vcpu *vcpu) kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); } -static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr) -{ - if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu)) - return vcpu_match_mmio_gpa(vcpu, addr); - - return vcpu_match_mmio_gva(vcpu, addr); -} - int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, void *insn, int insn_len) { int r, emulation_type = EMULTYPE_RETRY; enum emulation_result er; + bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu); r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); if (r < 0) @@ -4377,7 +4370,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, goto out; } - if (is_mmio_page_fault(vcpu, cr2)) + if (mmio_info_in_cache(vcpu, cr2, direct)) emulation_type = 0; er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); -- cgit v1.2.3-58-ga151 From e9ee956e311d3d0a1506995b98e8de0b30773e1a Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Mon, 22 Feb 2016 17:23:41 +0900 Subject: KVM: x86: MMU: Move handle_mmio_page_fault() call to kvm_mmu_page_fault() Rather than placing a handle_mmio_page_fault() call in each vcpu->arch.mmu.page_fault() handler, moving it up to kvm_mmu_page_fault() makes the code better: - avoids code duplication - for kvm_arch_async_page_ready(), which is the other caller of vcpu->arch.mmu.page_fault(), removes an extra error_code check - avoids returning both RET_MMIO_PF_* values and raw integer values from vcpu->arch.mmu.page_fault() Signed-off-by: Takuya Yoshikawa Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 39 ++++++++++++++++----------------------- arch/x86/kvm/paging_tmpl.h | 19 ++++++------------- 2 files changed, 22 insertions(+), 36 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index a28b734774ac..2ce389245bd8 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3370,13 +3370,6 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); - if (unlikely(error_code & PFERR_RSVD_MASK)) { - r = handle_mmio_page_fault(vcpu, gva, true); - - if (likely(r != RET_MMIO_PF_INVALID)) - return r; - } - r = mmu_topup_memory_caches(vcpu); if (r) return r; @@ -3460,13 +3453,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); - if (unlikely(error_code & PFERR_RSVD_MASK)) { - r = 
handle_mmio_page_fault(vcpu, gpa, true); - - if (likely(r != RET_MMIO_PF_INVALID)) - return r; - } - r = mmu_topup_memory_caches(vcpu); if (r) return r; @@ -4361,18 +4347,27 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, enum emulation_result er; bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu); + if (unlikely(error_code & PFERR_RSVD_MASK)) { + r = handle_mmio_page_fault(vcpu, cr2, direct); + if (r == RET_MMIO_PF_EMULATE) { + emulation_type = 0; + goto emulate; + } + if (r == RET_MMIO_PF_RETRY) + return 1; + if (r < 0) + return r; + } + r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); if (r < 0) - goto out; - - if (!r) { - r = 1; - goto out; - } + return r; + if (!r) + return 1; if (mmio_info_in_cache(vcpu, cr2, direct)) emulation_type = 0; - +emulate: er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); switch (er) { @@ -4386,8 +4381,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, default: BUG(); } -out: - return r; } EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 6c9fed957cce..05827ff7bd2e 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -702,23 +702,16 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); - if (unlikely(error_code & PFERR_RSVD_MASK)) { - r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu)); - if (likely(r != RET_MMIO_PF_INVALID)) - return r; - - /* - * page fault with PFEC.RSVD = 1 is caused by shadow - * page fault, should not be used to walk guest page - * table. - */ - error_code &= ~PFERR_RSVD_MASK; - }; - r = mmu_topup_memory_caches(vcpu); if (r) return r; + /* + * If PFEC.RSVD is set, this is a shadow page fault. + * The bit needs to be cleared before walking guest page tables. + */ + error_code &= ~PFERR_RSVD_MASK; + /* * Look up the guest pte for the faulting address. */ -- cgit v1.2.3-58-ga151 From 652fc08dae8e01574cd2497d9a76948e5c9d3920 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 1 Jan 2016 19:47:13 +0800 Subject: KVM: x86: use list_for_each_entry* Use list_for_each_entry*() instead of list_for_each*() to simplify the code. 
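For reference, the difference between the two iterator families boils down to whether the list_entry() conversion is open-coded or folded into the loop macro. A compressed user-space re-implementation (the struct, the hand-rolled insertions and the output are purely illustrative; the real definitions live in <linux/list.h>):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    #define list_for_each(pos, head) \
            for (pos = (head)->next; pos != (head); pos = pos->next)

    #define list_for_each_entry(pos, head, member) \
            for (pos = list_entry((head)->next, typeof(*pos), member); \
                 &pos->member != (head); \
                 pos = list_entry(pos->member.next, typeof(*pos), member))

    struct item { int id; struct list_head list; };

    int main(void)
    {
            struct list_head head = { &head, &head };
            struct item a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };
            struct list_head *ptr;
            struct item *it;

            /* hand-rolled list_add_tail()s, just enough for the demo */
            head.next = &a.list;   a.list.prev = &head;    a.list.next = &b.list;
            b.list.prev = &a.list; b.list.next = &head;    head.prev = &b.list;

            /* old style: iterate raw nodes, convert each one by hand */
            list_for_each(ptr, &head) {
                    it = list_entry(ptr, struct item, list);
                    printf("%d ", it->id);
            }

            /* new style: the conversion is part of the iterator */
            list_for_each_entry(it, &head, list)
                    printf("%d ", it->id);

            printf("\n");           /* prints "1 2 1 2" */
            return 0;
    }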
Signed-off-by: Geliang Tang Signed-off-by: Paolo Bonzini --- arch/x86/kvm/assigned-dev.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/assigned-dev.c b/arch/x86/kvm/assigned-dev.c index 9dc091acd5fb..308b8597c691 100644 --- a/arch/x86/kvm/assigned-dev.c +++ b/arch/x86/kvm/assigned-dev.c @@ -51,11 +51,9 @@ struct kvm_assigned_dev_kernel { static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, int assigned_dev_id) { - struct list_head *ptr; struct kvm_assigned_dev_kernel *match; - list_for_each(ptr, head) { - match = list_entry(ptr, struct kvm_assigned_dev_kernel, list); + list_for_each_entry(match, head, list) { if (match->assigned_dev_id == assigned_dev_id) return match; } @@ -373,14 +371,10 @@ static void kvm_free_assigned_device(struct kvm *kvm, void kvm_free_all_assigned_devices(struct kvm *kvm) { - struct list_head *ptr, *ptr2; - struct kvm_assigned_dev_kernel *assigned_dev; - - list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) { - assigned_dev = list_entry(ptr, - struct kvm_assigned_dev_kernel, - list); + struct kvm_assigned_dev_kernel *assigned_dev, *tmp; + list_for_each_entry_safe(assigned_dev, tmp, + &kvm->arch.assigned_dev_head, list) { kvm_free_assigned_device(kvm, assigned_dev); } } -- cgit v1.2.3-58-ga151 From d74c0e6b54c95ace01f05264a22aed99b565fabb Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 1 Jan 2016 19:47:14 +0800 Subject: KVM: x86: use list_last_entry To make the intention clearer, use list_last_entry instead of list_entry. Signed-off-by: Geliang Tang Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 4 ++-- arch/x86/kvm/vmx.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2ce389245bd8..07f4c26a10d3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2354,8 +2354,8 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, if (list_empty(&kvm->arch.active_mmu_pages)) return false; - sp = list_entry(kvm->arch.active_mmu_pages.prev, - struct kvm_mmu_page, link); + sp = list_last_entry(&kvm->arch.active_mmu_pages, + struct kvm_mmu_page, link); kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); return true; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9f08037ef14b..aa16d5874fe6 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6445,8 +6445,8 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { /* Recycle the least recently used VMCS. */ - item = list_entry(vmx->nested.vmcs02_pool.prev, - struct vmcs02_list, list); + item = list_last_entry(&vmx->nested.vmcs02_pool, + struct vmcs02_list, list); item->vmptr = vmx->nested.current_vmptr; list_move(&item->list, &vmx->nested.vmcs02_pool); return &item->vmcs02; -- cgit v1.2.3-58-ga151 From bd7f561f76563f0b21701628874d8adc863b0c25 Mon Sep 17 00:00:00 2001 From: Suresh Warrier Date: Thu, 17 Dec 2015 14:59:03 -0600 Subject: powerpc/smp: Support more IPI messages This patch increases the number of demuxed messages for a controller with a single ipi to 8 for 64-bit systems. This is required because we want to use the IPI mechanism to send messages from a CPU running in KVM real mode in a guest to a CPU in the host to take some action. Currently, we only support 4 messages and all 4 are already taken. Define a fifth message PPC_MSG_RM_HOST_ACTION for this purpose. 
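The widening works because the messages word is written as an array of bytes on the sender side and tested as a bitmask on the receiver side, so a 64-bit long gives eight one-byte slots. A small stand-alone illustration (BITS_PER_LONG is hard-coded to 64 and message number 4 simply mirrors the new PPC_MSG_RM_HOST_ACTION slot; run it on a machine whose endianness matches the variant you compile):

    #include <stdio.h>

    #define BITS_PER_LONG 64
    #ifdef DEMO_BIG_ENDIAN
    #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
    #else
    #define IPI_MESSAGE(A) (1uL << (8 * (A)))
    #endif

    int main(void)
    {
            unsigned long messages = 0;
            char *bytes = (char *)&messages;

            /* sender side: one byte per message type (message number 4) */
            bytes[4] = 1;

            /* receiver side: the demux loop tests the corresponding bit */
            printf("message 4 pending: %d\n", !!(messages & IPI_MESSAGE(4)));
            return 0;
    }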
Signed-off-by: Suresh Warrier Acked-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/smp.h | 3 +++ arch/powerpc/kernel/smp.c | 8 ++++---- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 825663c30945..9ef9c37cb398 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -114,6 +114,9 @@ extern int cpu_to_core_id(int cpu); #define PPC_MSG_TICK_BROADCAST 2 #define PPC_MSG_DEBUGGER_BREAK 3 +/* This is only used by the powernv kernel */ +#define PPC_MSG_RM_HOST_ACTION 4 + /* for irq controllers that have dedicated ipis per message (4) */ extern int smp_request_message_ipi(int virq, int message); extern const char *smp_ipi_name[]; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index ec9ec2058d2d..a53a13047330 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -206,7 +206,7 @@ int smp_request_message_ipi(int virq, int msg) #ifdef CONFIG_PPC_SMP_MUXED_IPI struct cpu_messages { - int messages; /* current messages */ + long messages; /* current messages */ unsigned long data; /* data for cause ipi */ }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message); @@ -236,15 +236,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg) } #ifdef __BIG_ENDIAN__ -#define IPI_MESSAGE(A) (1 << (24 - 8 * (A))) +#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A))) #else -#define IPI_MESSAGE(A) (1 << (8 * (A))) +#define IPI_MESSAGE(A) (1uL << (8 * (A))) #endif irqreturn_t smp_ipi_demux(void) { struct cpu_messages *info = this_cpu_ptr(&ipi_message); - unsigned int all; + unsigned long all; mb(); /* order any irq clear */ -- cgit v1.2.3-58-ga151 From 31639c77e0a7f9f742c813ae697f337b44981ed2 Mon Sep 17 00:00:00 2001 From: Suresh Warrier Date: Thu, 17 Dec 2015 14:59:04 -0600 Subject: powerpc/smp: Add smp_muxed_ipi_set_message smp_muxed_ipi_message_pass() invokes smp_ops->cause_ipi, which uses an ioremapped address to access registers on the XICS interrupt controller to cause the IPI. Because of this real mode callers cannot call smp_muxed_ipi_message_pass() for IPI messaging. This patch creates a separate function smp_muxed_ipi_set_message just to set the IPI message without the cause_ipi routine. After calling this function to set the IPI message, real mode callers must cause the IPI by writing to the XICS registers directly. As part of this, we also change smp_muxed_ipi_message_pass to call smp_muxed_ipi_set_message to set the message instead of doing it directly inside the routine. 
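Putting the two halves together, the intended call sequence for a real-mode caller looks roughly like the sketch below (this is only an outline of how the helpers introduced in this series fit together; the concrete in-tree example is icp_send_hcore_msg in the H_IPI redirection patch further down):

    /* real mode: cannot go through smp_ops->cause_ipi (ioremapped MMIO),
     * so set the message byte and then poke the XICS MFRR directly */
    smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
    icp_native_cause_ipi_rm(hcpu);

    /* ordinary virtual-mode callers keep using the combined helper */
    smp_muxed_ipi_message_pass(cpu, PPC_MSG_RESCHEDULE);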
Signed-off-by: Suresh Warrier Acked-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/smp.h | 1 + arch/powerpc/kernel/smp.c | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 9ef9c37cb398..78083ed20792 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -124,6 +124,7 @@ extern const char *smp_ipi_name[]; /* for irq controllers with only a single ipi */ extern void smp_muxed_ipi_set_data(int cpu, unsigned long data); extern void smp_muxed_ipi_message_pass(int cpu, int msg); +extern void smp_muxed_ipi_set_message(int cpu, int msg); extern irqreturn_t smp_ipi_demux(void); void smp_init_pSeries(void); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a53a13047330..e222efcf6aef 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -218,7 +218,7 @@ void smp_muxed_ipi_set_data(int cpu, unsigned long data) info->data = data; } -void smp_muxed_ipi_message_pass(int cpu, int msg) +void smp_muxed_ipi_set_message(int cpu, int msg) { struct cpu_messages *info = &per_cpu(ipi_message, cpu); char *message = (char *)&info->messages; @@ -228,6 +228,13 @@ void smp_muxed_ipi_message_pass(int cpu, int msg) */ smp_mb(); message[msg] = 1; +} + +void smp_muxed_ipi_message_pass(int cpu, int msg) +{ + struct cpu_messages *info = &per_cpu(ipi_message, cpu); + + smp_muxed_ipi_set_message(cpu, msg); /* * cause_ipi functions are required to include a full barrier * before doing whatever causes the IPI. -- cgit v1.2.3-58-ga151 From ec13e9b6b13d66c54951fec7f1158bf85f68fecd Mon Sep 17 00:00:00 2001 From: Suresh Warrier Date: Thu, 17 Dec 2015 14:59:05 -0600 Subject: powerpc/xics: Add icp_native_cause_ipi_rm Function to cause an IPI by directly updating the MFFR register in the XICS. The function is meant for real-mode callers since they cannot use the smp_ops->cause_ipi function which uses an ioremapped address. Normal usage is for the the KVM real mode code to set the IPI message using smp_muxed_ipi_message_pass and then invoke icp_native_cause_ipi_rm to cause the actual IPI. The function requires kvm_hstate.xics_phys to have been initialized with the physical address of XICS. Signed-off-by: Suresh Warrier Acked-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/xics.h | 1 + arch/powerpc/sysdev/xics/icp-native.c | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index 0e25bdb190bb..254604856e69 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h @@ -30,6 +30,7 @@ #ifdef CONFIG_PPC_ICP_NATIVE extern int icp_native_init(void); extern void icp_native_flush_interrupt(void); +extern void icp_native_cause_ipi_rm(int cpu); #else static inline int icp_native_init(void) { return -ENODEV; } #endif diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c index eae32654bdf2..afdf62f2a695 100644 --- a/arch/powerpc/sysdev/xics/icp-native.c +++ b/arch/powerpc/sysdev/xics/icp-native.c @@ -159,6 +159,27 @@ static void icp_native_cause_ipi(int cpu, unsigned long data) icp_native_set_qirr(cpu, IPI_PRIORITY); } +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +void icp_native_cause_ipi_rm(int cpu) +{ + /* + * Currently not used to send IPIs to another CPU + * on the same core. Only caller is KVM real mode. 
+ * Need the physical address of the XICS to be + * previously saved in kvm_hstate in the paca. + */ + unsigned long xics_phys; + + /* + * Just like the cause_ipi functions, it is required to + * include a full barrier (out8 includes a sync) before + * causing the IPI. + */ + xics_phys = paca[cpu].kvm_hstate.xics_phys; + out_rm8((u8 *)(xics_phys + XICS_MFRR), IPI_PRIORITY); +} +#endif + /* * Called when an interrupt is received on an off-line CPU to * clear the interrupt, so that the CPU can go back to nap mode. -- cgit v1.2.3-58-ga151 From 79b6c247e9afe35714c1f83cfcecf40a438ca4a4 Mon Sep 17 00:00:00 2001 From: Suresh Warrier Date: Thu, 17 Dec 2015 14:59:06 -0600 Subject: KVM: PPC: Book3S HV: Host-side RM data structures This patch defines the data structures to support the setting up of host side operations while running in real mode in the guest, and also the functions to allocate and free it. The operations are for now limited to virtual XICS operations. Currently, we have only defined one operation in the data structure: - Wake up a VCPU sleeping in the host when it receives a virtual interrupt The operations are assigned at the core level because PowerKVM requires that the host run in SMT off mode. For each core, we will need to manage its state atomically - where the state is defined by: 1. Is the core running in the host? 2. Is there a Real Mode (RM) operation pending on the host? Currently, core state is only managed at the whole-core level even when the system is in split-core mode. This just limits the number of free or "available" cores in the host to perform any host-side operations. The kvmppc_host_rm_core.rm_data allows any data to be passed by KVM in real mode to the host core along with the operation to be performed. The kvmppc_host_rm_ops structure is allocated the very first time a guest VM is started. Initial core state is also set - all online cores are in the host. This structure is never deleted, not even when there are no active guests. However, it needs to be freed when the module is unloaded because the kvmppc_host_rm_ops_hv can contain function pointers to kvm-hv.ko functions for the different supported host operations. 
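The "managed atomically" part relies on overlaying the two 32-bit fields on a single machine word, so a host core can be claimed for an action with one compare-and-swap. A compressed user-space sketch of that claim pattern (the action value 1 is a placeholder for a constant such as XICS_RM_KICK_VCPU, and __sync_bool_compare_and_swap stands in for the kernel's cmpxchg64):

    #include <stdint.h>
    #include <stdio.h>

    /* same shape as kvmppc_rm_state in the patch below */
    union rm_state {
            uint64_t raw;
            struct {
                    uint32_t in_host;
                    uint32_t rm_action;
            };
    };

    int main(void)
    {
            union rm_state cur = { 0 }, old, new;

            cur.in_host = 1;          /* core is running in the host, idle */

            old = cur;                /* state we expect to find           */
            new = old;
            new.rm_action = 1;        /* placeholder action value          */

            /* claim the core only if nobody else got there first */
            if (__sync_bool_compare_and_swap(&cur.raw, old.raw, new.raw))
                    printf("claimed: in_host=%u rm_action=%u\n",
                           cur.in_host, cur.rm_action);
            return 0;
    }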
Signed-off-by: Suresh Warrier Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 31 ++++++++++++++++ arch/powerpc/kvm/book3s_hv.c | 70 ++++++++++++++++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv_builtin.c | 3 ++ 3 files changed, 104 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 4cadee590deb..ded8ddac7dcf 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -453,6 +453,8 @@ static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu) { return vcpu->arch.irq_type == KVMPPC_IRQ_XICS; } +extern void kvmppc_alloc_host_rm_ops(void); +extern void kvmppc_free_host_rm_ops(void); extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu); extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server); extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args); @@ -462,6 +464,8 @@ extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval); extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu, u32 cpu); #else +static inline void kvmppc_alloc_host_rm_ops(void) {}; +static inline void kvmppc_free_host_rm_ops(void) {}; static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu) { return 0; } static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { } @@ -475,6 +479,33 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) { return 0; } #endif +/* + * Host-side operations we want to set up while running in real + * mode in the guest operating on the xics. + * Currently only VCPU wakeup is supported. + */ + +union kvmppc_rm_state { + unsigned long raw; + struct { + u32 in_host; + u32 rm_action; + }; +}; + +struct kvmppc_host_rm_core { + union kvmppc_rm_state rm_state; + void *rm_data; + char pad[112]; +}; + +struct kvmppc_host_rm_ops { + struct kvmppc_host_rm_core *rm_core; + void (*vcpu_kick)(struct kvm_vcpu *vcpu); +}; + +extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv; + static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu) { #ifdef CONFIG_KVM_BOOKE_HV diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 33b491e6f666..8b3332fb9ed2 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3008,6 +3008,73 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) goto out_srcu; } +#ifdef CONFIG_KVM_XICS +/* + * Allocate a per-core structure for managing state about which cores are + * running in the host versus the guest and for exchanging data between + * real mode KVM and CPU running in the host. + * This is only done for the first VM. + * The allocated structure stays even if all VMs have stopped. + * It is only freed when the kvm-hv module is unloaded. + * It's OK for this routine to fail, we just don't support host + * core operations like redirecting H_IPI wakeups. + */ +void kvmppc_alloc_host_rm_ops(void) +{ + struct kvmppc_host_rm_ops *ops; + unsigned long l_ops; + int cpu, core; + int size; + + /* Not the first time here ? 
*/ + if (kvmppc_host_rm_ops_hv != NULL) + return; + + ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL); + if (!ops) + return; + + size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core); + ops->rm_core = kzalloc(size, GFP_KERNEL); + + if (!ops->rm_core) { + kfree(ops); + return; + } + + for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) { + if (!cpu_online(cpu)) + continue; + + core = cpu >> threads_shift; + ops->rm_core[core].rm_state.in_host = 1; + } + + /* + * Make the contents of the kvmppc_host_rm_ops structure visible + * to other CPUs before we assign it to the global variable. + * Do an atomic assignment (no locks used here), but if someone + * beats us to it, just free our copy and return. + */ + smp_wmb(); + l_ops = (unsigned long) ops; + + if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) { + kfree(ops->rm_core); + kfree(ops); + } +} + +void kvmppc_free_host_rm_ops(void) +{ + if (kvmppc_host_rm_ops_hv) { + kfree(kvmppc_host_rm_ops_hv->rm_core); + kfree(kvmppc_host_rm_ops_hv); + kvmppc_host_rm_ops_hv = NULL; + } +} +#endif + static int kvmppc_core_init_vm_hv(struct kvm *kvm) { unsigned long lpcr, lpid; @@ -3020,6 +3087,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) return -ENOMEM; kvm->arch.lpid = lpid; + kvmppc_alloc_host_rm_ops(); + /* * Since we don't flush the TLB when tearing down a VM, * and this lpid might have previously been used, @@ -3253,6 +3322,7 @@ static int kvmppc_book3s_init_hv(void) static void kvmppc_book3s_exit_hv(void) { + kvmppc_free_host_rm_ops(); kvmppc_hv_ops = NULL; } diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index fd7006bf6b1a..5f0380db3eab 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -283,3 +283,6 @@ void kvmhv_commence_exit(int trap) kvmhv_interrupt_vcore(vc, ee); } } + +struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv; +EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv); -- cgit v1.2.3-58-ga151 From b8e6a87c82927ed9ccf0f3ee42946a41cb9d75fe Mon Sep 17 00:00:00 2001 From: Suresh Warrier Date: Thu, 17 Dec 2015 14:59:07 -0600 Subject: KVM: PPC: Book3S HV: Manage core host state Update the core host state in kvmppc_host_rm_ops whenever the primary thread of the core enters the guest or returns back. Signed-off-by: Suresh Warrier Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8b3332fb9ed2..542ec97129b4 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2302,6 +2302,46 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) spin_unlock(&vc->lock); } +/* + * Clear core from the list of active host cores as we are about to + * enter the guest. Only do this if it is the primary thread of the + * core (not if a subcore) that is entering the guest. + */ +static inline void kvmppc_clear_host_core(int cpu) +{ + int core; + + if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) + return; + /* + * Memory barrier can be omitted here as we will do a smp_wmb() + * later in kvmppc_start_thread and we need ensure that state is + * visible to other CPUs only after we enter guest. 
+ */ + core = cpu >> threads_shift; + kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0; +} + +/* + * Advertise this core as an active host core since we exited the guest + * Only need to do this if it is the primary thread of the core that is + * exiting. + */ +static inline void kvmppc_set_host_core(int cpu) +{ + int core; + + if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) + return; + + /* + * Memory barrier can be omitted here because we do a spin_unlock + * immediately after this which provides the memory barrier. + */ + core = cpu >> threads_shift; + kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1; +} + /* * Run a set of guest threads on a physical core. * Called with vc->lock held. @@ -2414,6 +2454,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) } } + kvmppc_clear_host_core(pcpu); + /* Start all the threads */ active = 0; for (sub = 0; sub < core_info.n_subcores; ++sub) { @@ -2510,6 +2552,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) kvmppc_ipi_thread(pcpu + i); } + kvmppc_set_host_core(pcpu); + spin_unlock(&vc->lock); /* make sure updates to secondary vcpu structs are visible now */ -- cgit v1.2.3-58-ga151 From 6f3bb80944148012cbac1f98da249f591cbcae43 Mon Sep 17 00:00:00 2001 From: Suresh Warrier Date: Thu, 17 Dec 2015 14:59:08 -0600 Subject: KVM: PPC: Book3S HV: kvmppc_host_rm_ops - handle offlining CPUs The kvmppc_host_rm_ops structure keeps track of which cores are are in the host by maintaining a bitmask of active/runnable online CPUs that have not entered the guest. This patch adds support to manage the bitmask when a CPU is offlined or onlined in the host. Signed-off-by: Suresh Warrier Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 542ec97129b4..16304d2c0cb7 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3053,6 +3053,36 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) } #ifdef CONFIG_KVM_XICS +static int kvmppc_cpu_notify(struct notifier_block *self, unsigned long action, + void *hcpu) +{ + unsigned long cpu = (long)hcpu; + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + kvmppc_set_host_core(cpu); + break; + +#ifdef CONFIG_HOTPLUG_CPU + case CPU_DEAD: + case CPU_DEAD_FROZEN: + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: + kvmppc_clear_host_core(cpu); + break; +#endif + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block kvmppc_cpu_notifier = { + .notifier_call = kvmppc_cpu_notify, +}; + /* * Allocate a per-core structure for managing state about which cores are * running in the host versus the guest and for exchanging data between @@ -3086,6 +3116,8 @@ void kvmppc_alloc_host_rm_ops(void) return; } + get_online_cpus(); + for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) { if (!cpu_online(cpu)) continue; @@ -3104,14 +3136,21 @@ void kvmppc_alloc_host_rm_ops(void) l_ops = (unsigned long) ops; if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) { + put_online_cpus(); kfree(ops->rm_core); kfree(ops); + return; } + + register_cpu_notifier(&kvmppc_cpu_notifier); + + put_online_cpus(); } void kvmppc_free_host_rm_ops(void) { if (kvmppc_host_rm_ops_hv) { + unregister_cpu_notifier(&kvmppc_cpu_notifier); kfree(kvmppc_host_rm_ops_hv->rm_core); kfree(kvmppc_host_rm_ops_hv); kvmppc_host_rm_ops_hv = NULL; -- 
cgit v1.2.3-58-ga151 From 0c2a66062470cd1f6d11ae6db31059f59d3f725f Mon Sep 17 00:00:00 2001 From: Suresh Warrier Date: Thu, 17 Dec 2015 14:59:09 -0600 Subject: KVM: PPC: Book3S HV: Host side kick VCPU when poked by real-mode KVM This patch adds the support for the kick VCPU operation for kvmppc_host_rm_ops. The kvmppc_xics_ipi_action() function provides the function to be invoked for a host side operation when poked by the real mode KVM. This is initiated by KVM by sending an IPI to any free host core. KVM real mode must set the rm_action to XICS_RM_KICK_VCPU and rm_data to point to the VCPU to be woken up before sending the IPI. Note that we have allocated one kvmppc_host_rm_core structure per core. The above values need to be set in the structure corresponding to the core to which the IPI will be sent. Signed-off-by: Suresh Warrier Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s_hv.c | 2 ++ arch/powerpc/kvm/book3s_hv_rm_xics.c | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index ded8ddac7dcf..bc14e9e0e4fe 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -463,6 +463,7 @@ extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu); extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval); extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu, u32 cpu); +extern void kvmppc_xics_ipi_action(void); #else static inline void kvmppc_alloc_host_rm_ops(void) {}; static inline void kvmppc_free_host_rm_ops(void) {}; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 16304d2c0cb7..c3c731085c1f 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3126,6 +3126,8 @@ void kvmppc_alloc_host_rm_ops(void) ops->rm_core[core].rm_state.in_host = 1; } + ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv; + /* * Make the contents of the kvmppc_host_rm_ops structure visible * to other CPUs before we assign it to the global variable. diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 24f58076d49e..43ffbfe2a18a 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "book3s_xics.h" @@ -623,3 +624,38 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) bail: return check_too_hard(xics, icp); } + +/* --- Non-real mode XICS-related built-in routines --- */ + +/** + * Host Operations poked by RM KVM + */ +static void rm_host_ipi_action(int action, void *data) +{ + switch (action) { + case XICS_RM_KICK_VCPU: + kvmppc_host_rm_ops_hv->vcpu_kick(data); + break; + default: + WARN(1, "Unexpected rm_action=%d data=%p\n", action, data); + break; + } + +} + +void kvmppc_xics_ipi_action(void) +{ + int core; + unsigned int cpu = smp_processor_id(); + struct kvmppc_host_rm_core *rm_corep; + + core = cpu >> threads_shift; + rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core]; + + if (rm_corep->rm_data) { + rm_host_ipi_action(rm_corep->rm_state.rm_action, + rm_corep->rm_data); + rm_corep->rm_data = NULL; + rm_corep->rm_state.rm_action = 0; + } +} -- cgit v1.2.3-58-ga151 From e17769eb8c897101e2c6df62ec397e450b6e53b4 Mon Sep 17 00:00:00 2001 From: "Suresh E. 
Warrier" Date: Mon, 21 Dec 2015 16:22:51 -0600 Subject: KVM: PPC: Book3S HV: Send IPI to host core to wake VCPU This patch adds support to real-mode KVM to search for a core running in the host partition and send it an IPI message with VCPU to be woken. This avoids having to switch to the host partition to complete an H_IPI hypercall when the VCPU which is the target of the the H_IPI is not loaded (is not running in the guest). The patch also includes the support in the IPI handler running in the host to do the wakeup by calling kvmppc_xics_ipi_action for the PPC_MSG_RM_HOST_ACTION message. When a guest is being destroyed, we need to ensure that there are no pending IPIs waiting to wake up a VCPU before we free the VCPUs of the guest. This is accomplished by: - Forces a PPC_MSG_CALL_FUNCTION IPI to be completed by all CPUs before freeing any VCPUs in kvm_arch_destroy_vm(). - Any PPC_MSG_RM_HOST_ACTION messages must be executed first before any other PPC_MSG_CALL_FUNCTION messages. Signed-off-by: Suresh Warrier Acked-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/smp.c | 11 +++++ arch/powerpc/kvm/book3s_hv_rm_xics.c | 92 ++++++++++++++++++++++++++++++++++-- arch/powerpc/kvm/powerpc.c | 10 ++++ 3 files changed, 110 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index e222efcf6aef..cb8be5dc118a 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -257,6 +257,17 @@ irqreturn_t smp_ipi_demux(void) do { all = xchg(&info->messages, 0); +#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) + /* + * Must check for PPC_MSG_RM_HOST_ACTION messages + * before PPC_MSG_CALL_FUNCTION messages because when + * a VM is destroyed, we call kick_all_cpus_sync() + * to ensure that any pending PPC_MSG_RM_HOST_ACTION + * messages have completed before we free any VCPUs. + */ + if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION)) + kvmppc_xics_ipi_action(); +#endif if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION)) generic_smp_call_function_interrupt(); if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE)) diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 43ffbfe2a18a..e673fb9fee98 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -51,11 +51,84 @@ static void ics_rm_check_resend(struct kvmppc_xics *xics, /* -- ICP routines -- */ +#ifdef CONFIG_SMP +static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) +{ + int hcpu; + + hcpu = hcore << threads_shift; + kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu; + smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION); + icp_native_cause_ipi_rm(hcpu); +} +#else +static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { } +#endif + +/* + * We start the search from our current CPU Id in the core map + * and go in a circle until we get back to our ID looking for a + * core that is running in host context and that hasn't already + * been targeted for another rm_host_ops. 
+ * + * In the future, could consider using a fairer algorithm (one + * that distributes the IPIs better) + * + * Returns -1, if no CPU could be found in the host + * Else, returns a CPU Id which has been reserved for use + */ +static inline int grab_next_hostcore(int start, + struct kvmppc_host_rm_core *rm_core, int max, int action) +{ + bool success; + int core; + union kvmppc_rm_state old, new; + + for (core = start + 1; core < max; core++) { + old = new = READ_ONCE(rm_core[core].rm_state); + + if (!old.in_host || old.rm_action) + continue; + + /* Try to grab this host core if not taken already. */ + new.rm_action = action; + + success = cmpxchg64(&rm_core[core].rm_state.raw, + old.raw, new.raw) == old.raw; + if (success) { + /* + * Make sure that the store to the rm_action is made + * visible before we return to caller (and the + * subsequent store to rm_data) to synchronize with + * the IPI handler. + */ + smp_wmb(); + return core; + } + } + + return -1; +} + +static inline int find_available_hostcore(int action) +{ + int core; + int my_core = smp_processor_id() >> threads_shift; + struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core; + + core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action); + if (core == -1) + core = grab_next_hostcore(core, rm_core, my_core, action); + + return core; +} + static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, struct kvm_vcpu *this_vcpu) { struct kvmppc_icp *this_icp = this_vcpu->arch.icp; int cpu; + int hcore; /* Mark the target VCPU as having an interrupt pending */ vcpu->stat.queue_intr++; @@ -67,11 +140,22 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, return; } - /* Check if the core is loaded, if not, too hard */ + /* + * Check if the core is loaded, + * if not, find an available host core to post to wake the VCPU, + * if we can't find one, set up state to eventually return too hard. + */ cpu = vcpu->arch.thread_cpu; if (cpu < 0 || cpu >= nr_cpu_ids) { - this_icp->rm_action |= XICS_RM_KICK_VCPU; - this_icp->rm_kick_target = vcpu; + hcore = -1; + if (kvmppc_host_rm_ops_hv) + hcore = find_available_hostcore(XICS_RM_KICK_VCPU); + if (hcore != -1) { + icp_send_hcore_msg(hcore, vcpu); + } else { + this_icp->rm_action |= XICS_RM_KICK_VCPU; + this_icp->rm_kick_target = vcpu; + } return; } @@ -655,7 +739,9 @@ void kvmppc_xics_ipi_action(void) if (rm_corep->rm_data) { rm_host_ipi_action(rm_corep->rm_state.rm_action, rm_corep->rm_data); + /* Order these stores against the real mode KVM */ rm_corep->rm_data = NULL; + smp_wmb(); rm_corep->rm_state.rm_action = 0; } } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 69f897da782d..9258675e2ff7 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -437,6 +437,16 @@ void kvm_arch_destroy_vm(struct kvm *kvm) unsigned int i; struct kvm_vcpu *vcpu; +#ifdef CONFIG_KVM_XICS + /* + * We call kick_all_cpus_sync() to ensure that all + * CPUs have executed any pending IPIs before we + * continue and free VCPUs structures below. + */ + if (is_kvmppc_hv_enabled(kvm)) + kick_all_cpus_sync(); +#endif + kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_free(vcpu); -- cgit v1.2.3-58-ga151 From 520fe9c607d3acea96391aad27e17518bd7d39bd Mon Sep 17 00:00:00 2001 From: "Suresh E. Warrier" Date: Mon, 21 Dec 2015 16:33:57 -0600 Subject: KVM: PPC: Book3S HV: Add tunable to control H_IPI redirection Redirecting the wakeup of a VCPU from the H_IPI hypercall to a core running in the host is usually a good idea, most workloads seemed to benefit. 
However, in one heavily interrupt-driven SMT1 workload, some regression was observed. This patch adds a kvm_hv module parameter called h_ipi_redirect to control this feature. The default value for this tunable is 1 - that is enable the feature. Signed-off-by: Suresh Warrier Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s_hv.c | 11 +++++++++++ arch/powerpc/kvm/book3s_hv_rm_xics.c | 5 ++++- 3 files changed, 16 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index bc14e9e0e4fe..197a8aca2871 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -464,6 +464,7 @@ extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval); extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu, u32 cpu); extern void kvmppc_xics_ipi_action(void); +extern int h_ipi_redirect; #else static inline void kvmppc_alloc_host_rm_ops(void) {}; static inline void kvmppc_free_host_rm_ops(void) {}; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index c3c731085c1f..f47fffefadc1 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -81,6 +81,17 @@ static int target_smt_mode; module_param(target_smt_mode, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)"); +#ifdef CONFIG_KVM_XICS +static struct kernel_param_ops module_param_ops = { + .set = param_set_int, + .get = param_get_int, +}; + +module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core"); +#endif + static void kvmppc_end_cede(struct kvm_vcpu *vcpu); static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index e673fb9fee98..980d8a6f7284 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -24,6 +24,9 @@ #define DEBUG_PASSUP +int h_ipi_redirect = 1; +EXPORT_SYMBOL(h_ipi_redirect); + static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, u32 new_irq); @@ -148,7 +151,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, cpu = vcpu->arch.thread_cpu; if (cpu < 0 || cpu >= nr_cpu_ids) { hcore = -1; - if (kvmppc_host_rm_ops_hv) + if (kvmppc_host_rm_ops_hv && h_ipi_redirect) hcore = find_available_hostcore(XICS_RM_KICK_VCPU); if (hcore != -1) { icp_send_hcore_msg(hcore, vcpu); -- cgit v1.2.3-58-ga151 From 35a2491a624af1fa7ab6990639f5246cd5f12592 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Feb 2016 17:54:35 +0000 Subject: arm/arm64: KVM: Add hook for C-based stage2 init As we're about to move the stage2 init to C code, introduce some C hooks that will later be populated with arch-specific implementations. 
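To make "later be populated" concrete: one plausible shape for an architecture-specific version is a thin wrapper that jumps to a HYP helper which programs the stage-2 translation controls (VTCR_EL2 on arm64). The helper name below is an assumption used purely for illustration; the hooks added by this patch stay empty.

/* Illustrative sketch only, not part of this patch. */
static inline void __cpu_init_stage2(void)
{
	kvm_call_hyp(__init_stage2_translation);
}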
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 4 ++++ arch/arm/kvm/arm.c | 1 + arch/arm64/include/asm/kvm_host.h | 4 ++++ 3 files changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index f9f27792d8ed..f1e86f1eb2e5 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -220,6 +220,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); } +static inline void __cpu_init_stage2(void) +{ +} + static inline int kvm_arch_dev_ioctl_check_extension(long ext) { return 0; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index dda1959f0dde..6b76e0152e58 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -985,6 +985,7 @@ static void cpu_init_hyp_mode(void *dummy) vector_ptr = (unsigned long)__kvm_hyp_vector; __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); + __cpu_init_stage2(); kvm_arm_init_debug(); } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 689d4c95e12f..fe86cf9f288b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -332,6 +332,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, hyp_stack_ptr, vector_ptr); } +static inline void __cpu_init_stage2(void) +{ +} + static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} -- cgit v1.2.3-58-ga151 From 1a61ae7af4d65ee311a737d550da6cf92a3aea4c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 13:57:18 +0000 Subject: ARM: KVM: Move the HYP code to its own section In order to be able to spread the HYP code into multiple compilation units, adopt a layout similar to that of arm64: - the HYP text is emited in its own section (.hyp.text) - two linker generated symbols are use to identify the boundaries of that section No functionnal change. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 6 ++++-- arch/arm/include/asm/virt.h | 4 ++++ arch/arm/kernel/vmlinux.lds.S | 6 ++++++ arch/arm/kvm/interrupts.S | 13 +++++-------- 4 files changed, 19 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 194c91b610ff..fa2fd253974f 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -19,6 +19,8 @@ #ifndef __ARM_KVM_ASM_H__ #define __ARM_KVM_ASM_H__ +#include + /* 0 is reserved as an invalid value. 
*/ #define c0_MPIDR 1 /* MultiProcessor ID Register */ #define c0_CSSELR 2 /* Cache Size Selection Register */ @@ -91,8 +93,8 @@ extern char __kvm_hyp_exit_end[]; extern char __kvm_hyp_vector[]; -extern char __kvm_hyp_code_start[]; -extern char __kvm_hyp_code_end[]; +#define __kvm_hyp_code_start __hyp_text_start +#define __kvm_hyp_code_end __hyp_text_end extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 4371f45c5784..5fdbfea6defb 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -74,6 +74,10 @@ static inline bool is_hyp_mode_mismatched(void) { return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); } + +/* The section containing the hypervisor text */ +extern char __hyp_text_start[]; +extern char __hyp_text_end[]; #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 8b60fde5ce48..b4139cbbbdd9 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -18,6 +18,11 @@ *(.proc.info.init) \ VMLINUX_SYMBOL(__proc_info_end) = .; +#define HYPERVISOR_TEXT \ + VMLINUX_SYMBOL(__hyp_text_start) = .; \ + *(.hyp.text) \ + VMLINUX_SYMBOL(__hyp_text_end) = .; + #define IDMAP_TEXT \ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__idmap_text_start) = .; \ @@ -108,6 +113,7 @@ SECTIONS TEXT_TEXT SCHED_TEXT LOCK_TEXT + HYPERVISOR_TEXT KPROBES_TEXT *(.gnu.warning) *(.glue_7) diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 900ef6dd8f72..9d9cb71df449 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -28,9 +28,7 @@ #include "interrupts_head.S" .text - -__kvm_hyp_code_start: - .globl __kvm_hyp_code_start + .pushsection .hyp.text, "ax" /******************************************************************** * Flush per-VMID TLBs @@ -314,8 +312,6 @@ THUMB( orr r2, r2, #PSR_T_BIT ) eret .endm - .text - .align 5 __kvm_hyp_vector: .globl __kvm_hyp_vector @@ -511,10 +507,9 @@ hyp_fiq: .ltorg -__kvm_hyp_code_end: - .globl __kvm_hyp_code_end + .popsection - .section ".rodata" + .pushsection ".rodata" und_die_str: .ascii "unexpected undefined exception in Hyp mode at: %#08x\n" @@ -524,3 +519,5 @@ dabt_die_str: .ascii "unexpected data abort in Hyp mode at: %#08x\n" svc_die_str: .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n" + + .popsection -- cgit v1.2.3-58-ga151 From 42428525a9eefea9dda68de684381ce9f3dc4266 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 14:04:48 +0000 Subject: ARM: KVM: Remove __kvm_hyp_code_start/__kvm_hyp_code_end Now that we've unified the way we refer to the HYP text between arm and arm64, drop __kvm_hyp_code_start/end, and just use the __hyp_text_start/end symbols. 
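The section boundary symbols are ordinary linker-provided addresses, so C code treats them as the start and one-past-the-end of .hyp.text. A minimal sketch of the pattern (the size helper is illustrative, not part of the patch):

/* Emitted by the HYPERVISOR_TEXT fragment in vmlinux.lds.S */
extern char __hyp_text_start[];
extern char __hyp_text_end[];

/* Illustrative: byte size of the HYP text, e.g. for mapping or sanity checks */
static inline unsigned long hyp_text_size(void)
{
	return (unsigned long)(__hyp_text_end - __hyp_text_start);
}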
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 3 --- arch/arm/kvm/arm.c | 2 +- arch/arm64/include/asm/kvm_asm.h | 3 --- 3 files changed, 1 insertion(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index fa2fd253974f..4841225d10ea 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -93,9 +93,6 @@ extern char __kvm_hyp_exit_end[]; extern char __kvm_hyp_vector[]; -#define __kvm_hyp_code_start __hyp_text_start -#define __kvm_hyp_code_end __hyp_text_end - extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 6b76e0152e58..fcf6c130c986 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1075,7 +1075,7 @@ static int init_hyp_mode(void) /* * Map the Hyp-code called directly from the host */ - err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); + err = create_hyp_mappings(__hyp_text_start, __hyp_text_end); if (err) { kvm_err("Cannot map world-switch code\n"); goto out_free_mappings; diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 52b777b7d407..2ad8930e7eb3 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -35,9 +35,6 @@ extern char __kvm_hyp_init_end[]; extern char __kvm_hyp_vector[]; -#define __kvm_hyp_code_start __hyp_text_start -#define __kvm_hyp_code_end __hyp_text_end - extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); -- cgit v1.2.3-58-ga151 From 0ca5565df8ef7534c0d85ec87e6c74f8ebe86e88 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 11:01:49 +0000 Subject: ARM: KVM: Move VFP registers to a CPU context structure In order to turn the WS code into something that looks a bit more like the arm64 version, move the VFP registers into a CPU context container for both the host and the guest. 
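The guest's context is now embedded in the vcpu (vcpu->arch.ctxt), while the host side remains behind the existing host_cpu_context pointer. In practice that pointer refers to a per-CPU kvm_cpu_context; a sketch of how it is typically wired up (the per-CPU variable name is an assumption for illustration):

/* One host context per physical CPU; variable name assumed for the sketch. */
static DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);

/* e.g. when a vcpu is loaded onto a CPU: */
vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);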
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 11 +++++++---- arch/arm/kernel/asm-offsets.c | 5 +++-- arch/arm/kvm/coproc.c | 20 ++++++++++---------- arch/arm/kvm/interrupts.S | 10 ++++++---- 4 files changed, 26 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index f1e86f1eb2e5..b64ac8e4adaa 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -88,9 +88,15 @@ struct kvm_vcpu_fault_info { u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; -typedef struct vfp_hard_struct kvm_cpu_context_t; +struct kvm_cpu_context { + struct vfp_hard_struct vfp; +}; + +typedef struct kvm_cpu_context kvm_cpu_context_t; struct kvm_vcpu_arch { + struct kvm_cpu_context ctxt; + struct kvm_regs regs; int target; /* Processor target */ @@ -111,9 +117,6 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; - /* Floating point registers (VFP and Advanced SIMD/NEON) */ - struct vfp_hard_struct vfp_guest; - /* Host FP context */ kvm_cpu_context_t *host_cpu_context; diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 871b8267d211..346bfca29720 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -173,8 +173,9 @@ int main(void) DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); - DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); - DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); + DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index f3d88dc388bc..1a643f38031d 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -901,7 +901,7 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) if (vfpid < num_fp_regs()) { if (KVM_REG_SIZE(id) != 8) return -ENOENT; - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid], id); } @@ -911,13 +911,13 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) switch (vfpid) { case KVM_REG_ARM_VFP_FPEXC: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id); case KVM_REG_ARM_VFP_FPSCR: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id); case KVM_REG_ARM_VFP_FPINST: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id); case KVM_REG_ARM_VFP_FPINST2: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id); case KVM_REG_ARM_VFP_MVFR0: val = fmrx(MVFR0); return reg_to_user(uaddr, &val, id); @@ -945,7 +945,7 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) if (vfpid < num_fp_regs()) { if (KVM_REG_SIZE(id) != 8) return -ENOENT; - return 
reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], + return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid], uaddr, id); } @@ -955,13 +955,13 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) switch (vfpid) { case KVM_REG_ARM_VFP_FPEXC: - return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id); case KVM_REG_ARM_VFP_FPSCR: - return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id); case KVM_REG_ARM_VFP_FPINST: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id); case KVM_REG_ARM_VFP_FPINST2: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id); /* These are invariant. */ case KVM_REG_ARM_VFP_MVFR0: if (reg_from_user(&val, uaddr, id)) diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 9d9cb71df449..7bfb28936914 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -172,10 +172,11 @@ __kvm_vcpu_return: #ifdef CONFIG_VFPv3 @ Switch VFP/NEON hardware state to the host's - add r7, vcpu, #VCPU_VFP_GUEST + add r7, vcpu, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) store_vfp_state r7 - add r7, vcpu, #VCPU_VFP_HOST + add r7, vcpu, #VCPU_HOST_CTXT ldr r7, [r7] + add r7, r7, #CPU_CTXT_VFP restore_vfp_state r7 after_vfp_restore: @@ -482,10 +483,11 @@ switch_to_guest_vfp: set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11)) @ Switch VFP/NEON hardware state to the guest's - add r7, r0, #VCPU_VFP_HOST + add r7, r0, #VCPU_HOST_CTXT ldr r7, [r7] + add r7, r7, #CPU_CTXT_VFP store_vfp_state r7 - add r7, r0, #VCPU_VFP_GUEST + add r7, r0, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) restore_vfp_state r7 pop {r3-r7} -- cgit v1.2.3-58-ga151 From fb32a52a1d4487f3ac5b7ccb659d0beb11ec504f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 11:26:01 +0000 Subject: ARM: KVM: Move CP15 array into the CPU context structure Continuing our rework of the CPU context, we now move the CP15 array into the CPU context structure. As this causes quite a bit of churn, we introduce the vcpu_cp15() macro that abstract the location of the actual array. This will probably help next time we have to revisit that code. 
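The payoff is that callers no longer spell out where the CP15 array lives; a future relocation of that state only has to touch the macro. Two usage lines, matching the conversions in the diff below:

u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);	/* was: vcpu->arch.cp15[c1_SCTLR] */
vcpu_cp15(vcpu, c12_VBAR) = 0;		/* the accessor also works as an lvalue */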
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 2 +- arch/arm/include/asm/kvm_host.h | 6 +++--- arch/arm/include/asm/kvm_mmu.h | 2 +- arch/arm/kernel/asm-offsets.c | 2 +- arch/arm/kvm/coproc.c | 32 ++++++++++++++++---------------- arch/arm/kvm/coproc.h | 16 ++++++++-------- arch/arm/kvm/emulate.c | 22 +++++++++++----------- arch/arm/kvm/interrupts_head.S | 3 ++- 8 files changed, 43 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 3095df091ff8..32bb52a489d0 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -192,7 +192,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) { - return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK; + return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK; } static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index b64ac8e4adaa..4203701cc7f4 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -90,6 +90,7 @@ struct kvm_vcpu_fault_info { struct kvm_cpu_context { struct vfp_hard_struct vfp; + u32 cp15[NR_CP15_REGS]; }; typedef struct kvm_cpu_context kvm_cpu_context_t; @@ -102,9 +103,6 @@ struct kvm_vcpu_arch { int target; /* Processor target */ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); - /* System control coprocessor (cp15) */ - u32 cp15[NR_CP15_REGS]; - /* The CPU type we expose to the VM */ u32 midr; @@ -161,6 +159,8 @@ struct kvm_vcpu_stat { u64 exits; }; +#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r] + int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index a520b7987a29..da44be9db4fa 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -179,7 +179,7 @@ struct kvm; static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { - return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; + return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101; } static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 346bfca29720..43f8b01072c1 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -172,10 +172,10 @@ int main(void) #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); - DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); + DEFINE(CPU_CTXT_CP15, offsetof(struct kvm_cpu_context, cp15)); DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 1a643f38031d..e3e86c4cfed2 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -54,8 +54,8 @@ static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, const struct coproc_reg *r, 
u64 val) { - vcpu->arch.cp15[r->reg] = val & 0xffffffff; - vcpu->arch.cp15[r->reg + 1] = val >> 32; + vcpu_cp15(vcpu, r->reg) = val & 0xffffffff; + vcpu_cp15(vcpu, r->reg + 1) = val >> 32; } static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, @@ -63,9 +63,9 @@ static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, { u64 val; - val = vcpu->arch.cp15[r->reg + 1]; + val = vcpu_cp15(vcpu, r->reg + 1); val = val << 32; - val = val | vcpu->arch.cp15[r->reg]; + val = val | vcpu_cp15(vcpu, r->reg); return val; } @@ -104,7 +104,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) * vcpu_id, but we read the 'U' bit from the underlying * hardware directly. */ - vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | + vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | (vcpu->vcpu_id & 3)); } @@ -117,7 +117,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu, if (p->is_write) return ignore_write(vcpu, p); - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; + *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR); return true; } @@ -139,7 +139,7 @@ static bool access_l2ctlr(struct kvm_vcpu *vcpu, if (p->is_write) return ignore_write(vcpu, p); - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; + *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR); return true; } @@ -156,7 +156,7 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) ncores = min(ncores, 3U); l2ctlr |= (ncores & 3) << 24; - vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; + vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr; } static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) @@ -171,7 +171,7 @@ static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) else actlr &= ~(1U << 6); - vcpu->arch.cp15[c1_ACTLR] = actlr; + vcpu_cp15(vcpu, c1_ACTLR) = actlr; } /* @@ -218,9 +218,9 @@ bool access_vm_reg(struct kvm_vcpu *vcpu, BUG_ON(!p->is_write); - vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); + vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1); if (p->is_64bit) - vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); + vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2); kvm_toggle_cache(vcpu, was_enabled); return true; @@ -1030,7 +1030,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) val = vcpu_cp15_reg64_get(vcpu, r); ret = reg_to_user(uaddr, &val, reg->id); } else if (KVM_REG_SIZE(reg->id) == 4) { - ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); + ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id); } return ret; @@ -1060,7 +1060,7 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if (!ret) vcpu_cp15_reg64_set(vcpu, r, val); } else if (KVM_REG_SIZE(reg->id) == 4) { - ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); + ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id); } return ret; @@ -1248,7 +1248,7 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) const struct coproc_reg *table; /* Catch someone adding a register without putting in reset entry. */ - memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); + memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15)); /* Generic chip reset first (so target could override). 
*/ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); @@ -1257,6 +1257,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) reset_coproc_regs(vcpu, table, num); for (num = 1; num < NR_CP15_REGS; num++) - if (vcpu->arch.cp15[num] == 0x42424242) - panic("Didn't reset vcpu->arch.cp15[%zi]", num); + if (vcpu_cp15(vcpu, num) == 0x42424242) + panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); } diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 88d24a3a9778..27351323871d 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -47,7 +47,7 @@ struct coproc_reg { /* Initialization for vcpu. */ void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); - /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ + /* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */ unsigned long reg; /* Value (usually reset value) */ @@ -104,25 +104,25 @@ static inline void reset_unknown(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = 0xdecafbad; + BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); + vcpu_cp15(vcpu, r->reg) = 0xdecafbad; } static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = r->val; + BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); + vcpu_cp15(vcpu, r->reg) = r->val; } static inline void reset_unknown64(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { BUG_ON(!r->reg); - BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); + BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); - vcpu->arch.cp15[r->reg] = 0xdecafbad; - vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; + vcpu_cp15(vcpu, r->reg) = 0xdecafbad; + vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee; } static inline int cmp_reg(const struct coproc_reg *i1, diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index dc99159857b4..ee161b1c66da 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -266,8 +266,8 @@ void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) static u32 exc_vector_base(struct kvm_vcpu *vcpu) { - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; - u32 vbar = vcpu->arch.cp15[c12_VBAR]; + u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); + u32 vbar = vcpu_cp15(vcpu, c12_VBAR); if (sctlr & SCTLR_V) return 0xffff0000; @@ -282,7 +282,7 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode) { unsigned long cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; + u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode; @@ -357,22 +357,22 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) if (is_pabt) { /* Set IFAR and IFSR */ - vcpu->arch.cp15[c6_IFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); + vcpu_cp15(vcpu, c6_IFAR) = addr; + is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); /* Always give debug fault for now - should give guest a clue */ if (is_lpae) - vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22; + vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22; else - vcpu->arch.cp15[c5_IFSR] = 2; + vcpu_cp15(vcpu, c5_IFSR) = 2; } else { /* !iabt */ /* Set DFAR and DFSR */ - vcpu->arch.cp15[c6_DFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); + vcpu_cp15(vcpu, c6_DFAR) = addr; + is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); /* Always give debug fault for now - should give guest a clue */ if (is_lpae) - 
vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22; + vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22; else - vcpu->arch.cp15[c5_DFSR] = 2; + vcpu_cp15(vcpu, c5_DFSR) = 2; } } diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 51a59504bef4..b9d953158877 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -4,7 +4,8 @@ #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) #define VCPU_USR_SP (VCPU_USR_REG(13)) #define VCPU_USR_LR (VCPU_USR_REG(14)) -#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) +#define VCPU_CP15_BASE (VCPU_GUEST_CTXT + CPU_CTXT_CP15) +#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15_BASE + (_cp15_reg_idx * 4)) /* * Many of these macros need to access the VCPU structure, which is always -- cgit v1.2.3-58-ga151 From c2a8dab507ca6f8990c12372052efc830f51dd3f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 11:26:01 +0000 Subject: ARM: KVM: Move GP registers into the CPU context structure Continuing our rework of the CPU context, we now move the GP registers into the CPU context structure. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 8 ++++---- arch/arm/include/asm/kvm_host.h | 3 +-- arch/arm/kernel/asm-offsets.c | 18 +++++++++--------- arch/arm/kvm/emulate.c | 12 ++++++------ arch/arm/kvm/guest.c | 4 ++-- arch/arm/kvm/interrupts_head.S | 11 +++++++++++ arch/arm/kvm/reset.c | 2 +- 7 files changed, 34 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 32bb52a489d0..f710616ccadc 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -68,12 +68,12 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu) { - return &vcpu->arch.regs.usr_regs.ARM_pc; + return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc; } static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu) { - return &vcpu->arch.regs.usr_regs.ARM_cpsr; + return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr; } static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) @@ -83,13 +83,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) { - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; + unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); } static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) { - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; + unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; return cpsr_mode > USR_MODE;; } diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 4203701cc7f4..02932ba8a653 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -89,6 +89,7 @@ struct kvm_vcpu_fault_info { }; struct kvm_cpu_context { + struct kvm_regs gp_regs; struct vfp_hard_struct vfp; u32 cp15[NR_CP15_REGS]; }; @@ -98,8 +99,6 @@ typedef struct kvm_cpu_context kvm_cpu_context_t; struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; - struct kvm_regs regs; - int target; /* Processor target */ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 43f8b01072c1..2f3e0b064066 100644 --- a/arch/arm/kernel/asm-offsets.c +++ 
b/arch/arm/kernel/asm-offsets.c @@ -176,15 +176,15 @@ int main(void) DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); DEFINE(CPU_CTXT_CP15, offsetof(struct kvm_cpu_context, cp15)); - DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); - DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); - DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); - DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); - DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); - DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs)); - DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); - DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); + DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); + DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs)); + DEFINE(GP_REGS_SVC, offsetof(struct kvm_regs, svc_regs)); + DEFINE(GP_REGS_ABT, offsetof(struct kvm_regs, abt_regs)); + DEFINE(GP_REGS_UND, offsetof(struct kvm_regs, und_regs)); + DEFINE(GP_REGS_IRQ, offsetof(struct kvm_regs, irq_regs)); + DEFINE(GP_REGS_FIQ, offsetof(struct kvm_regs, fiq_regs)); + DEFINE(GP_REGS_PC, offsetof(struct kvm_regs, usr_regs.ARM_pc)); + DEFINE(GP_REGS_CPSR, offsetof(struct kvm_regs, usr_regs.ARM_cpsr)); DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr)); DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index ee161b1c66da..a494def3f195 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -112,7 +112,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { */ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) { - unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs; + unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs; unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; switch (mode) { @@ -147,15 +147,15 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu) unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; switch (mode) { case SVC_MODE: - return &vcpu->arch.regs.KVM_ARM_SVC_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr; case ABT_MODE: - return &vcpu->arch.regs.KVM_ARM_ABT_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr; case UND_MODE: - return &vcpu->arch.regs.KVM_ARM_UND_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr; case IRQ_MODE: - return &vcpu->arch.regs.KVM_ARM_IRQ_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr; case FIQ_MODE: - return &vcpu->arch.regs.KVM_ARM_FIQ_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr; default: BUG(); } diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 5fa69d7bae58..86e26fbd5ba3 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -55,7 +55,7 @@ static u64 core_reg_offset_from_id(u64 id) static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; + struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs; u64 off; if (KVM_REG_SIZE(reg->id) != 4) @@ -72,7 +72,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { u32 __user *uaddr 
= (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; + struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs; u64 off, val; if (KVM_REG_SIZE(reg->id) != 4) diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index b9d953158877..e0943cb80ab3 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -1,6 +1,17 @@ #include #include +/* Compat macro, until we get rid of this file entierely */ +#define VCPU_GP_REGS (VCPU_GUEST_CTXT + CPU_CTXT_GP_REGS) +#define VCPU_USR_REGS (VCPU_GP_REGS + GP_REGS_USR) +#define VCPU_SVC_REGS (VCPU_GP_REGS + GP_REGS_SVC) +#define VCPU_ABT_REGS (VCPU_GP_REGS + GP_REGS_ABT) +#define VCPU_UND_REGS (VCPU_GP_REGS + GP_REGS_UND) +#define VCPU_IRQ_REGS (VCPU_GP_REGS + GP_REGS_IRQ) +#define VCPU_FIQ_REGS (VCPU_GP_REGS + GP_REGS_FIQ) +#define VCPU_PC (VCPU_GP_REGS + GP_REGS_PC) +#define VCPU_CPSR (VCPU_GP_REGS + GP_REGS_CPSR) + #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) #define VCPU_USR_SP (VCPU_USR_REG(13)) #define VCPU_USR_LR (VCPU_USR_REG(14)) diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index eeb85858d6bb..0048b5a62a50 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -71,7 +71,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } /* Reset core registers */ - memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs)); + memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs)); /* Reset CP15 registers */ kvm_reset_coprocs(vcpu); -- cgit v1.2.3-58-ga151 From 08dcbfda0774d5550447835f20a647b7e4c94481 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 21 Oct 2015 10:09:49 +0100 Subject: ARM: KVM: Add a HYP-specific header file In order to expose the various HYP services that are private to the hypervisor, add a new hyp.h file. So far, it only contains mundane things such as section annotation and VA manipulation. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/hyp.h | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 arch/arm/kvm/hyp/hyp.h (limited to 'arch') diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h new file mode 100644 index 000000000000..c72387073b09 --- /dev/null +++ b/arch/arm/kvm/hyp/hyp.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ARM_KVM_HYP_H__ +#define __ARM_KVM_HYP_H__ + +#include +#include +#include + +#define __hyp_text __section(.hyp.text) notrace + +#define kern_hyp_va(v) (v) +#define hyp_kern_va(v) (v) + +#endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3-58-ga151 From 3c29568768dfe6965ca51e1a78f9f31ebc0c500a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 15:07:13 +0000 Subject: ARM: KVM: Add system register accessor macros In order to move system register (CP15, mostly) access to C code, add a few macros to facilitate this, and minimize the difference between 32 and 64bit CP15 registers. 
This will get heavily used in the following patches. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/hyp.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index c72387073b09..727089f0ddb6 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -27,4 +27,19 @@ #define kern_hyp_va(v) (v) #define hyp_kern_va(v) (v) +#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ + "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 +#define __ACCESS_CP15_64(Op1, CRm) \ + "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 + +#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) +#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) + +#define __read_sysreg(r, w, c, t) ({ \ + t __val; \ + asm volatile(r " " c : "=r" (__val)); \ + __val; \ +}) +#define read_sysreg(...) __read_sysreg(__VA_ARGS__) + #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3-58-ga151 From 1d58d2cbf723704e070d560507787b9912b63839 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 15:09:54 +0000 Subject: ARM: KVM: Add TLB invalidation code Convert the TLB invalidation code to C, hooking it into the build system whilst we're at it. Signed-off-by: Marc Zyngier --- arch/arm/kvm/Makefile | 1 + arch/arm/kvm/hyp/Makefile | 5 ++++ arch/arm/kvm/hyp/hyp.h | 5 ++++ arch/arm/kvm/hyp/tlb.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 81 insertions(+) create mode 100644 arch/arm/kvm/hyp/Makefile create mode 100644 arch/arm/kvm/hyp/tlb.c (limited to 'arch') diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile index c5eef02c52ba..eb1bf4309c13 100644 --- a/arch/arm/kvm/Makefile +++ b/arch/arm/kvm/Makefile @@ -17,6 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) KVM := ../../../virt/kvm kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o +obj-$(CONFIG_KVM_ARM_HOST) += hyp/ obj-y += kvm-arm.o init.o interrupts.o obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile new file mode 100644 index 000000000000..36c760df2360 --- /dev/null +++ b/arch/arm/kvm/hyp/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for Kernel-based Virtual Machine module, HYP part +# + +obj-$(CONFIG_KVM_ARM_HOST) += tlb.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 727089f0ddb6..5808bbd38c5f 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -42,4 +42,9 @@ }) #define read_sysreg(...) __read_sysreg(__VA_ARGS__) +#define VTTBR __ACCESS_CP15_64(6, c2) +#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) +#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) +#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) + #endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c new file mode 100644 index 000000000000..aaa44bbac766 --- /dev/null +++ b/arch/arm/kvm/hyp/tlb.c @@ -0,0 +1,70 @@ +/* + * Original code: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + * + * Mostly rewritten in C by Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "hyp.h" + +/** + * Flush per-VMID TLBs + * + * __kvm_tlb_flush_vmid(struct kvm *kvm); + * + * We rely on the hardware to broadcast the TLB invalidation to all CPUs + * inside the inner-shareable domain (which is the case for all v7 + * implementations). If we come across a non-IS SMP implementation, we'll + * have to use an IPI based mechanism. Until then, we stick to the simple + * hardware assisted version. + * + * As v7 does not support flushing per IPA, just nuke the whole TLB + * instead, ignoring the ipa value. + */ +static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) +{ + dsb(ishst); + + /* Switch to requested VMID */ + kvm = kern_hyp_va(kvm); + write_sysreg(kvm->arch.vttbr, VTTBR); + isb(); + + write_sysreg(0, TLBIALLIS); + dsb(ish); + isb(); + + write_sysreg(0, VTTBR); +} + +__alias(__tlb_flush_vmid) void __weak __kvm_tlb_flush_vmid(struct kvm *kvm); + +static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) +{ + __tlb_flush_vmid(kvm); +} + +__alias(__tlb_flush_vmid_ipa) void __weak __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, + phys_addr_t ipa); + +static void __hyp_text __tlb_flush_vm_context(void) +{ + write_sysreg(0, TLBIALLNSNHIS); + write_sysreg(0, ICIALLUIS); + dsb(ish); +} + +__alias(__tlb_flush_vm_context) void __weak __kvm_flush_vm_context(void); -- cgit v1.2.3-58-ga151 From c7ce6c63a05f83998996fdebc4867b007a571f82 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 12:55:01 +0000 Subject: ARM: KVM: Add CP15 save/restore code Concert the CP15 save/restore code to C. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/cp15-sr.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/hyp.h | 28 ++++++++++++++++ 3 files changed, 113 insertions(+) create mode 100644 arch/arm/kvm/hyp/cp15-sr.c (limited to 'arch') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 36c760df2360..9f96fcbbcd8d 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_KVM_ARM_HOST) += tlb.o +obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c new file mode 100644 index 000000000000..732abbc34bd0 --- /dev/null +++ b/arch/arm/kvm/hyp/cp15-sr.c @@ -0,0 +1,84 @@ +/* + * Original code: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + * + * Mostly rewritten in C by Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "hyp.h" + +static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx) +{ + return (u64 *)(ctxt->cp15 + idx); +} + +void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) +{ + ctxt->cp15[c0_MPIDR] = read_sysreg(VMPIDR); + ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR); + ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR); + ctxt->cp15[c1_CPACR] = read_sysreg(CPACR); + *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0); + *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1); + ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR); + ctxt->cp15[c3_DACR] = read_sysreg(DACR); + ctxt->cp15[c5_DFSR] = read_sysreg(DFSR); + ctxt->cp15[c5_IFSR] = read_sysreg(IFSR); + ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR); + ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR); + ctxt->cp15[c6_DFAR] = read_sysreg(DFAR); + ctxt->cp15[c6_IFAR] = read_sysreg(IFAR); + *cp15_64(ctxt, c7_PAR) = read_sysreg(PAR); + ctxt->cp15[c10_PRRR] = read_sysreg(PRRR); + ctxt->cp15[c10_NMRR] = read_sysreg(NMRR); + ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0); + ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1); + ctxt->cp15[c12_VBAR] = read_sysreg(VBAR); + ctxt->cp15[c13_CID] = read_sysreg(CID); + ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW); + ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO); + ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV); + ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL); +} + +void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) +{ + write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR); + write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR); + write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR); + write_sysreg(ctxt->cp15[c1_CPACR], CPACR); + write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0); + write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1); + write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR); + write_sysreg(ctxt->cp15[c3_DACR], DACR); + write_sysreg(ctxt->cp15[c5_DFSR], DFSR); + write_sysreg(ctxt->cp15[c5_IFSR], IFSR); + write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR); + write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR); + write_sysreg(ctxt->cp15[c6_DFAR], DFAR); + write_sysreg(ctxt->cp15[c6_IFAR], IFAR); + write_sysreg(*cp15_64(ctxt, c7_PAR), PAR); + write_sysreg(ctxt->cp15[c10_PRRR], PRRR); + write_sysreg(ctxt->cp15[c10_NMRR], NMRR); + write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0); + write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1); + write_sysreg(ctxt->cp15[c12_VBAR], VBAR); + write_sysreg(ctxt->cp15[c13_CID], CID); + write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW); + write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO); + write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV); + write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL); +} diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 5808bbd38c5f..ab2cb828d60a 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -42,9 +42,37 @@ }) #define read_sysreg(...) 
__read_sysreg(__VA_ARGS__) +#define TTBR0 __ACCESS_CP15_64(0, c2) +#define TTBR1 __ACCESS_CP15_64(1, c2) #define VTTBR __ACCESS_CP15_64(6, c2) +#define PAR __ACCESS_CP15_64(0, c7) +#define CSSELR __ACCESS_CP15(c0, 2, c0, 0) +#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) +#define SCTLR __ACCESS_CP15(c1, 0, c0, 0) +#define CPACR __ACCESS_CP15(c1, 0, c0, 2) +#define TTBCR __ACCESS_CP15(c2, 0, c0, 2) +#define DACR __ACCESS_CP15(c3, 0, c0, 0) +#define DFSR __ACCESS_CP15(c5, 0, c0, 0) +#define IFSR __ACCESS_CP15(c5, 0, c0, 1) +#define ADFSR __ACCESS_CP15(c5, 0, c1, 0) +#define AIFSR __ACCESS_CP15(c5, 0, c1, 1) +#define DFAR __ACCESS_CP15(c6, 0, c0, 0) +#define IFAR __ACCESS_CP15(c6, 0, c0, 2) #define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) #define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) #define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) +#define PRRR __ACCESS_CP15(c10, 0, c2, 0) +#define NMRR __ACCESS_CP15(c10, 0, c2, 1) +#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0) +#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1) +#define VBAR __ACCESS_CP15(c12, 0, c0, 0) +#define CID __ACCESS_CP15(c13, 0, c0, 1) +#define TID_URW __ACCESS_CP15(c13, 0, c0, 2) +#define TID_URO __ACCESS_CP15(c13, 0, c0, 3) +#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) +#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) + +void __sysreg_save_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_state(struct kvm_cpu_context *ctxt); #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3-58-ga151 From e59bff9bf302bf1332c6421b39ba2e82b84e63a6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 4 Jan 2016 08:54:50 +0000 Subject: ARM: KVM: Add timer save/restore This patch shouldn't exist, as we should be able to reuse the arm64 version for free. I'll get there eventually, but in the meantime I need a timer ticking. 
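The save/restore code below leans entirely on the read_sysreg()/write_sysreg() accessors introduced two patches earlier. As a worked example of what they expand to, take the virtual timer registers defined in hyp.h by this patch:

/* CNTV_CTL is __ACCESS_CP15(c14, 0, c3, 1), so: */
write_sysreg(0, CNTV_CTL);
/* ...expands (after string pasting) to roughly:
 *	asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" ((u32)0));
 */

/* CNTV_CVAL is __ACCESS_CP15_64(3, c14), so: */
u64 cval = read_sysreg(CNTV_CVAL);
/* ...expands to roughly:
 *	({ u64 __val; asm volatile("mrrc p15, 3, %Q0, %R0, c14" : "=r" (__val)); __val; })
 */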
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 8 +++++ arch/arm/kvm/hyp/timer-sr.c | 71 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 arch/arm/kvm/hyp/timer-sr.c (limited to 'arch') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 9f96fcbbcd8d..9241ae845252 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index ab2cb828d60a..4924418aee4f 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -46,6 +46,9 @@ #define TTBR1 __ACCESS_CP15_64(1, c2) #define VTTBR __ACCESS_CP15_64(6, c2) #define PAR __ACCESS_CP15_64(0, c7) +#define CNTV_CVAL __ACCESS_CP15_64(3, c14) +#define CNTVOFF __ACCESS_CP15_64(4, c14) + #define CSSELR __ACCESS_CP15(c0, 2, c0, 0) #define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) #define SCTLR __ACCESS_CP15(c1, 0, c0, 0) @@ -71,6 +74,11 @@ #define TID_URO __ACCESS_CP15(c13, 0, c0, 3) #define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) #define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) +#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) +#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) + +void __timer_save_state(struct kvm_vcpu *vcpu); +void __timer_restore_state(struct kvm_vcpu *vcpu); void __sysreg_save_state(struct kvm_cpu_context *ctxt); void __sysreg_restore_state(struct kvm_cpu_context *ctxt); diff --git a/arch/arm/kvm/hyp/timer-sr.c b/arch/arm/kvm/hyp/timer-sr.c new file mode 100644 index 000000000000..d7535fd0784e --- /dev/null +++ b/arch/arm/kvm/hyp/timer-sr.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include + +#include "hyp.h" + +/* vcpu is already in the HYP VA space */ +void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + if (kvm->arch.timer.enabled) { + timer->cntv_ctl = read_sysreg(CNTV_CTL); + timer->cntv_cval = read_sysreg(CNTV_CVAL); + } + + /* Disable the virtual timer */ + write_sysreg(0, CNTV_CTL); + + /* Allow physical timer/counter access for the host */ + val = read_sysreg(CNTHCTL); + val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + write_sysreg(val, CNTHCTL); + + /* Clear cntvoff for the host */ + write_sysreg(0, CNTVOFF); +} + +void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + /* + * Disallow physical timer access for the guest + * Physical counter access is allowed + */ + val = read_sysreg(CNTHCTL); + val &= ~CNTHCTL_EL1PCEN; + val |= CNTHCTL_EL1PCTEN; + write_sysreg(val, CNTHCTL); + + if (kvm->arch.timer.enabled) { + write_sysreg(kvm->arch.timer.cntvoff, CNTVOFF); + write_sysreg(timer->cntv_cval, CNTV_CVAL); + isb(); + write_sysreg(timer->cntv_ctl, CNTV_CTL); + } +} -- cgit v1.2.3-58-ga151 From c0c2cdbffef2369a94998fc6d85af25eded92b60 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 4 Jan 2016 09:06:11 +0000 Subject: ARM: KVM: Add vgic v2 save/restore This patch shouldn't exist, as we should be able to reuse the arm64 version for free. I'll get there eventually, but in the meantime I need an interrupt controller. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 3 ++ arch/arm/kvm/hyp/vgic-v2-sr.c | 84 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+) create mode 100644 arch/arm/kvm/hyp/vgic-v2-sr.c (limited to 'arch') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 9241ae845252..d8acbb691249 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 4924418aee4f..7eb1c21d2d21 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -80,6 +80,9 @@ void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); +void __vgic_v2_save_state(struct kvm_vcpu *vcpu); +void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); + void __sysreg_save_state(struct kvm_cpu_context *ctxt); void __sysreg_restore_state(struct kvm_cpu_context *ctxt); diff --git a/arch/arm/kvm/hyp/vgic-v2-sr.c b/arch/arm/kvm/hyp/vgic-v2-sr.c new file mode 100644 index 000000000000..e71761238cfc --- /dev/null +++ b/arch/arm/kvm/hyp/vgic-v2-sr.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include + +#include "hyp.h" + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + u32 eisr0, eisr1, elrsr0, elrsr1; + int i, nr_lr; + + if (!base) + return; + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR); + cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR); + eisr0 = readl_relaxed(base + GICH_EISR0); + elrsr0 = readl_relaxed(base + GICH_ELRSR0); + if (unlikely(nr_lr > 32)) { + eisr1 = readl_relaxed(base + GICH_EISR1); + elrsr1 = readl_relaxed(base + GICH_ELRSR1); + } else { + eisr1 = elrsr1 = 0; + } +#ifdef CONFIG_CPU_BIG_ENDIAN + cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1; + cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; +#else + cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0; + cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; +#endif + cpu_if->vgic_apr = readl_relaxed(base + GICH_APR); + + writel_relaxed(0, base + GICH_HCR); + + for (i = 0; i < nr_lr; i++) + cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); +} + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + int i, nr_lr; + + if (!base) + return; + + writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR); + writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); + writel_relaxed(cpu_if->vgic_apr, base + GICH_APR); + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + for (i = 0; i < nr_lr; i++) + writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4)); +} -- cgit v1.2.3-58-ga151 From 59cbcdb5d83b49d1d2e161f3468f850f9fa4b968 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 4 Jan 2016 15:41:51 +0000 Subject: ARM: KVM: Add VFP save/restore This is almost a copy/paste of the existing version, with a couple of subtle differences: - Only write to FPEXC once on the save path - Add an isb when enabling VFP access The patch also defines a few sysreg accessors and a __vfp_enabled predicate that test the VFP trapping state. 
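The predicate is consumed by the C world switch added later in the series; condensed from that later code (nothing here is new to this patch):

        bool fp_enabled = __vfp_enabled();         /* did the guest touch cp10/cp11? */
        if (fp_enabled) {
                __vfp_save_state(&guest_ctxt->vfp);    /* keep the guest's VFP state */
                __vfp_restore_state(&host_ctxt->vfp);  /* and bring the host's back  */
        }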
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 13 +++++++++ arch/arm/kvm/hyp/vfp.S | 68 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 arch/arm/kvm/hyp/vfp.S (limited to 'arch') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index d8acbb691249..5a45f4c21f83 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += vfp.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 7eb1c21d2d21..dce0f7305cf5 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -21,6 +21,7 @@ #include #include #include +#include #define __hyp_text __section(.hyp.text) notrace @@ -31,6 +32,8 @@ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 #define __ACCESS_CP15_64(Op1, CRm) \ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 +#define __ACCESS_VFP(CRn) \ + "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32 #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) #define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) @@ -53,6 +56,7 @@ #define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) #define SCTLR __ACCESS_CP15(c1, 0, c0, 0) #define CPACR __ACCESS_CP15(c1, 0, c0, 2) +#define HCPTR __ACCESS_CP15(c1, 4, c1, 2) #define TTBCR __ACCESS_CP15(c2, 0, c0, 2) #define DACR __ACCESS_CP15(c3, 0, c0, 0) #define DFSR __ACCESS_CP15(c5, 0, c0, 0) @@ -77,6 +81,8 @@ #define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) #define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) +#define VFP_FPEXC __ACCESS_VFP(FPEXC) + void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); @@ -86,4 +92,11 @@ void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); void __sysreg_save_state(struct kvm_cpu_context *ctxt); void __sysreg_restore_state(struct kvm_cpu_context *ctxt); +void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); +void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); +static inline bool __vfp_enabled(void) +{ + return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); +} + #endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/vfp.S b/arch/arm/kvm/hyp/vfp.S new file mode 100644 index 000000000000..7c297e87eb8b --- /dev/null +++ b/arch/arm/kvm/hyp/vfp.S @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include + + .text + .pushsection .hyp.text, "ax" + +/* void __vfp_save_state(struct vfp_hard_struct *vfp); */ +ENTRY(__vfp_save_state) + push {r4, r5} + VFPFMRX r1, FPEXC + + @ Make sure *really* VFP is enabled so we can touch the registers. 
+ orr r5, r1, #FPEXC_EN + tst r5, #FPEXC_EX @ Check for VFP Subarchitecture + bic r5, r5, #FPEXC_EX @ FPEXC_EX disable + VFPFMXR FPEXC, r5 + isb + + VFPFMRX r2, FPSCR + beq 1f + + @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so + @ we only need to save them if FPEXC_EX is set. + VFPFMRX r3, FPINST + tst r5, #FPEXC_FP2V + VFPFMRX r4, FPINST2, ne @ vmrsne +1: + VFPFSTMIA r0, r5 @ Save VFP registers + stm r0, {r1-r4} @ Save FPEXC, FPSCR, FPINST, FPINST2 + pop {r4, r5} + bx lr +ENDPROC(__vfp_save_state) + +/* void __vfp_restore_state(struct vfp_hard_struct *vfp); + * Assume FPEXC_EN is on and FPEXC_EX is off */ +ENTRY(__vfp_restore_state) + VFPFLDMIA r0, r1 @ Load VFP registers + ldm r0, {r0-r3} @ Load FPEXC, FPSCR, FPINST, FPINST2 + + VFPFMXR FPSCR, r1 + tst r0, #FPEXC_EX @ Check for VFP Subarchitecture + beq 1f + VFPFMXR FPINST, r2 + tst r0, #FPEXC_FP2V + VFPFMXR FPINST2, r3, ne +1: + VFPFMXR FPEXC, r0 @ FPEXC (last, in case !EN) + bx lr +ENDPROC(__vfp_restore_state) + + .popsection -- cgit v1.2.3-58-ga151 From 33280b4cd1dc0bc7df8d6d3bd1b64c377c9e44d9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:38:09 +0000 Subject: ARM: KVM: Add banked registers save/restore Banked registers are one of the many perks of the 32bit architecture, and the world switch needs to cope with it. This requires some "special" accessors, as these are not accessed using a standard coprocessor instruction. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/banked-sr.c | 77 ++++++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/hyp.h | 11 +++++++ 3 files changed, 89 insertions(+) create mode 100644 arch/arm/kvm/hyp/banked-sr.c (limited to 'arch') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 5a45f4c21f83..173bd1dd77e7 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o +obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c new file mode 100644 index 000000000000..d02dc804f611 --- /dev/null +++ b/arch/arm/kvm/hyp/banked-sr.c @@ -0,0 +1,77 @@ +/* + * Original code: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + * + * Mostly rewritten in C by Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "hyp.h" + +__asm__(".arch_extension virt"); + +void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) +{ + ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr); + ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp); + ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR); + ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc); + ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc); + ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc); + ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt); + ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt); + ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt); + ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und); + ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und); + ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und); + ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq); + ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq); + ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq); + ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq); +} + +void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt) +{ + write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr); + write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp); + write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf); + write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc); + write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc); + write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc); + write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt); + write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt); + write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt); + write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und); + write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und); + write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und); + write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq); + write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq); + write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq); +} diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index dce0f7305cf5..278eb1fa5231 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -45,6 +45,14 @@ }) #define read_sysreg(...) 
__read_sysreg(__VA_ARGS__) +#define write_special(v, r) \ + asm volatile("msr " __stringify(r) ", %0" : : "r" (v)) +#define read_special(r) ({ \ + u32 __val; \ + asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ + __val; \ +}) + #define TTBR0 __ACCESS_CP15_64(0, c2) #define TTBR1 __ACCESS_CP15_64(1, c2) #define VTTBR __ACCESS_CP15_64(6, c2) @@ -99,4 +107,7 @@ static inline bool __vfp_enabled(void) return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); } +void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); +void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); + #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3-58-ga151 From 89ef2b21ed2173e01995371261a9f9789bc1e47a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:40:51 +0000 Subject: ARM: KVM: Add guest entry code Add the very minimal piece of code that is now required to jump into the guest (and return from it). This code is only concerned with save/restoring the USR registers (r0-r12+lr for the guest, r4-r12+lr for the host), as everything else is dealt with in C (VFP is another matter though). Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/entry.S | 70 +++++++++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/hyp.h | 2 ++ 3 files changed, 73 insertions(+) create mode 100644 arch/arm/kvm/hyp/entry.S (limited to 'arch') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 173bd1dd77e7..c77969008665 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S new file mode 100644 index 000000000000..32f79b090040 --- /dev/null +++ b/arch/arm/kvm/hyp/entry.S @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2016 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+*/ + +#include +#include +#include + + .arch_extension virt + + .text + .pushsection .hyp.text, "ax" + +#define USR_REGS_OFFSET (CPU_CTXT_GP_REGS + GP_REGS_USR) + +/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */ +ENTRY(__guest_enter) + @ Save host registers + add r1, r1, #(USR_REGS_OFFSET + S_R4) + stm r1!, {r4-r12} + str lr, [r1, #4] @ Skip SP_usr (already saved) + + @ Restore guest registers + add r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0) + ldr lr, [r0, #S_LR] + ldm r0, {r0-r12} + + clrex + eret +ENDPROC(__guest_enter) + +ENTRY(__guest_exit) + /* + * return convention: + * guest r0, r1, r2 saved on the stack + * r0: vcpu pointer + * r1: exception code + */ + + add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3) + stm r2!, {r3-r12} + str lr, [r2, #4] + add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0) + pop {r3, r4, r5} @ r0, r1, r2 + stm r2, {r3-r5} + + ldr r0, [r0, #VCPU_HOST_CTXT] + add r0, r0, #(USR_REGS_OFFSET + S_R4) + ldm r0!, {r4-r12} + ldr lr, [r0, #4] + + mov r0, r1 + bx lr +ENDPROC(__guest_exit) + + .popsection + diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 278eb1fa5231..b3f6ed233564 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -110,4 +110,6 @@ static inline bool __vfp_enabled(void) void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); +int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, + struct kvm_cpu_context *host); #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3-58-ga151 From 96e5e670cc681703c00eaf442c8796da6aa25ca0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 6 Jan 2016 13:53:51 +0000 Subject: ARM: KVM: Add VFP lazy save/restore handler Similar to the arm64 version, add the code that deals with VFP traps, re-enabling VFP, save/restoring the registers and resuming the guest. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/entry.S | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'arch') diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S index 32f79b090040..21c238871c9e 100644 --- a/arch/arm/kvm/hyp/entry.S +++ b/arch/arm/kvm/hyp/entry.S @@ -66,5 +66,36 @@ ENTRY(__guest_exit) bx lr ENDPROC(__guest_exit) +/* + * If VFPv3 support is not available, then we will not switch the VFP + * registers; however cp10 and cp11 accesses will still trap and fallback + * to the regular coprocessor emulation code, which currently will + * inject an undefined exception to the guest. + */ +#ifdef CONFIG_VFPv3 +ENTRY(__vfp_guest_restore) + push {r3, r4, lr} + + @ NEON/VFP used. Turn on VFP access. + mrc p15, 4, r1, c1, c1, 2 @ HCPTR + bic r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11)) + mcr p15, 4, r1, c1, c1, 2 @ HCPTR + isb + + @ Switch VFP/NEON hardware state to the guest's + mov r4, r0 + ldr r0, [r0, #VCPU_HOST_CTXT] + add r0, r0, #CPU_CTXT_VFP + bl __vfp_save_state + add r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) + bl __vfp_restore_state + + pop {r3, r4, lr} + pop {r0, r1, r2} + clrex + eret +ENDPROC(__vfp_guest_restore) +#endif + .popsection -- cgit v1.2.3-58-ga151 From 9dddc2dfa5da95fa380635d400f80767077ef936 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:42:49 +0000 Subject: ARM: KVM: Add the new world switch implementation The new world switch implementation is modeled after the arm64 one, calling the various save/restore functions in turn, and having as little state as possible. 
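In outline, the new __guest_run() (full listing below) is symmetric around __guest_enter():

        __sysreg_save_state(host_ctxt);      __banked_save_state(host_ctxt);
        __activate_traps(vcpu, &fpexc);      __activate_vm(vcpu);
        __vgic_restore_state(vcpu);          __timer_restore_state(vcpu);
        __sysreg_restore_state(guest_ctxt);  __banked_restore_state(guest_ctxt);

        exit_code = __guest_enter(vcpu, host_ctxt);

        /* ...and the mirror image on the way out, finishing with the host
           sysregs and, if the guest used VFP, the lazily switched FP state. */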
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 7 +++ arch/arm/kvm/hyp/switch.c | 140 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 148 insertions(+) create mode 100644 arch/arm/kvm/hyp/switch.c (limited to 'arch') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index c77969008665..cfab402d7695 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o +obj-$(CONFIG_KVM_ARM_HOST) += switch.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index b3f6ed233564..ef582c9ad96d 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -60,11 +60,16 @@ #define CNTV_CVAL __ACCESS_CP15_64(3, c14) #define CNTVOFF __ACCESS_CP15_64(4, c14) +#define MIDR __ACCESS_CP15(c0, 0, c0, 0) #define CSSELR __ACCESS_CP15(c0, 2, c0, 0) +#define VPIDR __ACCESS_CP15(c0, 4, c0, 0) #define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) #define SCTLR __ACCESS_CP15(c1, 0, c0, 0) #define CPACR __ACCESS_CP15(c1, 0, c0, 2) +#define HCR __ACCESS_CP15(c1, 4, c1, 0) +#define HDCR __ACCESS_CP15(c1, 4, c1, 1) #define HCPTR __ACCESS_CP15(c1, 4, c1, 2) +#define HSTR __ACCESS_CP15(c1, 4, c1, 3) #define TTBCR __ACCESS_CP15(c2, 0, c0, 2) #define DACR __ACCESS_CP15(c3, 0, c0, 0) #define DFSR __ACCESS_CP15(c5, 0, c0, 0) @@ -73,6 +78,7 @@ #define AIFSR __ACCESS_CP15(c5, 0, c1, 1) #define DFAR __ACCESS_CP15(c6, 0, c0, 0) #define IFAR __ACCESS_CP15(c6, 0, c0, 2) +#define HDFAR __ACCESS_CP15(c6, 4, c0, 0) #define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) #define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) #define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) @@ -85,6 +91,7 @@ #define TID_URW __ACCESS_CP15(c13, 0, c0, 2) #define TID_URO __ACCESS_CP15(c13, 0, c0, 3) #define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) +#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2) #define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) #define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) #define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c new file mode 100644 index 000000000000..a1f3c1cf8f74 --- /dev/null +++ b/arch/arm/kvm/hyp/switch.c @@ -0,0 +1,140 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include "hyp.h" + +__asm__(".arch_extension virt"); + +/* + * Activate the traps, saving the host's fpexc register before + * overwriting it. We'll restore it on VM exit. + */ +static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host) +{ + u32 val; + + /* + * We are about to set HCPTR.TCP10/11 to trap all floating point + * register accesses to HYP, however, the ARM ARM clearly states that + * traps are only taken to HYP if the operation would not otherwise + * trap to SVC. 
Therefore, always make sure that for 32-bit guests, + * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits. + */ + val = read_sysreg(VFP_FPEXC); + *fpexc_host = val; + if (!(val & FPEXC_EN)) { + write_sysreg(val | FPEXC_EN, VFP_FPEXC); + isb(); + } + + write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR); + /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ + write_sysreg(HSTR_T(15), HSTR); + write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR); + val = read_sysreg(HDCR); + write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR); +} + +static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) +{ + u32 val; + + write_sysreg(0, HCR); + write_sysreg(0, HSTR); + val = read_sysreg(HDCR); + write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR); + write_sysreg(0, HCPTR); +} + +static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + write_sysreg(kvm->arch.vttbr, VTTBR); + write_sysreg(vcpu->arch.midr, VPIDR); +} + +static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) +{ + write_sysreg(0, VTTBR); + write_sysreg(read_sysreg(MIDR), VPIDR); +} + +static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) +{ + __vgic_v2_save_state(vcpu); +} + +static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) +{ + __vgic_v2_restore_state(vcpu); +} + +static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_cpu_context *guest_ctxt; + bool fp_enabled; + u64 exit_code; + u32 fpexc; + + vcpu = kern_hyp_va(vcpu); + write_sysreg(vcpu, HTPIDR); + + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + guest_ctxt = &vcpu->arch.ctxt; + + __sysreg_save_state(host_ctxt); + __banked_save_state(host_ctxt); + + __activate_traps(vcpu, &fpexc); + __activate_vm(vcpu); + + __vgic_restore_state(vcpu); + __timer_restore_state(vcpu); + + __sysreg_restore_state(guest_ctxt); + __banked_restore_state(guest_ctxt); + + /* Jump in the fire! */ + exit_code = __guest_enter(vcpu, host_ctxt); + /* And we're baaack! */ + + fp_enabled = __vfp_enabled(); + + __banked_save_state(guest_ctxt); + __sysreg_save_state(guest_ctxt); + __timer_save_state(vcpu); + __vgic_save_state(vcpu); + + __deactivate_traps(vcpu); + __deactivate_vm(vcpu); + + __banked_restore_state(host_ctxt); + __sysreg_restore_state(host_ctxt); + + if (fp_enabled) { + __vfp_save_state(&guest_ctxt->vfp); + __vfp_restore_state(&host_ctxt->vfp); + } + + write_sysreg(fpexc, VFP_FPEXC); + + return exit_code; +} + +__alias(__guest_run) int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu); -- cgit v1.2.3-58-ga151 From 97e964371377ebf3958701b082597c2162e3df18 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Jan 2016 19:02:51 +0000 Subject: ARM: KVM: Add populating of fault data structure On guest exit, we must take care of populating our fault data structure so that the host code can handle it. This includes resolving the IPA for permission faults, which can result in restarting the guest. 
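To make the bit-twiddling in the permission fault path easier to follow (this only restates what __populate_fault_info() below does, assuming the long-descriptor PAR layout): after the ATS1CPR lookup, PAR[39:12] holds the output address of the stage 1 walk, i.e. the IPA, while HPFAR[31:4] is defined to hold IPA[39:12], hence:

        u64 par = read_sysreg(PAR);                          /* result of the ATS1CPR lookup */
        u32 hpfar = ((par >> 12) & ((1UL << 28) - 1)) << 4;  /* IPA[39:12] -> HPFAR[31:4] */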
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/hyp.h | 4 ++++ arch/arm/kvm/hyp/switch.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) (limited to 'arch') diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index ef582c9ad96d..8b1156b691ff 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -76,10 +76,14 @@ #define IFSR __ACCESS_CP15(c5, 0, c0, 1) #define ADFSR __ACCESS_CP15(c5, 0, c1, 0) #define AIFSR __ACCESS_CP15(c5, 0, c1, 1) +#define HSR __ACCESS_CP15(c5, 4, c2, 0) #define DFAR __ACCESS_CP15(c6, 0, c0, 0) #define IFAR __ACCESS_CP15(c6, 0, c0, 2) #define HDFAR __ACCESS_CP15(c6, 4, c0, 0) +#define HIFAR __ACCESS_CP15(c6, 4, c0, 2) +#define HPFAR __ACCESS_CP15(c6, 4, c0, 4) #define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) +#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) #define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) #define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) #define PRRR __ACCESS_CP15(c10, 0, c2, 0) diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c index a1f3c1cf8f74..0dd0ba33b8a7 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c @@ -84,6 +84,56 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) __vgic_v2_restore_state(vcpu); } +static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) +{ + u32 hsr = read_sysreg(HSR); + u8 ec = hsr >> HSR_EC_SHIFT; + u32 hpfar, far; + + vcpu->arch.fault.hsr = hsr; + + if (ec == HSR_EC_IABT) + far = read_sysreg(HIFAR); + else if (ec == HSR_EC_DABT) + far = read_sysreg(HDFAR); + else + return true; + + /* + * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: + * + * Abort on the stage 2 translation for a memory access from a + * Non-secure PL1 or PL0 mode: + * + * For any Access flag fault or Translation fault, and also for any + * Permission fault on the stage 2 translation of a memory access + * made as part of a translation table walk for a stage 1 translation, + * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR + * is UNKNOWN. + */ + if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) { + u64 par, tmp; + + par = read_sysreg(PAR); + write_sysreg(far, ATS1CPR); + isb(); + + tmp = read_sysreg(PAR); + write_sysreg(par, PAR); + + if (unlikely(tmp & 1)) + return false; /* Translation failed, back to guest */ + + hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4; + } else { + hpfar = read_sysreg(HPFAR); + } + + vcpu->arch.fault.hxfar = far; + vcpu->arch.fault.hpfar = hpfar; + return true; +} + static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; @@ -111,9 +161,13 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) __banked_restore_state(guest_ctxt); /* Jump in the fire! */ +again: exit_code = __guest_enter(vcpu, host_ctxt); /* And we're baaack! */ + if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu)) + goto again; + fp_enabled = __vfp_enabled(); __banked_save_state(guest_ctxt); -- cgit v1.2.3-58-ga151 From bafc6c2a22553193d1d1ce83783b7faa0924158e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:43:18 +0000 Subject: ARM: KVM: Add HYP mode entry code This part is almost entierely borrowed from the existing code, just slightly simplifying the HYP function call (as we now save SPSR_hyp in the world switch). 
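From the host's point of view the ABI described in the vector page comment below stays as simple as before; a call boils down to (sketch only, kvm_call_hyp() being the existing trampoline in interrupts.S):

        /* r0 = HYP function pointer, r1-r3 = its arguments */
        ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);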
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp-entry.S | 157 +++++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/hyp.h | 2 + 3 files changed, 160 insertions(+) create mode 100644 arch/arm/kvm/hyp/hyp-entry.S (limited to 'arch') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index cfab402d7695..a7d3a7e0b702 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -9,4 +9,5 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o +obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S new file mode 100644 index 000000000000..1b4aa02fd364 --- /dev/null +++ b/arch/arm/kvm/hyp/hyp-entry.S @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include +#include + + .arch_extension virt + + .text + .pushsection .hyp.text, "ax" + +.macro load_vcpu reg + mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR +.endm + +/******************************************************************** + * Hypervisor exception vector and handlers + * + * + * The KVM/ARM Hypervisor ABI is defined as follows: + * + * Entry to Hyp mode from the host kernel will happen _only_ when an HVC + * instruction is issued since all traps are disabled when running the host + * kernel as per the Hyp-mode initialization at boot time. + * + * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc + * below) when the HVC instruction is called from SVC mode (i.e. a guest or the + * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC + * instructions are called from within Hyp-mode. + * + * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): + * Switching to Hyp mode is done through a simple HVC #0 instruction. The + * exception vector code will check that the HVC comes from VMID==0. + * - r0 contains a pointer to a HYP function + * - r1, r2, and r3 contain arguments to the above function. + * - The HYP function will be called with its arguments in r0, r1 and r2. + * On HYP function return, we return directly to SVC. + * + * Note that the above is used to execute code in Hyp-mode from a host-kernel + * point of view, and is a different concept from performing a world-switch and + * executing guest code SVC mode (with a VMID != 0). + */ + + .align 5 +__hyp_vector: + .global __hyp_vector +__kvm_hyp_vector: + .weak __kvm_hyp_vector + + @ Hyp-mode exception vector + W(b) hyp_reset + W(b) hyp_undef + W(b) hyp_svc + W(b) hyp_pabt + W(b) hyp_dabt + W(b) hyp_hvc + W(b) hyp_irq + W(b) hyp_fiq + +.macro invalid_vector label, cause + .align +\label: b . 
+.endm + + invalid_vector hyp_reset + invalid_vector hyp_undef + invalid_vector hyp_svc + invalid_vector hyp_pabt + invalid_vector hyp_dabt + invalid_vector hyp_fiq + +hyp_hvc: + /* + * Getting here is either because of a trap from a guest, + * or from executing HVC from the host kernel, which means + * "do something in Hyp mode". + */ + push {r0, r1, r2} + + @ Check syndrome register + mrc p15, 4, r1, c5, c2, 0 @ HSR + lsr r0, r1, #HSR_EC_SHIFT + cmp r0, #HSR_EC_HVC + bne guest_trap @ Not HVC instr. + + /* + * Let's check if the HVC came from VMID 0 and allow simple + * switch to Hyp mode + */ + mrrc p15, 6, r0, r2, c2 + lsr r2, r2, #16 + and r2, r2, #0xff + cmp r2, #0 + bne guest_trap @ Guest called HVC + + /* + * Getting here means host called HVC, we shift parameters and branch + * to Hyp function. + */ + pop {r0, r1, r2} + + /* Check for __hyp_get_vectors */ + cmp r0, #-1 + mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR + beq 1f + + push {lr} + + mov lr, r0 + mov r0, r1 + mov r1, r2 + mov r2, r3 + +THUMB( orr lr, #1) + blx lr @ Call the HYP function + + pop {lr} +1: eret + +guest_trap: + load_vcpu r0 @ Load VCPU pointer to r0 + +#ifdef CONFIG_VFPv3 + @ Check for a VFP access + lsr r1, r1, #HSR_EC_SHIFT + cmp r1, #HSR_EC_CP_0_13 + beq __vfp_guest_restore +#endif + + mov r1, #ARM_EXCEPTION_HVC + b __guest_exit + +hyp_irq: + push {r0, r1, r2} + mov r1, #ARM_EXCEPTION_IRQ + load_vcpu r0 @ Load VCPU pointer to r0 + b __guest_exit + + .ltorg + + .popsection diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 8b1156b691ff..8b9c2eb5a9dc 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -123,4 +123,6 @@ void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host); +int asmlinkage __hyp_do_panic(const char *, int, u32); + #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3-58-ga151 From c36b6db5f3e4c1bd21659aee8e67226352d254ae Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 6 Jan 2016 09:12:42 +0000 Subject: ARM: KVM: Add panic handling code Instead of spinning forever, let's "properly" handle any unexpected exception ("properly" meaning "print a spat on the console and die"). This has proved useful quite a few times... Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/hyp-entry.S | 28 +++++++++++++++++++++------- arch/arm/kvm/hyp/switch.c | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S index 1b4aa02fd364..54a8d67ad980 100644 --- a/arch/arm/kvm/hyp/hyp-entry.S +++ b/arch/arm/kvm/hyp/hyp-entry.S @@ -75,15 +75,29 @@ __kvm_hyp_vector: .macro invalid_vector label, cause .align -\label: b . 
+\label: mov r0, #\cause + b __hyp_panic .endm - invalid_vector hyp_reset - invalid_vector hyp_undef - invalid_vector hyp_svc - invalid_vector hyp_pabt - invalid_vector hyp_dabt - invalid_vector hyp_fiq + invalid_vector hyp_reset ARM_EXCEPTION_RESET + invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED + invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE + invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT + invalid_vector hyp_dabt ARM_EXCEPTION_DATA_ABORT + invalid_vector hyp_fiq ARM_EXCEPTION_FIQ + +ENTRY(__hyp_do_panic) + mrs lr, cpsr + bic lr, lr, #MODE_MASK + orr lr, lr, #SVC_MODE +THUMB( orr lr, lr, #PSR_T_BIT ) + msr spsr_cxsf, lr + ldr lr, =panic + msr ELR_hyp, lr + ldr lr, =kvm_call_hyp + clrex + eret +ENDPROC(__hyp_do_panic) hyp_hvc: /* diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c index 0dd0ba33b8a7..abbe90b5f2ff 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c @@ -192,3 +192,41 @@ again: } __alias(__guest_run) int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu); + +static const char * const __hyp_panic_string[] = { + [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x", + [ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x", + [ARM_EXCEPTION_SOFTWARE] = "\nHYP panic: SVC PC:%08x CPSR:%08x", + [ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x", + [ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x", + [ARM_EXCEPTION_IRQ] = "\nHYP panic: IRQ PC:%08x CPSR:%08x", + [ARM_EXCEPTION_FIQ] = "\nHYP panic: FIQ PC:%08x CPSR:%08x", + [ARM_EXCEPTION_HVC] = "\nHYP panic: HVC PC:%08x CPSR:%08x", +}; + +void __hyp_text __noreturn __hyp_panic(int cause) +{ + u32 elr = read_special(ELR_hyp); + u32 val; + + if (cause == ARM_EXCEPTION_DATA_ABORT) + val = read_sysreg(HDFAR); + else + val = read_special(SPSR); + + if (read_sysreg(VTTBR)) { + struct kvm_vcpu *vcpu; + struct kvm_cpu_context *host_ctxt; + + vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR); + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + __deactivate_traps(vcpu); + __deactivate_vm(vcpu); + __sysreg_restore_state(host_ctxt); + } + + /* Call panic for real */ + __hyp_do_panic(__hyp_panic_string[cause], elr, val); + + unreachable(); +} -- cgit v1.2.3-58-ga151 From b57cd6f6407d420d522ab71b9c0dd11993e49ba1 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 6 Jan 2016 12:10:58 +0000 Subject: ARM: KVM: Change kvm_call_hyp return type to unsigned long Having u64 as the kvm_call_hyp return type is problematic, as it forces all kind of tricks for the return values from HYP to be promoted to 64bit (LE has the LSB in r0, and BE has them in r1). Since the only user of the return value is perfectly happy with a 32bit value, let's make kvm_call_hyp return an unsigned long, which is 32bit on ARM. This solves yet another headache. 
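To spell out the AAPCS detail behind this (background only, not something this patch introduces, and the function names below are made up): a 64bit value is returned in the r0/r1 pair, with endianness deciding which register holds the low word, whereas an unsigned long always comes back in r0 alone:

        u64 wide_ret(void);              /* low 32 bits in r0 on LE, in r1 on BE */
        unsigned long narrow_ret(void);  /* the whole value, always in r0 */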
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 2 +- arch/arm/kvm/interrupts.S | 10 ++-------- 2 files changed, 3 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 02932ba8a653..c62d71751f7a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -165,7 +165,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); -u64 kvm_call_hyp(void *hypfn, ...); +unsigned long kvm_call_hyp(void *hypfn, ...); void force_vm_exit(const cpumask_t *mask); #define KVM_ARCH_WANT_MMU_NOTIFIER diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 7bfb28936914..01eb169f38f6 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -207,20 +207,14 @@ after_vfp_restore: restore_host_regs clrex @ Clear exclusive monitor -#ifndef CONFIG_CPU_ENDIAN_BE8 mov r0, r1 @ Return the return code - mov r1, #0 @ Clear upper bits in return value -#else - @ r1 already has return code - mov r0, #0 @ Clear upper bits in return value -#endif /* CONFIG_CPU_ENDIAN_BE8 */ bx lr @ return to IOCTL /******************************************************************** * Call function in Hyp mode * * - * u64 kvm_call_hyp(void *hypfn, ...); + * unsigned long kvm_call_hyp(void *hypfn, ...); * * This is not really a variadic function in the classic C-way and care must * be taken when calling this to ensure parameters are passed in registers @@ -231,7 +225,7 @@ after_vfp_restore: * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the * function pointer can be passed). The function being called must be mapped * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in r0 and r1. + * passed in r0 (strictly 32bit). * * A function pointer with a value of 0xffffffff has a special meaning, * and is used to implement __hyp_get_vectors in the same way as in -- cgit v1.2.3-58-ga151 From b98e2e728eed3091edbce64cfcc447a482b7726c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:45:17 +0000 Subject: ARM: KVM: Remove the old world switch As we now have a full reimplementation of the world switch, it is time to kiss the old stuff goodbye. I'm not sure we'll miss it. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/interrupts.S | 469 +---------------------------- arch/arm/kvm/interrupts_head.S | 660 ----------------------------------------- 2 files changed, 1 insertion(+), 1128 deletions(-) delete mode 100644 arch/arm/kvm/interrupts_head.S (limited to 'arch') diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 01eb169f38f6..b1bd316f14c0 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -17,198 +17,8 @@ */ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include "interrupts_head.S" .text - .pushsection .hyp.text, "ax" - -/******************************************************************** - * Flush per-VMID TLBs - * - * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); - * - * We rely on the hardware to broadcast the TLB invalidation to all CPUs - * inside the inner-shareable domain (which is the case for all v7 - * implementations). 
If we come across a non-IS SMP implementation, we'll - * have to use an IPI based mechanism. Until then, we stick to the simple - * hardware assisted version. - * - * As v7 does not support flushing per IPA, just nuke the whole TLB - * instead, ignoring the ipa value. - */ -ENTRY(__kvm_tlb_flush_vmid_ipa) - push {r2, r3} - - dsb ishst - add r0, r0, #KVM_VTTBR - ldrd r2, r3, [r0] - mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR - isb - mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) - dsb ish - isb - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Back to VMID #0 - isb @ Not necessary if followed by eret - - pop {r2, r3} - bx lr -ENDPROC(__kvm_tlb_flush_vmid_ipa) - -/** - * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs - * - * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address - * parameter - */ - -ENTRY(__kvm_tlb_flush_vmid) - b __kvm_tlb_flush_vmid_ipa -ENDPROC(__kvm_tlb_flush_vmid) - -/******************************************************************** - * Flush TLBs and instruction caches of all CPUs inside the inner-shareable - * domain, for all VMIDs - * - * void __kvm_flush_vm_context(void); - */ -ENTRY(__kvm_flush_vm_context) - mov r0, #0 @ rn parameter for c15 flushes is SBZ - - /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */ - mcr p15, 4, r0, c8, c3, 4 - /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */ - mcr p15, 0, r0, c7, c1, 0 - dsb ish - isb @ Not necessary if followed by eret - - bx lr -ENDPROC(__kvm_flush_vm_context) - - -/******************************************************************** - * Hypervisor world-switch code - * - * - * int __kvm_vcpu_run(struct kvm_vcpu *vcpu) - */ -ENTRY(__kvm_vcpu_run) - @ Save the vcpu pointer - mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR - - save_host_regs - - restore_vgic_state - restore_timer_state - - @ Store hardware CP15 state and load guest state - read_cp15_state store_to_vcpu = 0 - write_cp15_state read_from_vcpu = 1 - - @ If the host kernel has not been configured with VFPv3 support, - @ then it is safer if we deny guests from using it as well. 
-#ifdef CONFIG_VFPv3 - @ Set FPEXC_EN so the guest doesn't trap floating point instructions - VFPFMRX r2, FPEXC @ VMRS - push {r2} - orr r2, r2, #FPEXC_EN - VFPFMXR FPEXC, r2 @ VMSR -#endif - - @ Configure Hyp-role - configure_hyp_role vmentry - - @ Trap coprocessor CRx accesses - set_hstr vmentry - set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) - set_hdcr vmentry - - @ Write configured ID register into MIDR alias - ldr r1, [vcpu, #VCPU_MIDR] - mcr p15, 4, r1, c0, c0, 0 - - @ Write guest view of MPIDR into VMPIDR - ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)] - mcr p15, 4, r1, c0, c0, 5 - - @ Set up guest memory translation - ldr r1, [vcpu, #VCPU_KVM] - add r1, r1, #KVM_VTTBR - ldrd r2, r3, [r1] - mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR - - @ We're all done, just restore the GPRs and go to the guest - restore_guest_regs - clrex @ Clear exclusive monitor - eret - -__kvm_vcpu_return: - /* - * return convention: - * guest r0, r1, r2 saved on the stack - * r0: vcpu pointer - * r1: exception code - */ - save_guest_regs - - @ Set VMID == 0 - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Write VTTBR - - @ Don't trap coprocessor accesses for host kernel - set_hstr vmexit - set_hdcr vmexit - set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore - -#ifdef CONFIG_VFPv3 - @ Switch VFP/NEON hardware state to the host's - add r7, vcpu, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) - store_vfp_state r7 - add r7, vcpu, #VCPU_HOST_CTXT - ldr r7, [r7] - add r7, r7, #CPU_CTXT_VFP - restore_vfp_state r7 - -after_vfp_restore: - @ Restore FPEXC_EN which we clobbered on entry - pop {r2} - VFPFMXR FPEXC, r2 -#else -after_vfp_restore: -#endif - - @ Reset Hyp-role - configure_hyp_role vmexit - - @ Let host read hardware MIDR - mrc p15, 0, r2, c0, c0, 0 - mcr p15, 4, r2, c0, c0, 0 - - @ Back to hardware MPIDR - mrc p15, 0, r2, c0, c0, 5 - mcr p15, 4, r2, c0, c0, 5 - - @ Store guest CP15 state and restore host state - read_cp15_state store_to_vcpu = 1 - write_cp15_state read_from_vcpu = 0 - - save_timer_state - save_vgic_state - - restore_host_regs - clrex @ Clear exclusive monitor - mov r0, r1 @ Return the return code - bx lr @ return to IOCTL /******************************************************************** * Call function in Hyp mode @@ -239,281 +49,4 @@ after_vfp_restore: ENTRY(kvm_call_hyp) hvc #0 bx lr - -/******************************************************************** - * Hypervisor exception vector and handlers - * - * - * The KVM/ARM Hypervisor ABI is defined as follows: - * - * Entry to Hyp mode from the host kernel will happen _only_ when an HVC - * instruction is issued since all traps are disabled when running the host - * kernel as per the Hyp-mode initialization at boot time. - * - * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc - * below) when the HVC instruction is called from SVC mode (i.e. a guest or the - * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC - * instructions are called from within Hyp-mode. - * - * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): - * Switching to Hyp mode is done through a simple HVC #0 instruction. The - * exception vector code will check that the HVC comes from VMID==0 and if - * so will push the necessary state (SPSR, lr_usr) on the Hyp stack. - * - r0 contains a pointer to a HYP function - * - r1, r2, and r3 contain arguments to the above function. - * - The HYP function will be called with its arguments in r0, r1 and r2. 
- * On HYP function return, we return directly to SVC. - * - * Note that the above is used to execute code in Hyp-mode from a host-kernel - * point of view, and is a different concept from performing a world-switch and - * executing guest code SVC mode (with a VMID != 0). - */ - -/* Handle undef, svc, pabt, or dabt by crashing with a user notice */ -.macro bad_exception exception_code, panic_str - push {r0-r2} - mrrc p15, 6, r0, r1, c2 @ Read VTTBR - lsr r1, r1, #16 - ands r1, r1, #0xff - beq 99f - - load_vcpu @ Load VCPU pointer - .if \exception_code == ARM_EXCEPTION_DATA_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 0 @ HDFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - .if \exception_code == ARM_EXCEPTION_PREF_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 2 @ HIFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - mov r1, #\exception_code - b __kvm_vcpu_return - - @ We were in the host already. Let's craft a panic-ing return to SVC. -99: mrs r2, cpsr - bic r2, r2, #MODE_MASK - orr r2, r2, #SVC_MODE -THUMB( orr r2, r2, #PSR_T_BIT ) - msr spsr_cxsf, r2 - mrs r1, ELR_hyp - ldr r2, =panic - msr ELR_hyp, r2 - ldr r0, =\panic_str - clrex @ Clear exclusive monitor - eret -.endm - - .align 5 -__kvm_hyp_vector: - .globl __kvm_hyp_vector - - @ Hyp-mode exception vector - W(b) hyp_reset - W(b) hyp_undef - W(b) hyp_svc - W(b) hyp_pabt - W(b) hyp_dabt - W(b) hyp_hvc - W(b) hyp_irq - W(b) hyp_fiq - - .align -hyp_reset: - b hyp_reset - - .align -hyp_undef: - bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str - - .align -hyp_svc: - bad_exception ARM_EXCEPTION_HVC, svc_die_str - - .align -hyp_pabt: - bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str - - .align -hyp_dabt: - bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str - - .align -hyp_hvc: - /* - * Getting here is either becuase of a trap from a guest or from calling - * HVC from the host kernel, which means "switch to Hyp mode". - */ - push {r0, r1, r2} - - @ Check syndrome register - mrc p15, 4, r1, c5, c2, 0 @ HSR - lsr r0, r1, #HSR_EC_SHIFT - cmp r0, #HSR_EC_HVC - bne guest_trap @ Not HVC instr. - - /* - * Let's check if the HVC came from VMID 0 and allow simple - * switch to Hyp mode - */ - mrrc p15, 6, r0, r2, c2 - lsr r2, r2, #16 - and r2, r2, #0xff - cmp r2, #0 - bne guest_trap @ Guest called HVC - - /* - * Getting here means host called HVC, we shift parameters and branch - * to Hyp function. 
- */ - pop {r0, r1, r2} - - /* Check for __hyp_get_vectors */ - cmp r0, #-1 - mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR - beq 1f - - push {lr} - mrs lr, SPSR - push {lr} - - mov lr, r0 - mov r0, r1 - mov r1, r2 - mov r2, r3 - -THUMB( orr lr, #1) - blx lr @ Call the HYP function - - pop {lr} - msr SPSR_csxf, lr - pop {lr} -1: eret - -guest_trap: - load_vcpu @ Load VCPU pointer to r0 - str r1, [vcpu, #VCPU_HSR] - - @ Check if we need the fault information - lsr r1, r1, #HSR_EC_SHIFT -#ifdef CONFIG_VFPv3 - cmp r1, #HSR_EC_CP_0_13 - beq switch_to_guest_vfp -#endif - cmp r1, #HSR_EC_IABT - mrceq p15, 4, r2, c6, c0, 2 @ HIFAR - beq 2f - cmp r1, #HSR_EC_DABT - bne 1f - mrc p15, 4, r2, c6, c0, 0 @ HDFAR - -2: str r2, [vcpu, #VCPU_HxFAR] - - /* - * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: - * - * Abort on the stage 2 translation for a memory access from a - * Non-secure PL1 or PL0 mode: - * - * For any Access flag fault or Translation fault, and also for any - * Permission fault on the stage 2 translation of a memory access - * made as part of a translation table walk for a stage 1 translation, - * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR - * is UNKNOWN. - */ - - /* Check for permission fault, and S1PTW */ - mrc p15, 4, r1, c5, c2, 0 @ HSR - and r0, r1, #HSR_FSC_TYPE - cmp r0, #FSC_PERM - tsteq r1, #(1 << 7) @ S1PTW - mrcne p15, 4, r2, c6, c0, 4 @ HPFAR - bne 3f - - /* Preserve PAR */ - mrrc p15, 0, r0, r1, c7 @ PAR - push {r0, r1} - - /* Resolve IPA using the xFAR */ - mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR - isb - mrrc p15, 0, r0, r1, c7 @ PAR - tst r0, #1 - bne 4f @ Failed translation - ubfx r2, r0, #12, #20 - lsl r2, r2, #4 - orr r2, r2, r1, lsl #24 - - /* Restore PAR */ - pop {r0, r1} - mcrr p15, 0, r0, r1, c7 @ PAR - -3: load_vcpu @ Load VCPU pointer to r0 - str r2, [r0, #VCPU_HPFAR] - -1: mov r1, #ARM_EXCEPTION_HVC - b __kvm_vcpu_return - -4: pop {r0, r1} @ Failed translation, return to guest - mcrr p15, 0, r0, r1, c7 @ PAR - clrex - pop {r0, r1, r2} - eret - -/* - * If VFPv3 support is not available, then we will not switch the VFP - * registers; however cp10 and cp11 accesses will still trap and fallback - * to the regular coprocessor emulation code, which currently will - * inject an undefined exception to the guest. - */ -#ifdef CONFIG_VFPv3 -switch_to_guest_vfp: - push {r3-r7} - - @ NEON/VFP used. Turn on VFP access. 
- set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11)) - - @ Switch VFP/NEON hardware state to the guest's - add r7, r0, #VCPU_HOST_CTXT - ldr r7, [r7] - add r7, r7, #CPU_CTXT_VFP - store_vfp_state r7 - add r7, r0, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) - restore_vfp_state r7 - - pop {r3-r7} - pop {r0-r2} - clrex - eret -#endif - - .align -hyp_irq: - push {r0, r1, r2} - mov r1, #ARM_EXCEPTION_IRQ - load_vcpu @ Load VCPU pointer to r0 - b __kvm_vcpu_return - - .align -hyp_fiq: - b hyp_fiq - - .ltorg - - .popsection - - .pushsection ".rodata" - -und_die_str: - .ascii "unexpected undefined exception in Hyp mode at: %#08x\n" -pabt_die_str: - .ascii "unexpected prefetch abort in Hyp mode at: %#08x\n" -dabt_die_str: - .ascii "unexpected data abort in Hyp mode at: %#08x\n" -svc_die_str: - .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n" - - .popsection +ENDPROC(kvm_call_hyp) diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S deleted file mode 100644 index e0943cb80ab3..000000000000 --- a/arch/arm/kvm/interrupts_head.S +++ /dev/null @@ -1,660 +0,0 @@ -#include -#include - -/* Compat macro, until we get rid of this file entierely */ -#define VCPU_GP_REGS (VCPU_GUEST_CTXT + CPU_CTXT_GP_REGS) -#define VCPU_USR_REGS (VCPU_GP_REGS + GP_REGS_USR) -#define VCPU_SVC_REGS (VCPU_GP_REGS + GP_REGS_SVC) -#define VCPU_ABT_REGS (VCPU_GP_REGS + GP_REGS_ABT) -#define VCPU_UND_REGS (VCPU_GP_REGS + GP_REGS_UND) -#define VCPU_IRQ_REGS (VCPU_GP_REGS + GP_REGS_IRQ) -#define VCPU_FIQ_REGS (VCPU_GP_REGS + GP_REGS_FIQ) -#define VCPU_PC (VCPU_GP_REGS + GP_REGS_PC) -#define VCPU_CPSR (VCPU_GP_REGS + GP_REGS_CPSR) - -#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) -#define VCPU_USR_SP (VCPU_USR_REG(13)) -#define VCPU_USR_LR (VCPU_USR_REG(14)) -#define VCPU_CP15_BASE (VCPU_GUEST_CTXT + CPU_CTXT_CP15) -#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15_BASE + (_cp15_reg_idx * 4)) - -/* - * Many of these macros need to access the VCPU structure, which is always - * held in r0. These macros should never clobber r1, as it is used to hold the - * exception code on the return path (except of course the macro that switches - * all the registers before the final jump to the VM). - */ -vcpu .req r0 @ vcpu pointer always in r0 - -/* Clobbers {r2-r6} */ -.macro store_vfp_state vfp_base - @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions - VFPFMRX r2, FPEXC - @ Make sure VFP is enabled so we can touch the registers. - orr r6, r2, #FPEXC_EN - VFPFMXR FPEXC, r6 - - VFPFMRX r3, FPSCR - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so - @ we only need to save them if FPEXC_EX is set. 
- VFPFMRX r4, FPINST - tst r2, #FPEXC_FP2V - VFPFMRX r5, FPINST2, ne @ vmrsne - bic r6, r2, #FPEXC_EX @ FPEXC_EX disable - VFPFMXR FPEXC, r6 -1: - VFPFSTMIA \vfp_base, r6 @ Save VFP registers - stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2 -.endm - -/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */ -.macro restore_vfp_state vfp_base - VFPFLDMIA \vfp_base, r6 @ Load VFP registers - ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2 - - VFPFMXR FPSCR, r3 - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - VFPFMXR FPINST, r4 - tst r2, #FPEXC_FP2V - VFPFMXR FPINST2, r5, ne -1: - VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN) -.endm - -/* These are simply for the macros to work - value don't have meaning */ -.equ usr, 0 -.equ svc, 1 -.equ abt, 2 -.equ und, 3 -.equ irq, 4 -.equ fiq, 5 - -.macro push_host_regs_mode mode - mrs r2, SP_\mode - mrs r3, LR_\mode - mrs r4, SPSR_\mode - push {r2, r3, r4} -.endm - -/* - * Store all host persistent registers on the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro save_host_regs - /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */ - mrs r2, ELR_hyp - push {r2} - - /* usr regs */ - push {r4-r12} @ r0-r3 are always clobbered - mrs r2, SP_usr - mov r3, lr - push {r2, r3} - - push_host_regs_mode svc - push_host_regs_mode abt - push_host_regs_mode und - push_host_regs_mode irq - - /* fiq regs */ - mrs r2, r8_fiq - mrs r3, r9_fiq - mrs r4, r10_fiq - mrs r5, r11_fiq - mrs r6, r12_fiq - mrs r7, SP_fiq - mrs r8, LR_fiq - mrs r9, SPSR_fiq - push {r2-r9} -.endm - -.macro pop_host_regs_mode mode - pop {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all host registers from the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro restore_host_regs - pop {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - pop_host_regs_mode irq - pop_host_regs_mode und - pop_host_regs_mode abt - pop_host_regs_mode svc - - pop {r2, r3} - msr SP_usr, r2 - mov lr, r3 - pop {r4-r12} - - pop {r2} - msr ELR_hyp, r2 -.endm - -/* - * Restore SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r1, r2, r3, r4. - */ -.macro restore_guest_regs_mode mode, offset - add r1, vcpu, \offset - ldm r1, {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all guest registers from the vcpu struct. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers *all* registers. - */ -.macro restore_guest_regs - restore_guest_regs_mode svc, #VCPU_SVC_REGS - restore_guest_regs_mode abt, #VCPU_ABT_REGS - restore_guest_regs_mode und, #VCPU_UND_REGS - restore_guest_regs_mode irq, #VCPU_IRQ_REGS - - add r1, vcpu, #VCPU_FIQ_REGS - ldm r1, {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - @ Load return state - ldr r2, [vcpu, #VCPU_PC] - ldr r3, [vcpu, #VCPU_CPSR] - msr ELR_hyp, r2 - msr SPSR_cxsf, r3 - - @ Load user registers - ldr r2, [vcpu, #VCPU_USR_SP] - ldr r3, [vcpu, #VCPU_USR_LR] - msr SP_usr, r2 - mov lr, r3 - add vcpu, vcpu, #(VCPU_USR_REGS) - ldm vcpu, {r0-r12} -.endm - -/* - * Save SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. 
- * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. - */ -.macro save_guest_regs_mode mode, offset - add r2, vcpu, \offset - mrs r3, SP_\mode - mrs r4, LR_\mode - mrs r5, SPSR_\mode - stm r2, {r3, r4, r5} -.endm - -/* - * Save all guest registers to the vcpu struct - * Expects guest's r0, r1, r2 on the stack. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. - */ -.macro save_guest_regs - @ Store usr registers - add r2, vcpu, #VCPU_USR_REG(3) - stm r2, {r3-r12} - add r2, vcpu, #VCPU_USR_REG(0) - pop {r3, r4, r5} @ r0, r1, r2 - stm r2, {r3, r4, r5} - mrs r2, SP_usr - mov r3, lr - str r2, [vcpu, #VCPU_USR_SP] - str r3, [vcpu, #VCPU_USR_LR] - - @ Store return state - mrs r2, ELR_hyp - mrs r3, spsr - str r2, [vcpu, #VCPU_PC] - str r3, [vcpu, #VCPU_CPSR] - - @ Store other guest registers - save_guest_regs_mode svc, #VCPU_SVC_REGS - save_guest_regs_mode abt, #VCPU_ABT_REGS - save_guest_regs_mode und, #VCPU_UND_REGS - save_guest_regs_mode irq, #VCPU_IRQ_REGS -.endm - -/* Reads cp15 registers from hardware and stores them in memory - * @store_to_vcpu: If 0, registers are written in-order to the stack, - * otherwise to the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2 - r12 - */ -.macro read_cp15_state store_to_vcpu - mrc p15, 0, r2, c1, c0, 0 @ SCTLR - mrc p15, 0, r3, c1, c0, 2 @ CPACR - mrc p15, 0, r4, c2, c0, 2 @ TTBCR - mrc p15, 0, r5, c3, c0, 0 @ DACR - mrrc p15, 0, r6, r7, c2 @ TTBR 0 - mrrc p15, 1, r8, r9, c2 @ TTBR 1 - mrc p15, 0, r10, c10, c2, 0 @ PRRR - mrc p15, 0, r11, c10, c2, 1 @ NMRR - mrc p15, 2, r12, c0, c0, 0 @ CSSELR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - str r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - str r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r2, vcpu, #CP15_OFFSET(c2_TTBR0) - strd r6, r7, [r2] - add r2, vcpu, #CP15_OFFSET(c2_TTBR1) - strd r8, r9, [r2] - str r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - str r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mrc p15, 0, r2, c13, c0, 1 @ CID - mrc p15, 0, r3, c13, c0, 2 @ TID_URW - mrc p15, 0, r4, c13, c0, 3 @ TID_URO - mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV - mrc p15, 0, r6, c5, c0, 0 @ DFSR - mrc p15, 0, r7, c5, c0, 1 @ IFSR - mrc p15, 0, r8, c5, c1, 0 @ ADFSR - mrc p15, 0, r9, c5, c1, 1 @ AIFSR - mrc p15, 0, r10, c6, c0, 0 @ DFAR - mrc p15, 0, r11, c6, c0, 2 @ IFAR - mrc p15, 0, r12, c12, c0, 0 @ VBAR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c13_CID)] - str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - str r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - str r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - str r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - str r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - str r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif - - mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL - mrrc p15, 0, r4, r5, c7 @ PAR - mrc p15, 0, r6, c10, c3, 0 @ AMAIR0 - mrc p15, 0, r7, c10, c3, 1 @ AMAIR1 - - .if \store_to_vcpu == 0 - push {r2,r4-r7} - .else - str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] - add r12, vcpu, #CP15_OFFSET(c7_PAR) - strd r4, r5, [r12] - str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)] - str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)] - .endif -.endm - -/* - * Reads cp15 registers from memory and writes them to 
hardware - * @read_from_vcpu: If 0, registers are read in-order from the stack, - * otherwise from the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - */ -.macro write_cp15_state read_from_vcpu - .if \read_from_vcpu == 0 - pop {r2,r4-r7} - .else - ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] - add r12, vcpu, #CP15_OFFSET(c7_PAR) - ldrd r4, r5, [r12] - ldr r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)] - ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)] - .endif - - mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL - mcrr p15, 0, r4, r5, c7 @ PAR - mcr p15, 0, r6, c10, c3, 0 @ AMAIR0 - mcr p15, 0, r7, c10, c3, 1 @ AMAIR1 - - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c13_CID)] - ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif - - mcr p15, 0, r2, c13, c0, 1 @ CID - mcr p15, 0, r3, c13, c0, 2 @ TID_URW - mcr p15, 0, r4, c13, c0, 3 @ TID_URO - mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV - mcr p15, 0, r6, c5, c0, 0 @ DFSR - mcr p15, 0, r7, c5, c0, 1 @ IFSR - mcr p15, 0, r8, c5, c1, 0 @ ADFSR - mcr p15, 0, r9, c5, c1, 1 @ AIFSR - mcr p15, 0, r10, c6, c0, 0 @ DFAR - mcr p15, 0, r11, c6, c0, 2 @ IFAR - mcr p15, 0, r12, c12, c0, 0 @ VBAR - - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r12, vcpu, #CP15_OFFSET(c2_TTBR0) - ldrd r6, r7, [r12] - add r12, vcpu, #CP15_OFFSET(c2_TTBR1) - ldrd r8, r9, [r12] - ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mcr p15, 0, r2, c1, c0, 0 @ SCTLR - mcr p15, 0, r3, c1, c0, 2 @ CPACR - mcr p15, 0, r4, c2, c0, 2 @ TTBCR - mcr p15, 0, r5, c3, c0, 0 @ DACR - mcrr p15, 0, r6, r7, c2 @ TTBR 0 - mcrr p15, 1, r8, r9, c2 @ TTBR 1 - mcr p15, 0, r10, c10, c2, 0 @ PRRR - mcr p15, 0, r11, c10, c2, 1 @ NMRR - mcr p15, 2, r12, c0, c0, 0 @ CSSELR -.endm - -/* - * Save the VGIC CPU state into memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro save_vgic_state - /* Get VGIC VCTRL base into r2 */ - ldr r2, [vcpu, #VCPU_KVM] - ldr r2, [r2, #KVM_VGIC_VCTRL] - cmp r2, #0 - beq 2f - - /* Compute the address of struct vgic_cpu */ - add r11, vcpu, #VCPU_VGIC_CPU - - /* Save all interesting registers */ - ldr r4, [r2, #GICH_VMCR] - ldr r5, [r2, #GICH_MISR] - ldr r6, [r2, #GICH_EISR0] - ldr r7, [r2, #GICH_EISR1] - ldr r8, [r2, #GICH_ELRSR0] - ldr r9, [r2, #GICH_ELRSR1] - ldr r10, [r2, #GICH_APR] -ARM_BE8(rev r4, r4 ) -ARM_BE8(rev r5, r5 ) -ARM_BE8(rev r6, r6 ) -ARM_BE8(rev r7, r7 ) -ARM_BE8(rev r8, r8 ) -ARM_BE8(rev r9, r9 ) -ARM_BE8(rev r10, r10 ) - - str r4, [r11, #VGIC_V2_CPU_VMCR] - str r5, [r11, #VGIC_V2_CPU_MISR] -#ifdef CONFIG_CPU_ENDIAN_BE8 - str r6, [r11, #(VGIC_V2_CPU_EISR + 4)] - str r7, [r11, #VGIC_V2_CPU_EISR] - str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)] - str r9, [r11, #VGIC_V2_CPU_ELRSR] -#else - str r6, [r11, #VGIC_V2_CPU_EISR] - str r7, [r11, #(VGIC_V2_CPU_EISR + 4)] - str r8, [r11, #VGIC_V2_CPU_ELRSR] - str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)] -#endif - str r10, [r11, #VGIC_V2_CPU_APR] - - /* Clear GICH_HCR */ - mov r5, 
#0 - str r5, [r2, #GICH_HCR] - - /* Save list registers */ - add r2, r2, #GICH_LR0 - add r3, r11, #VGIC_V2_CPU_LR - ldr r4, [r11, #VGIC_CPU_NR_LR] -1: ldr r6, [r2], #4 -ARM_BE8(rev r6, r6 ) - str r6, [r3], #4 - subs r4, r4, #1 - bne 1b -2: -.endm - -/* - * Restore the VGIC CPU state from memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro restore_vgic_state - /* Get VGIC VCTRL base into r2 */ - ldr r2, [vcpu, #VCPU_KVM] - ldr r2, [r2, #KVM_VGIC_VCTRL] - cmp r2, #0 - beq 2f - - /* Compute the address of struct vgic_cpu */ - add r11, vcpu, #VCPU_VGIC_CPU - - /* We only restore a minimal set of registers */ - ldr r3, [r11, #VGIC_V2_CPU_HCR] - ldr r4, [r11, #VGIC_V2_CPU_VMCR] - ldr r8, [r11, #VGIC_V2_CPU_APR] -ARM_BE8(rev r3, r3 ) -ARM_BE8(rev r4, r4 ) -ARM_BE8(rev r8, r8 ) - - str r3, [r2, #GICH_HCR] - str r4, [r2, #GICH_VMCR] - str r8, [r2, #GICH_APR] - - /* Restore list registers */ - add r2, r2, #GICH_LR0 - add r3, r11, #VGIC_V2_CPU_LR - ldr r4, [r11, #VGIC_CPU_NR_LR] -1: ldr r6, [r3], #4 -ARM_BE8(rev r6, r6 ) - str r6, [r2], #4 - subs r4, r4, #1 - bne 1b -2: -.endm - -#define CNTHCTL_PL1PCTEN (1 << 0) -#define CNTHCTL_PL1PCEN (1 << 1) - -/* - * Save the timer state onto the VCPU and allow physical timer/counter access - * for the host. - * - * Assumes vcpu pointer in vcpu reg - * Clobbers r2-r5 - */ -.macro save_timer_state - ldr r4, [vcpu, #VCPU_KVM] - ldr r2, [r4, #KVM_TIMER_ENABLED] - cmp r2, #0 - beq 1f - - mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL - str r2, [vcpu, #VCPU_TIMER_CNTV_CTL] - - isb - - mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL - ldr r4, =VCPU_TIMER_CNTV_CVAL - add r5, vcpu, r4 - strd r2, r3, [r5] - - @ Ensure host CNTVCT == CNTPCT - mov r2, #0 - mcrr p15, 4, r2, r2, c14 @ CNTVOFF - -1: - mov r2, #0 @ Clear ENABLE - mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL - - @ Allow physical timer/counter access for the host - mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL - orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) - mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL -.endm - -/* - * Load the timer state from the VCPU and deny physical timer/counter access - * for the host. - * - * Assumes vcpu pointer in vcpu reg - * Clobbers r2-r5 - */ -.macro restore_timer_state - @ Disallow physical timer access for the guest - @ Physical counter access is allowed - mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL - orr r2, r2, #CNTHCTL_PL1PCTEN - bic r2, r2, #CNTHCTL_PL1PCEN - mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL - - ldr r4, [vcpu, #VCPU_KVM] - ldr r2, [r4, #KVM_TIMER_ENABLED] - cmp r2, #0 - beq 1f - - ldr r2, [r4, #KVM_TIMER_CNTVOFF] - ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)] - mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF - - ldr r4, =VCPU_TIMER_CNTV_CVAL - add r5, vcpu, r4 - ldrd r2, r3, [r5] - mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL - isb - - ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL] - and r2, r2, #3 - mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL -1: -.endm - -.equ vmentry, 0 -.equ vmexit, 1 - -/* Configures the HSTR (Hyp System Trap Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hstr operation - mrc p15, 4, r2, c1, c1, 3 - ldr r3, =HSTR_T(15) - .if \operation == vmentry - orr r2, r2, r3 @ Trap CR{15} - .else - bic r2, r2, r3 @ Don't trap any CRx accesses - .endif - mcr p15, 4, r2, c1, c1, 3 -.endm - -/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return - * (hardware reset value is 0). Keep previous value in r2. - * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if - * VFP wasn't already enabled (always executed on vmtrap). 
- * If a label is specified with vmexit, it is branched to if VFP wasn't - * enabled. - */ -.macro set_hcptr operation, mask, label = none - mrc p15, 4, r2, c1, c1, 2 - ldr r3, =\mask - .if \operation == vmentry - orr r3, r2, r3 @ Trap coproc-accesses defined in mask - .else - bic r3, r2, r3 @ Don't trap defined coproc-accesses - .endif - mcr p15, 4, r3, c1, c1, 2 - .if \operation != vmentry - .if \operation == vmexit - tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) - beq 1f - .endif - isb - .if \label != none - b \label - .endif -1: - .endif -.endm - -/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hdcr operation - mrc p15, 4, r2, c1, c1, 1 - ldr r3, =(HDCR_TPM|HDCR_TPMCR) - .if \operation == vmentry - orr r2, r2, r3 @ Trap some perfmon accesses - .else - bic r2, r2, r3 @ Don't trap any perfmon accesses - .endif - mcr p15, 4, r2, c1, c1, 1 -.endm - -/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ -.macro configure_hyp_role operation - .if \operation == vmentry - ldr r2, [vcpu, #VCPU_HCR] - ldr r3, [vcpu, #VCPU_IRQ_LINES] - orr r2, r2, r3 - .else - mov r2, #0 - .endif - mcr p15, 4, r2, c1, c1, 0 @ HCR -.endm - -.macro load_vcpu - mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR -.endm -- cgit v1.2.3-58-ga151 From d4c7688c51e57be20ca5f3dffa4c8771888a42fc Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Feb 2016 19:56:31 +0000 Subject: ARM: KVM: Switch to C-based stage2 init As we now have hooks to setup VTCR from C code, let's drop the original VTCR setup and reimplement it as part of the HYP code. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 2 ++ arch/arm/include/asm/kvm_host.h | 1 + arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 2 ++ arch/arm/kvm/hyp/s2-setup.c | 34 ++++++++++++++++++++++++++++++++++ arch/arm/kvm/init.S | 8 -------- 6 files changed, 40 insertions(+), 8 deletions(-) create mode 100644 arch/arm/kvm/hyp/s2-setup.c (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 4841225d10ea..3283a2f63254 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -98,6 +98,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); + +extern void __init_stage2_translation(void); #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index c62d71751f7a..0fe41aaf2171 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -224,6 +224,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, static inline void __cpu_init_stage2(void) { + kvm_call_hyp(__init_stage2_translation); } static inline int kvm_arch_dev_ioctl_check_extension(long ext) diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index a7d3a7e0b702..7152369504a6 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o +obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 8b9c2eb5a9dc..ff6de6a3af2d 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -71,6 +71,8 @@ #define HCPTR __ACCESS_CP15(c1, 4, c1, 2) #define 
HSTR __ACCESS_CP15(c1, 4, c1, 3) #define TTBCR __ACCESS_CP15(c2, 0, c0, 2) +#define HTCR __ACCESS_CP15(c2, 4, c0, 2) +#define VTCR __ACCESS_CP15(c2, 4, c1, 2) #define DACR __ACCESS_CP15(c3, 0, c0, 0) #define DFSR __ACCESS_CP15(c5, 0, c0, 0) #define IFSR __ACCESS_CP15(c5, 0, c0, 1) diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c new file mode 100644 index 000000000000..f5f49c53be28 --- /dev/null +++ b/arch/arm/kvm/hyp/s2-setup.c @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2016 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include "hyp.h" + +void __hyp_text __init_stage2_translation(void) +{ + u64 val; + + val = read_sysreg(VTCR) & ~VTCR_MASK; + + val |= read_sysreg(HTCR) & VTCR_HTCR_SH; + val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S; + + write_sysreg(val, VTCR); +} diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S index 3988e72d16ff..1f9ae17476f9 100644 --- a/arch/arm/kvm/init.S +++ b/arch/arm/kvm/init.S @@ -84,14 +84,6 @@ __do_hyp_init: orr r0, r0, r1 mcr p15, 4, r0, c2, c0, 2 @ HTCR - mrc p15, 4, r1, c2, c1, 2 @ VTCR - ldr r2, =VTCR_MASK - bic r1, r1, r2 - bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits - orr r1, r0, r1 - orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S) - mcr p15, 4, r1, c2, c1, 2 @ VTCR - @ Use the same memory attributes for hyp. accesses as the kernel @ (copy MAIRx ro HMAIRx). mrc p15, 0, r0, c10, c2, 0 -- cgit v1.2.3-58-ga151 From fa85e25dad0f3f4e7ff2c58a914dcfe53210f680 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 6 Jan 2016 09:32:23 +0000 Subject: ARM: KVM: Remove __weak attributes Now that the old code is long gone, we can remove all the weak attributes, as there is only one version of the code. 
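For context on why __weak was there at all (a sketch, not part of the patch; the __alias macro body below is quoted from memory of the compiler headers of this era and should be treated as an assumption): __alias() makes the exported symbol an alias of the single hyp-text implementation, so once the old duplicate code is gone there is nothing left for a weak symbol to be overridden by.

	/* Sketch only: the alias pattern used in arch/arm/kvm/hyp/tlb.c. */
	#define __alias(symbol)	__attribute__((alias(#symbol)))

	static void __hyp_text __tlb_flush_vm_context(void)
	{
		/* ... TLB and I-cache maintenance ... */
	}

	/* Same code, now a strong (no longer __weak) external name. */
	__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);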
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/hyp-entry.S | 4 +--- arch/arm/kvm/hyp/switch.c | 2 +- arch/arm/kvm/hyp/tlb.c | 6 +++--- 3 files changed, 5 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S index 54a8d67ad980..78091383a5d9 100644 --- a/arch/arm/kvm/hyp/hyp-entry.S +++ b/arch/arm/kvm/hyp/hyp-entry.S @@ -58,10 +58,8 @@ */ .align 5 -__hyp_vector: - .global __hyp_vector __kvm_hyp_vector: - .weak __kvm_hyp_vector + .global __kvm_hyp_vector @ Hyp-mode exception vector W(b) hyp_reset diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c index abbe90b5f2ff..f11ede159080 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c @@ -191,7 +191,7 @@ again: return exit_code; } -__alias(__guest_run) int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu); +__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu); static const char * const __hyp_panic_string[] = { [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x", diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c index aaa44bbac766..82958b8f6a74 100644 --- a/arch/arm/kvm/hyp/tlb.c +++ b/arch/arm/kvm/hyp/tlb.c @@ -50,14 +50,14 @@ static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) write_sysreg(0, VTTBR); } -__alias(__tlb_flush_vmid) void __weak __kvm_tlb_flush_vmid(struct kvm *kvm); +__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm); static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { __tlb_flush_vmid(kvm); } -__alias(__tlb_flush_vmid_ipa) void __weak __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, +__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); static void __hyp_text __tlb_flush_vm_context(void) @@ -67,4 +67,4 @@ static void __hyp_text __tlb_flush_vm_context(void) dsb(ish); } -__alias(__tlb_flush_vm_context) void __weak __kvm_flush_vm_context(void); +__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void); -- cgit v1.2.3-58-ga151 From 4448932fb09a44d73f820afd8fa145a24b3b3995 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 22:53:33 +0000 Subject: ARM: KVM: Turn CP15 defines to an enum Just like on arm64, having the CP15 registers expressed as a set of #defines has been very conflict-prone. Let's turn it into an enum, which should make it more manageable. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 33 --------------------------------- arch/arm/include/asm/kvm_host.h | 39 +++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/guest.c | 1 - 3 files changed, 39 insertions(+), 34 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 3283a2f63254..083825f12c93 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -21,39 +21,6 @@ #include -/* 0 is reserved as an invalid value. */ -#define c0_MPIDR 1 /* MultiProcessor ID Register */ -#define c0_CSSELR 2 /* Cache Size Selection Register */ -#define c1_SCTLR 3 /* System Control Register */ -#define c1_ACTLR 4 /* Auxiliary Control Register */ -#define c1_CPACR 5 /* Coprocessor Access Control */ -#define c2_TTBR0 6 /* Translation Table Base Register 0 */ -#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */ -#define c2_TTBR1 8 /* Translation Table Base Register 1 */ -#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */ -#define c2_TTBCR 10 /* Translation Table Base Control R. 
*/ -#define c3_DACR 11 /* Domain Access Control Register */ -#define c5_DFSR 12 /* Data Fault Status Register */ -#define c5_IFSR 13 /* Instruction Fault Status Register */ -#define c5_ADFSR 14 /* Auxilary Data Fault Status R */ -#define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */ -#define c6_DFAR 16 /* Data Fault Address Register */ -#define c6_IFAR 17 /* Instruction Fault Address Register */ -#define c7_PAR 18 /* Physical Address Register */ -#define c7_PAR_high 19 /* PAR top 32 bits */ -#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */ -#define c10_PRRR 21 /* Primary Region Remap Register */ -#define c10_NMRR 22 /* Normal Memory Remap Register */ -#define c12_VBAR 23 /* Vector Base Address Register */ -#define c13_CID 24 /* Context ID Register */ -#define c13_TID_URW 25 /* Thread ID, User R/W */ -#define c13_TID_URO 26 /* Thread ID, User R/O */ -#define c13_TID_PRIV 27 /* Thread ID, Privileged */ -#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */ -#define c10_AMAIR0 29 /* Auxilary Memory Attribute Indirection Reg0 */ -#define c10_AMAIR1 30 /* Auxilary Memory Attribute Indirection Reg1 */ -#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */ - #define ARM_EXCEPTION_RESET 0 #define ARM_EXCEPTION_UNDEFINED 1 #define ARM_EXCEPTION_SOFTWARE 2 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 0fe41aaf2171..daf6a71071da 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -88,6 +88,45 @@ struct kvm_vcpu_fault_info { u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; +/* + * 0 is reserved as an invalid value. + * Order should be kept in sync with the save/restore code. + */ +enum vcpu_sysreg { + __INVALID_SYSREG__, + c0_MPIDR, /* MultiProcessor ID Register */ + c0_CSSELR, /* Cache Size Selection Register */ + c1_SCTLR, /* System Control Register */ + c1_ACTLR, /* Auxiliary Control Register */ + c1_CPACR, /* Coprocessor Access Control */ + c2_TTBR0, /* Translation Table Base Register 0 */ + c2_TTBR0_high, /* TTBR0 top 32 bits */ + c2_TTBR1, /* Translation Table Base Register 1 */ + c2_TTBR1_high, /* TTBR1 top 32 bits */ + c2_TTBCR, /* Translation Table Base Control R. */ + c3_DACR, /* Domain Access Control Register */ + c5_DFSR, /* Data Fault Status Register */ + c5_IFSR, /* Instruction Fault Status Register */ + c5_ADFSR, /* Auxilary Data Fault Status R */ + c5_AIFSR, /* Auxilary Instrunction Fault Status R */ + c6_DFAR, /* Data Fault Address Register */ + c6_IFAR, /* Instruction Fault Address Register */ + c7_PAR, /* Physical Address Register */ + c7_PAR_high, /* PAR top 32 bits */ + c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */ + c10_PRRR, /* Primary Region Remap Register */ + c10_NMRR, /* Normal Memory Remap Register */ + c12_VBAR, /* Vector Base Address Register */ + c13_CID, /* Context ID Register */ + c13_TID_URW, /* Thread ID, User R/W */ + c13_TID_URO, /* Thread ID, User R/O */ + c13_TID_PRIV, /* Thread ID, Privileged */ + c14_CNTKCTL, /* Timer Control Register (PL1) */ + c10_AMAIR0, /* Auxilary Memory Attribute Indirection Reg0 */ + c10_AMAIR1, /* Auxilary Memory Attribute Indirection Reg1 */ + NR_CP15_REGS /* Number of regs (incl. 
invalid) */ +}; + struct kvm_cpu_context { struct kvm_regs gp_regs; struct vfp_hard_struct vfp; diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 86e26fbd5ba3..12cbb6824443 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3-58-ga151 From ff3a01d1e029847abb2d0735843ac6f7ae1385ea Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:54:07 +0000 Subject: ARM: KVM: Cleanup asm-offsets.c Since we don't have much assembler left, most of the KVM stuff in asm-offsets.c is now superfluous. Let's get rid of it. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kernel/asm-offsets.c | 30 ------------------------------ 1 file changed, 30 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 2f3e0b064066..1f24c32e11fe 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -170,42 +170,12 @@ int main(void) DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE); BLANK(); #ifdef CONFIG_KVM_ARM_HOST - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); - DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); - DEFINE(CPU_CTXT_CP15, offsetof(struct kvm_cpu_context, cp15)); DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs)); - DEFINE(GP_REGS_SVC, offsetof(struct kvm_regs, svc_regs)); - DEFINE(GP_REGS_ABT, offsetof(struct kvm_regs, abt_regs)); - DEFINE(GP_REGS_UND, offsetof(struct kvm_regs, und_regs)); - DEFINE(GP_REGS_IRQ, offsetof(struct kvm_regs, irq_regs)); - DEFINE(GP_REGS_FIQ, offsetof(struct kvm_regs, fiq_regs)); - DEFINE(GP_REGS_PC, offsetof(struct kvm_regs, usr_regs.ARM_pc)); - DEFINE(GP_REGS_CPSR, offsetof(struct kvm_regs, usr_regs.ARM_cpsr)); - DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr)); - DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); - DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); - DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); - DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar)); DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); - DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); - DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); - DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); - DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); - DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr)); - DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr)); - DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); - DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); - DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); - DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); - DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); - DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); - DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); - DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); - DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); #endif BLANK(); #ifdef 
CONFIG_VDSO -- cgit v1.2.3-58-ga151 From 311b5b363cd28aa778de083178f147f32622e331 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:57:36 +0000 Subject: ARM: KVM: Remove unused hyp_pc field This field was never populated, and the panic code already does something similar. Delete the related code. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 ----- arch/arm/include/asm/kvm_host.h | 1 - arch/arm/kernel/asm-offsets.c | 1 - arch/arm/kvm/handle_exit.c | 5 ----- 4 files changed, 12 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index f710616ccadc..8a8c6ded9ca7 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -108,11 +108,6 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu) return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8; } -static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.fault.hyp_pc; -} - static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index daf6a71071da..19e9aba85463 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -85,7 +85,6 @@ struct kvm_vcpu_fault_info { u32 hsr; /* Hyp Syndrome Register */ u32 hxfar; /* Hyp Data/Inst. Fault Address Register */ u32 hpfar; /* Hyp IPA Fault Address Register */ - u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; /* diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 1f24c32e11fe..27d05813ff09 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -175,7 +175,6 @@ int main(void) DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs)); - DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); #endif BLANK(); #ifdef CONFIG_VDSO diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 3ede90d8b20b..5377d7539a40 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -147,11 +147,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, switch (exception_index) { case ARM_EXCEPTION_IRQ: return 1; - case ARM_EXCEPTION_UNDEFINED: - kvm_err("Undefined exception in Hyp mode at: %#08lx\n", - kvm_vcpu_get_hyp_pc(vcpu)); - BUG(); - panic("KVM: Hypervisor undefined exception!\n"); case ARM_EXCEPTION_DATA_ABORT: case ARM_EXCEPTION_PREF_ABORT: case ARM_EXCEPTION_HVC: -- cgit v1.2.3-58-ga151 From f9e515eeb1833e8bf621948b43ee7c6236c7a167 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:58:28 +0000 Subject: ARM: KVM: Remove handling of ARM_EXCEPTION_DATA/PREF_ABORT These are now handled as a panic, so there is little point in keeping them around. 
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/handle_exit.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 5377d7539a40..3f1ef0dbc899 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -147,8 +147,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, switch (exception_index) { case ARM_EXCEPTION_IRQ: return 1; - case ARM_EXCEPTION_DATA_ABORT: - case ARM_EXCEPTION_PREF_ABORT: case ARM_EXCEPTION_HVC: /* * See ARM ARM B1.14.1: "Hyp traps on instructions -- cgit v1.2.3-58-ga151 From 402f352876ba0df574533e59d72fc3e9871f791a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 22:55:10 +0000 Subject: ARM: KVM: Remove __kvm_hyp_exit/__kvm_hyp_exit_end I have no idea what these were for - probably a leftover from an early implementation. Good bye! Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 083825f12c93..15d58b42d5a1 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -55,9 +55,6 @@ struct kvm_vcpu; extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; -extern char __kvm_hyp_exit[]; -extern char __kvm_hyp_exit_end[]; - extern char __kvm_hyp_vector[]; extern void __kvm_flush_vm_context(void); -- cgit v1.2.3-58-ga151 From 57c841f131ef295b583365d2fddd6b0d16e82c10 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 29 Jan 2016 15:01:28 +0000 Subject: arm/arm64: KVM: Handle out-of-RAM cache maintenance as a NOP So far, our handling of cache maintenance by VA has been pretty simple: Either the access is in the guest RAM and generates a S2 fault, which results in the page being mapped RW, or we go down the io_mem_abort() path, and nuke the guest. The first one is fine, but the second one is extremely weird. Treating the CM as an I/O is wrong, and nothing in the ARM ARM indicates that we should generate a fault for something that cannot end-up in the cache anyway (even if the guest maps it, it will keep on faulting at stage-2 for emulation). So let's just skip this instruction, and let the guest get away with it. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ arch/arm/kvm/mmu.c | 16 ++++++++++++++++ arch/arm64/include/asm/kvm_emulate.h | 5 +++++ 3 files changed, 26 insertions(+) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 8a8c6ded9ca7..ee5328fc4b06 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -138,6 +138,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; } +static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM); +} + /* Get Access Size from a data abort */ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) { diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index aba61fd3697a..c3eb10ea0971 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1430,6 +1430,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) goto out_unlock; } + /* + * Check for a cache maintenance operation. Since we + * ended-up here, we know it is outside of any memory + * slot. 
But we can't find out if that is for a device, + * or if the guest is just being stupid. The only thing + * we know for sure is that this range cannot be cached. + * + * So let's assume that the guest is just being + * cautious, and skip the instruction. + */ + if (kvm_vcpu_dabt_is_cm(vcpu)) { + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + ret = 1; + goto out_unlock; + } + /* * The IPA is reported as [MAX:12], so we need to * complement it with the bottom 12 bits from the diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 779a5872a2c5..4df8e7a58c6b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -189,6 +189,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); } +static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM); +} + static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) { return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT); -- cgit v1.2.3-58-ga151 From 3a3604bc5eb4ae21ec95b13fdd15959e8f70c434 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 13:19:45 +0000 Subject: arm64: KVM: Switch to C-based stage2 init There is no real need to leave the stage2 initialization as part of the early HYP bootstrap, and we can easily postpone it to the point where we can safely run C code. This will help VHE, which doesn't need any of this bootstrap. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_asm.h | 2 ++ arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/hyp-init.S | 15 +------------ arch/arm64/kvm/hyp/Makefile | 1 + arch/arm64/kvm/hyp/s2-setup.c | 44 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 50 insertions(+), 14 deletions(-) create mode 100644 arch/arm64/kvm/hyp/s2-setup.c (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 2ad8930e7eb3..1037392ae134 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -45,6 +45,8 @@ extern u64 __vgic_v3_get_ich_vtr_el2(void); extern u32 __kvm_get_mdcr_el2(void); +extern void __init_stage2_translation(void); + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index fe86cf9f288b..43688d93c756 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -334,6 +335,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, static inline void __cpu_init_stage2(void) { + kvm_call_hyp(__init_stage2_translation); } static inline void kvm_arch_hardware_disable(void) {} diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index d073b5a216f7..7d8747c6427c 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -87,26 +87,13 @@ __do_hyp_init: #endif /* * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in - * TCR_EL2 and VTCR_EL2. + * TCR_EL2. */ mrs x5, ID_AA64MMFR0_EL1 bfi x4, x5, #16, #3 msr tcr_el2, x4 - ldr x4, =VTCR_EL2_FLAGS - bfi x4, x5, #16, #3 - /* - * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in - * VTCR_EL2. 
- */ - mrs x5, ID_AA64MMFR1_EL1 - ubfx x5, x5, #5, #1 - lsl x5, x5, #VTCR_EL2_VS - orr x4, x4, x5 - - msr vtcr_el2, x4 - mrs x4, mair_el1 msr mair_el2, x4 isb diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 826032bc3945..5326e664fd41 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o +obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c new file mode 100644 index 000000000000..17e8cc09a1d8 --- /dev/null +++ b/arch/arm64/kvm/hyp/s2-setup.c @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2016 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include "hyp.h" + +void __hyp_text __init_stage2_translation(void) +{ + u64 val = VTCR_EL2_FLAGS; + u64 tmp; + + /* + * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS + * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while + * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2... + */ + val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16; + + /* + * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS + * bit in VTCR_EL2. + */ + tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf; + val |= (tmp == 2) ? VTCR_EL2_VS : 0; + + write_sysreg(val, vtcr_el2); +} -- cgit v1.2.3-58-ga151 From 82deae0fc8ba256c1061dd4de42f0ef6cb9f954f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 9 Jun 2014 19:47:09 +0100 Subject: arm/arm64: Add new is_kernel_in_hyp_mode predicate With ARMv8.1 VHE extension, it will be possible to run the kernel at EL2 (aka HYP mode). In order for the kernel to easily find out where it is running, add a new predicate that returns whether or not the kernel is in HYP mode. For completeness, the 32bit code also get such a predicate (always returning false) so that code common to both architecture (timers, KVM) can use it transparently. 
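A usage sketch (illustrative only, not taken from the patch; the function name is made up) of how code shared between arm and arm64 is expected to branch on the new predicate:

	/* Sketch: common init code that only has work to do in split mode. */
	static int example_hyp_setup(void)
	{
		if (is_kernel_in_hyp_mode())
			return 0;	/* kernel already runs at EL2 (VHE) */

		/* classic path: install EL2 vectors, stacks, page tables, ... */
		return 0;
	}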
Acked-by: Christoffer Dall Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier --- arch/arm/include/asm/virt.h | 5 +++++ arch/arm64/include/asm/virt.h | 10 ++++++++++ 2 files changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 5fdbfea6defb..d4ceaf5f299b 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -75,6 +75,11 @@ static inline bool is_hyp_mode_mismatched(void) return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); } +static inline bool is_kernel_in_hyp_mode(void) +{ + return false; +} + /* The section containing the hypervisor text */ extern char __hyp_text_start[]; extern char __hyp_text_end[]; diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 7a5df5252dd7..9f22dd607958 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -23,6 +23,8 @@ #ifndef __ASSEMBLY__ +#include + /* * __boot_cpu_mode records what mode CPUs were booted in. * A correctly-implemented bootloader must start all CPUs in the same mode: @@ -50,6 +52,14 @@ static inline bool is_hyp_mode_mismatched(void) return __boot_cpu_mode[0] != __boot_cpu_mode[1]; } +static inline bool is_kernel_in_hyp_mode(void) +{ + u64 el; + + asm("mrs %0, CurrentEL" : "=r" (el)); + return el == CurrentEL_EL2; +} + /* The section containing the hypervisor text */ extern char __hyp_text_start[]; extern char __hyp_text_end[]; -- cgit v1.2.3-58-ga151 From d88701bea3664cea99b8b7380f63a3bd0ec3ead3 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 11:24:05 +0000 Subject: arm64: Add ARM64_HAS_VIRT_HOST_EXTN feature Add a new ARM64_HAS_VIRT_HOST_EXTN features to indicate that the CPU has the ARMv8.1 VHE capability. This will be used to trigger kernel patching in KVM. 
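To make "trigger kernel patching" concrete (a sketch; cpus_have_cap() as a runtime check is an assumption about the cpufeature API, while the ALTERNATIVE() form matches how the later patches in this series consume the capability):

	/* Runtime test (sketch): */
	if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
		pr_info("Kernel running at EL2 (VHE)\n");

	/* Boot-time patching (sketch): the 'nop' is rewritten when the cap is set. */
	asm volatile(ALTERNATIVE("nop", "isb", ARM64_HAS_VIRT_HOST_EXTN));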
Acked-by: Christoffer Dall Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/cpufeature.h | 6 +++++- arch/arm64/kernel/cpufeature.c | 11 +++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 8f271b83f910..a5c769b1c65b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -30,8 +30,12 @@ #define ARM64_HAS_LSE_ATOMICS 5 #define ARM64_WORKAROUND_CAVIUM_23154 6 #define ARM64_WORKAROUND_834220 7 +/* #define ARM64_HAS_NO_HW_PREFETCH 8 */ +/* #define ARM64_HAS_UAO 9 */ +/* #define ARM64_ALT_PAN_NOT_UAO 10 */ +#define ARM64_HAS_VIRT_HOST_EXTN 11 -#define ARM64_NCAPS 8 +#define ARM64_NCAPS 12 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 5c90aa490a2b..ba745199297e 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -26,6 +26,7 @@ #include #include #include +#include unsigned long elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); @@ -621,6 +622,11 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry) return has_sre; } +static bool runs_at_el2(const struct arm64_cpu_capabilities *entry) +{ + return is_kernel_in_hyp_mode(); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -651,6 +657,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .min_field_value = 2, }, #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ + { + .desc = "Virtualization Host Extensions", + .capability = ARM64_HAS_VIRT_HOST_EXTN, + .matches = runs_at_el2, + }, {}, }; -- cgit v1.2.3-58-ga151 From 1e947bad0b63b351cbdd9ad55ea5bf7e31c76036 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 11:59:54 +0000 Subject: arm64: KVM: Skip HYP setup when already running in HYP With the kernel running at EL2, there is no point trying to configure page tables for HYP, as the kernel is already mapped. Take this opportunity to refactor the whole init a bit, allowing the various parts of the hypervisor bringup to be split across multiple functions. 
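Condensed from the kvm_arch_init() hunk in the diff below (a summary sketch; error labels are shortened relative to the real code):

	err = init_common_resources();
	if (err)
		return err;

	if (is_kernel_in_hyp_mode())
		err = init_vhe_mode();		/* already at EL2: no HYP mappings needed */
	else
		err = init_hyp_mode();		/* classic split mode: build HYP page tables */
	if (err)
		goto out_err;

	err = init_subsystems();		/* vgic, timers, perf, coproc tables */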
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/arm.c | 173 +++++++++++++++++++++++++++++++++++------------------ arch/arm/kvm/mmu.c | 7 +++ 2 files changed, 121 insertions(+), 59 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index fcf6c130c986..686350d05174 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -967,6 +967,11 @@ long kvm_arch_vm_ioctl(struct file *filp, } } +static void cpu_init_stage2(void *dummy) +{ + __cpu_init_stage2(); +} + static void cpu_init_hyp_mode(void *dummy) { phys_addr_t boot_pgd_ptr; @@ -1036,6 +1041,82 @@ static inline void hyp_cpu_pm_init(void) } #endif +static void teardown_common_resources(void) +{ + free_percpu(kvm_host_cpu_state); +} + +static int init_common_resources(void) +{ + kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); + if (!kvm_host_cpu_state) { + kvm_err("Cannot allocate host CPU state\n"); + return -ENOMEM; + } + + return 0; +} + +static int init_subsystems(void) +{ + int err; + + /* + * Init HYP view of VGIC + */ + err = kvm_vgic_hyp_init(); + switch (err) { + case 0: + vgic_present = true; + break; + case -ENODEV: + case -ENXIO: + vgic_present = false; + break; + default: + return err; + } + + /* + * Init HYP architected timer support + */ + err = kvm_timer_hyp_init(); + if (err) + return err; + + kvm_perf_init(); + kvm_coproc_table_init(); + + return 0; +} + +static void teardown_hyp_mode(void) +{ + int cpu; + + if (is_kernel_in_hyp_mode()) + return; + + free_hyp_pgds(); + for_each_possible_cpu(cpu) + free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); +} + +static int init_vhe_mode(void) +{ + /* + * Execute the init code on each CPU. + */ + on_each_cpu(cpu_init_stage2, NULL, 1); + + /* set size of VMID supported by CPU */ + kvm_vmid_bits = kvm_get_vmid_bits(); + kvm_info("%d-bit VMID\n", kvm_vmid_bits); + + kvm_info("VHE mode initialized successfully\n"); + return 0; +} + /** * Inits Hyp-mode on all online CPUs */ @@ -1066,7 +1147,7 @@ static int init_hyp_mode(void) stack_page = __get_free_page(GFP_KERNEL); if (!stack_page) { err = -ENOMEM; - goto out_free_stack_pages; + goto out_err; } per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; @@ -1078,13 +1159,13 @@ static int init_hyp_mode(void) err = create_hyp_mappings(__hyp_text_start, __hyp_text_end); if (err) { kvm_err("Cannot map world-switch code\n"); - goto out_free_mappings; + goto out_err; } err = create_hyp_mappings(__start_rodata, __end_rodata); if (err) { kvm_err("Cannot map rodata section\n"); - goto out_free_mappings; + goto out_err; } /* @@ -1096,20 +1177,10 @@ static int init_hyp_mode(void) if (err) { kvm_err("Cannot map hyp stack\n"); - goto out_free_mappings; + goto out_err; } } - /* - * Map the host CPU structures - */ - kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); - if (!kvm_host_cpu_state) { - err = -ENOMEM; - kvm_err("Cannot allocate host CPU state\n"); - goto out_free_mappings; - } - for_each_possible_cpu(cpu) { kvm_cpu_context_t *cpu_ctxt; @@ -1118,7 +1189,7 @@ static int init_hyp_mode(void) if (err) { kvm_err("Cannot map host CPU state: %d\n", err); - goto out_free_context; + goto out_err; } } @@ -1127,34 +1198,22 @@ static int init_hyp_mode(void) */ on_each_cpu(cpu_init_hyp_mode, NULL, 1); - /* - * Init HYP view of VGIC - */ - err = kvm_vgic_hyp_init(); - switch (err) { - case 0: - vgic_present = true; - break; - case -ENODEV: - case -ENXIO: - vgic_present = false; - break; - default: - goto out_free_context; - } - - /* - * Init HYP architected timer support - */ - err = 
kvm_timer_hyp_init(); - if (err) - goto out_free_context; - #ifndef CONFIG_HOTPLUG_CPU free_boot_hyp_pgd(); #endif - kvm_perf_init(); + cpu_notifier_register_begin(); + + err = __register_cpu_notifier(&hyp_init_cpu_nb); + + cpu_notifier_register_done(); + + if (err) { + kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); + goto out_err; + } + + hyp_cpu_pm_init(); /* set size of VMID supported by CPU */ kvm_vmid_bits = kvm_get_vmid_bits(); @@ -1163,14 +1222,9 @@ static int init_hyp_mode(void) kvm_info("Hyp mode initialized successfully\n"); return 0; -out_free_context: - free_percpu(kvm_host_cpu_state); -out_free_mappings: - free_hyp_pgds(); -out_free_stack_pages: - for_each_possible_cpu(cpu) - free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); + out_err: + teardown_hyp_mode(); kvm_err("error initializing Hyp mode: %d\n", err); return err; } @@ -1214,26 +1268,27 @@ int kvm_arch_init(void *opaque) } } - cpu_notifier_register_begin(); - - err = init_hyp_mode(); + err = init_common_resources(); if (err) - goto out_err; + return err; - err = __register_cpu_notifier(&hyp_init_cpu_nb); - if (err) { - kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); + if (is_kernel_in_hyp_mode()) + err = init_vhe_mode(); + else + err = init_hyp_mode(); + if (err) goto out_err; - } - - cpu_notifier_register_done(); - hyp_cpu_pm_init(); + err = init_subsystems(); + if (err) + goto out_hyp; - kvm_coproc_table_init(); return 0; + +out_hyp: + teardown_hyp_mode(); out_err: - cpu_notifier_register_done(); + teardown_common_resources(); return err; } diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index c3eb10ea0971..58dbd5c439df 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "trace.h" @@ -598,6 +599,9 @@ int create_hyp_mappings(void *from, void *to) unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long end = KERN_TO_HYP((unsigned long)to); + if (is_kernel_in_hyp_mode()) + return 0; + start = start & PAGE_MASK; end = PAGE_ALIGN(end); @@ -630,6 +634,9 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long end = KERN_TO_HYP((unsigned long)to); + if (is_kernel_in_hyp_mode()) + return 0; + /* Check for a valid kernel IO mapping */ if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) return -EINVAL; -- cgit v1.2.3-58-ga151 From b81125c791a2958cc60ae968fc1cdea82b7cd3b0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 13:52:12 +0000 Subject: arm64: KVM: VHE: Patch out use of HVC With VHE, the host never issues an HVC instruction to get into the KVM code, as we can simply branch there. Use runtime code patching to simplify things a bit. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp.S | 7 +++++++ arch/arm64/kvm/hyp/hyp-entry.S | 40 +++++++++++++++++++++++++++++++--------- 2 files changed, 38 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 0ccdcbbef3c2..0689a74e6ba0 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -17,7 +17,9 @@ #include +#include #include +#include /* * u64 kvm_call_hyp(void *hypfn, ...); @@ -38,6 +40,11 @@ * arch/arm64/kernel/hyp_stub.S. 
*/ ENTRY(kvm_call_hyp) +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN hvc #0 ret +alternative_else + b __vhe_hyp_call + nop +alternative_endif ENDPROC(kvm_call_hyp) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 93e8d983c0bd..1bdeee70833e 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -38,6 +38,34 @@ ldp x0, x1, [sp], #16 .endm +.macro do_el2_call + /* + * Shuffle the parameters before calling the function + * pointed to in x0. Assumes parameters in x[1,2,3]. + */ + sub sp, sp, #16 + str lr, [sp] + mov lr, x0 + mov x0, x1 + mov x1, x2 + mov x2, x3 + blr lr + ldr lr, [sp] + add sp, sp, #16 +.endm + +ENTRY(__vhe_hyp_call) + do_el2_call + /* + * We used to rely on having an exception return to get + * an implicit isb. In the E2H case, we don't have it anymore. + * rather than changing all the leaf functions, just do it here + * before returning to the rest of the kernel. + */ + isb + ret +ENDPROC(__vhe_hyp_call) + el1_sync: // Guest trapped into EL2 save_x0_to_x3 @@ -58,19 +86,13 @@ el1_sync: // Guest trapped into EL2 mrs x0, vbar_el2 b 2f -1: stp lr, xzr, [sp, #-16]! - +1: /* - * Compute the function address in EL2, and shuffle the parameters. + * Perform the EL2 call */ kern_hyp_va x0 - mov lr, x0 - mov x0, x1 - mov x1, x2 - mov x2, x3 - blr lr + do_el2_call - ldp lr, xzr, [sp], #16 2: eret el1_trap: -- cgit v1.2.3-58-ga151 From cedbb8b78c4f09f0d4519d5d35519b64487f1f0a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 13:50:34 +0000 Subject: arm64: KVM: VHE: Patch out kern_hyp_va The kern_hyp_va macro is pretty meaninless with VHE, as there is only one mapping - the kernel one. In order to keep the code readable and efficient, use runtime patching to replace the 'and' instruction used to compute the VA with a 'nop'. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_mmu.h | 12 +++++++++++- arch/arm64/kvm/hyp/hyp.h | 25 ++++++++++++++++++++++--- 2 files changed, 33 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 736433912a1e..9a9318adefa6 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -23,13 +23,16 @@ #include /* - * As we only have the TTBR0_EL2 register, we cannot express + * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express * "negative" addresses. This makes it impossible to directly share * mappings with the kernel. * * Instead, give the HYP mode its own VA region at a fixed offset from * the kernel by just masking the top bits (which are all ones for a * kernel address). + * + * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these + * macros (the entire kernel runs at EL2). */ #define HYP_PAGE_OFFSET_SHIFT VA_BITS #define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1) @@ -56,12 +59,19 @@ #ifdef __ASSEMBLY__ +#include +#include + /* * Convert a kernel VA into a HYP VA. * reg: VA to be converted. 
*/ .macro kern_hyp_va reg +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN and \reg, \reg, #HYP_PAGE_OFFSET_MASK +alternative_else + nop +alternative_endif .endm #else diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index fb275178b6af..fc502f356147 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -25,9 +25,28 @@ #define __hyp_text __section(.hyp.text) notrace -#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK) -#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \ - + PAGE_OFFSET) +static inline unsigned long __kern_hyp_va(unsigned long v) +{ + asm volatile(ALTERNATIVE("and %0, %0, %1", + "nop", + ARM64_HAS_VIRT_HOST_EXTN) + : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK)); + return v; +} + +#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) + +static inline unsigned long __hyp_kern_va(unsigned long v) +{ + u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET; + asm volatile(ALTERNATIVE("add %0, %0, %1", + "nop", + ARM64_HAS_VIRT_HOST_EXTN) + : "+r" (v) : "r" (offset)); + return v; +} + +#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v))) /** * hyp_alternate_select - Generates patchable code sequences that are -- cgit v1.2.3-58-ga151 From 915ccd1dbf21e0621fb9415ad76e5c2b31ec137a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 12:00:00 +0000 Subject: arm64: KVM: VHE: Introduce unified system register accessors VHE brings its own bag of new system registers, or rather system register accessors, as it define new ways to access both guest and host system registers. For example, from the host: - The host TCR_EL2 register is accessed using the TCR_EL1 accessor - The guest TCR_EL1 register is accessed using the TCR_EL12 accessor Obviously, this is confusing. A way to somehow reduce the complexity of writing code for both ARMv8 and ARMv8.1 is to use a set of unified accessors that will generate the right sysreg, depending on the mode the CPU is running in. For example: - read_sysreg_el1(tcr) will use TCR_EL1 on ARMv8, and TCR_EL12 on ARMv8.1 with VHE. - read_sysreg_el2(tcr) will use TCR_EL2 on ARMv8, and TCR_EL1 on ARMv8.1 with VHE. We end up with three sets of accessors ({read,write}_sysreg_el[012]) that can be directly used from C code. We take this opportunity to also add the definition for the new VHE sysregs. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/hyp.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index fc502f356147..744c919cc8ef 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -48,6 +48,78 @@ static inline unsigned long __hyp_kern_va(unsigned long v) #define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v))) +#define read_sysreg_elx(r,nvh,vh) \ + ({ \ + u64 reg; \ + asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ + "mrs_s %0, " __stringify(r##vh),\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : "=r" (reg)); \ + reg; \ + }) + +#define write_sysreg_elx(v,r,nvh,vh) \ + do { \ + u64 __val = (u64)(v); \ + asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ + "msr_s " __stringify(r##vh) ", %x0",\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : : "rZ" (__val)); \ + } while (0) + +/* + * Unified accessors for registers that have a different encoding + * between VHE and non-VHE. They must be specified without their "ELx" + * encoding. 
+ */
+#define read_sysreg_el2(r)						\
+	({								\
+		u64 reg;						\
+		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
+					 "mrs %0, " __stringify(r##_EL1),\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+			     : "=r" (reg));				\
+		reg;							\
+	})
+
+#define write_sysreg_el2(v,r)						\
+	do {								\
+		u64 __val = (u64)(v);					\
+		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
+					 "msr " __stringify(r##_EL1) ", %x0",\
+					 ARM64_HAS_VIRT_HOST_EXTN)	\
+					 : : "rZ" (__val));		\
+	} while (0)
+
+#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
+#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
+#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
+#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
+
+/* The VHE specific system registers and their encoding */
+#define sctlr_EL12              sys_reg(3, 5, 1, 0, 0)
+#define cpacr_EL12              sys_reg(3, 5, 1, 0, 2)
+#define ttbr0_EL12              sys_reg(3, 5, 2, 0, 0)
+#define ttbr1_EL12              sys_reg(3, 5, 2, 0, 1)
+#define tcr_EL12                sys_reg(3, 5, 2, 0, 2)
+#define afsr0_EL12              sys_reg(3, 5, 5, 1, 0)
+#define afsr1_EL12              sys_reg(3, 5, 5, 1, 1)
+#define esr_EL12                sys_reg(3, 5, 5, 2, 0)
+#define far_EL12                sys_reg(3, 5, 6, 0, 0)
+#define mair_EL12               sys_reg(3, 5, 10, 2, 0)
+#define amair_EL12              sys_reg(3, 5, 10, 3, 0)
+#define vbar_EL12               sys_reg(3, 5, 12, 0, 0)
+#define contextidr_EL12         sys_reg(3, 5, 13, 0, 1)
+#define cntkctl_EL12            sys_reg(3, 5, 14, 1, 0)
+#define cntp_tval_EL02          sys_reg(3, 5, 14, 2, 0)
+#define cntp_ctl_EL02           sys_reg(3, 5, 14, 2, 1)
+#define cntp_cval_EL02          sys_reg(3, 5, 14, 2, 2)
+#define cntv_tval_EL02          sys_reg(3, 5, 14, 3, 0)
+#define cntv_ctl_EL02           sys_reg(3, 5, 14, 3, 1)
+#define cntv_cval_EL02          sys_reg(3, 5, 14, 3, 2)
+#define spsr_EL12               sys_reg(3, 5, 4, 0, 0)
+#define elr_EL12                sys_reg(3, 5, 4, 0, 1)
+
 /**
  * hyp_alternate_select - Generates patchable code sequences that are
  * used to switch between two implementations of a function, depending
-- cgit v1.2.3-58-ga151


From edef528dc4bdab1504e72e0f5436b18f3777efc0 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 28 Oct 2015 12:17:35 +0000
Subject: arm64: KVM: VHE: Differentiate host/guest sysreg save/restore

With ARMv8, host and guest share the same system register file, making
the save/restore procedure completely symmetrical. With VHE, host and
guest now have different requirements, as they use different sysregs.

In order to prepare for this, add split sysreg save/restore functions
for both host and guest. No functional changes yet.
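To illustrate where this split is headed (a sketch based on later patches
in this series, not something introduced here), the host-side helpers
eventually get runtime-patched so that the full EL1 save/restore can be
skipped on VHE, while the guest side keeps saving everything:

    /* Sketch only: what a later patch turns the host path into, reusing
     * the existing hyp_alternate_select() helper from hyp.h. */
    static hyp_alternate_select(__sysreg_call_save_host_state,
                                __sysreg_save_state, __sysreg_do_nothing,
                                ARM64_HAS_VIRT_HOST_EXTN);

    void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt)
    {
            __sysreg_call_save_host_state()(ctxt);
    }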
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/hyp.h | 6 ++++-- arch/arm64/kvm/hyp/switch.c | 10 +++++----- arch/arm64/kvm/hyp/sysreg-sr.c | 24 ++++++++++++++++++++++-- 3 files changed, 31 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 744c919cc8ef..5dfa8838a3e1 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -153,8 +153,10 @@ void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); -void __sysreg_save_state(struct kvm_cpu_context *ctxt); -void __sysreg_restore_state(struct kvm_cpu_context *ctxt); +void __sysreg_save_host_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt); +void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt); void __sysreg32_save_state(struct kvm_vcpu *vcpu); void __sysreg32_restore_state(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index f0e7bdfae134..68f3cba25910 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -102,7 +102,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); guest_ctxt = &vcpu->arch.ctxt; - __sysreg_save_state(host_ctxt); + __sysreg_save_host_state(host_ctxt); __debug_cond_save_host_state(vcpu); __activate_traps(vcpu); @@ -116,7 +116,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) * to Cortex-A57 erratum #852523. */ __sysreg32_restore_state(vcpu); - __sysreg_restore_state(guest_ctxt); + __sysreg_restore_guest_state(guest_ctxt); __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); /* Jump in the fire! 
*/
@@ -125,7 +125,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
 	fp_enabled = __fpsimd_enabled();
 
-	__sysreg_save_state(guest_ctxt);
+	__sysreg_save_guest_state(guest_ctxt);
 	__sysreg32_save_state(vcpu);
 	__timer_save_state(vcpu);
 	__vgic_save_state(vcpu);
@@ -133,7 +133,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 	__deactivate_traps(vcpu);
 	__deactivate_vm(vcpu);
 
-	__sysreg_restore_state(host_ctxt);
+	__sysreg_restore_host_state(host_ctxt);
 
 	if (fp_enabled) {
 		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
@@ -165,7 +165,7 @@ void __hyp_text __noreturn __hyp_panic(void)
 		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 		__deactivate_traps(vcpu);
 		__deactivate_vm(vcpu);
-		__sysreg_restore_state(host_ctxt);
+		__sysreg_restore_host_state(host_ctxt);
 	}
 
 	/* Call panic for real */
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 425630980229..bd5b543f90da 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -24,7 +24,7 @@
 #include "hyp.h"
 
 /* ctxt is already in the HYP VA space */
-void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->sys_regs[MPIDR_EL1]	= read_sysreg(vmpidr_el2);
 	ctxt->sys_regs[CSSELR_EL1]	= read_sysreg(csselr_el1);
@@ -57,7 +57,17 @@ void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
 	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1);
 }
 
-void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_save_state(ctxt);
+}
+
+void __hyp_text __sysreg_save_guest_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_save_state(ctxt);
+}
+
+static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
 {
 	write_sysreg(ctxt->sys_regs[MPIDR_EL1],	  vmpidr_el2);
 	write_sysreg(ctxt->sys_regs[CSSELR_EL1],  csselr_el1);
@@ -90,6 +100,16 @@ void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
 }
 
+void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_restore_state(ctxt);
+}
+
+void __hyp_text __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt)
+{
+	__sysreg_restore_state(ctxt);
+}
+
 void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
 {
 	u64 *spsr, *sysreg;
-- cgit v1.2.3-58-ga151


From 9c6c35683286ddf47baf8c6d1931f3af63379490 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 28 Oct 2015 12:39:38 +0000
Subject: arm64: KVM: VHE: Split save/restore of registers shared between guest and host

A handful of system registers are still shared between host and guest,
even while using VHE (tpidr*_el[01] and actlr_el1). Also, some of the
vcpu state (sp_el0, PC and PSTATE) must be saved/restored on entry/exit,
as they are used on the host as well.

In order to facilitate the introduction of a VHE-specific sysreg
save/restore, move the access to these registers to their own
save/restore functions.

No functional change.
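For reference, a sketch of how the world switch uses these helpers (the
call sites themselves are unchanged by this patch; only the internal
grouping of registers differs):

    /* Both paths currently save the common state (tpidr*_el[01], actlr_el1,
     * sp_el0, ELR_EL2/SPSR_EL2) plus the full EL1 state. */
    __sysreg_save_host_state(host_ctxt);     /* before entering the guest */
    /* ... __guest_enter(), run the guest, trap back ... */
    __sysreg_save_guest_state(guest_ctxt);   /* after the guest exits */
    __sysreg_restore_host_state(host_ctxt);  /* before returning to the host */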
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/sysreg-sr.c | 48 +++++++++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index bd5b543f90da..61bad17a1d11 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -23,13 +23,29 @@ #include "hyp.h" -/* ctxt is already in the HYP VA space */ +/* + * Non-VHE: Both host and guest must save everything. + * + * VHE: Host must save tpidr*_el[01], actlr_el1, sp0, pc, pstate, and + * guest must save everything. + */ + +static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt) +{ + ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); + ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); + ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); + ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); + ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); + ctxt->gp_regs.regs.pc = read_sysreg(elr_el2); + ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2); +} + static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) { ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1); - ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1); ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1); ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1); @@ -41,17 +57,11 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1); ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1); ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1); - ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); - ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); - ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1); ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1); ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1); ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); - ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); - ctxt->gp_regs.regs.pc = read_sysreg(elr_el2); - ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2); ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1); ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1); ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1); @@ -60,11 +70,24 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt) { __sysreg_save_state(ctxt); + __sysreg_save_common_state(ctxt); } void __hyp_text __sysreg_save_guest_state(struct kvm_cpu_context *ctxt) { __sysreg_save_state(ctxt); + __sysreg_save_common_state(ctxt); +} + +static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) +{ + write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); + write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); + write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); + write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); + write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); + write_sysreg(ctxt->gp_regs.regs.pc, elr_el2); + write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2); } static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) @@ -72,7 +95,6 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); 
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1); - write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1); write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1); write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1); @@ -84,17 +106,11 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1); write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1); write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1); - write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); - write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); - write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1); write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1); write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); - write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); - write_sysreg(ctxt->gp_regs.regs.pc, elr_el2); - write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2); write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); write_sysreg(ctxt->gp_regs.elr_el1, elr_el1); write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1); @@ -103,11 +119,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt) { __sysreg_restore_state(ctxt); + __sysreg_restore_common_state(ctxt); } void __hyp_text __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt) { __sysreg_restore_state(ctxt); + __sysreg_restore_common_state(ctxt); } void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) -- cgit v1.2.3-58-ga151 From 094f8233c0da602712e8a206984431026a1530aa Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 12:56:25 +0000 Subject: arm64: KVM: VHE: Use unified system register accessors Use the recently introduced unified system register accessors for those sysregs that behave differently depending on VHE being in use or not. 
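As a concrete example of what the conversion looks like after this patch,
read_sysreg_el1(tcr) roughly expands to the following (a sketch of the
expansion; tcr_EL12 stands for the sys_reg(3, 5, 2, 0, 2) encoding defined
earlier in this series):

    u64 reg;
    asm volatile(ALTERNATIVE("mrs %0, tcr_EL1",     /* ARMv8.0 */
                             "mrs_s %0, tcr_EL12",  /* ARMv8.1 with VHE */
                             ARM64_HAS_VIRT_HOST_EXTN)
                 : "=r" (reg));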
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/sysreg-sr.c | 84 +++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 61bad17a1d11..7d7d75732a62 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -37,34 +37,34 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt) ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); - ctxt->gp_regs.regs.pc = read_sysreg(elr_el2); - ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2); + ctxt->gp_regs.regs.pc = read_sysreg_el2(elr); + ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr); } static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) { ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); - ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1); - ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1); - ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1); - ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1); - ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1); - ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1); - ctxt->sys_regs[AFSR0_EL1] = read_sysreg(afsr0_el1); - ctxt->sys_regs[AFSR1_EL1] = read_sysreg(afsr1_el1); - ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1); - ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1); - ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1); - ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1); - ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1); - ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1); + ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr); + ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(cpacr); + ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(ttbr0); + ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(ttbr1); + ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(tcr); + ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(esr); + ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(afsr0); + ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(afsr1); + ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(far); + ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(mair); + ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(vbar); + ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(contextidr); + ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair); + ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl); ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1); ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1); - ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1); - ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1); + ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr); + ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr); } void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt) @@ -86,34 +86,34 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); - write_sysreg(ctxt->gp_regs.regs.pc, elr_el2); - write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2); + write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); + write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr); } static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) { - 
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); - write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); - write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1); - write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1); - write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1); - write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1); - write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1); - write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1); - write_sysreg(ctxt->sys_regs[AFSR0_EL1], afsr0_el1); - write_sysreg(ctxt->sys_regs[AFSR1_EL1], afsr1_el1); - write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1); - write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1); - write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1); - write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1); - write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1); - write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1); - write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); - write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); - - write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); - write_sysreg(ctxt->gp_regs.elr_el1, elr_el1); - write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1); + write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); + write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); + write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr); + write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr); + write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0); + write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1); + write_sysreg_el1(ctxt->sys_regs[TCR_EL1], tcr); + write_sysreg_el1(ctxt->sys_regs[ESR_EL1], esr); + write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0); + write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1); + write_sysreg_el1(ctxt->sys_regs[FAR_EL1], far); + write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], mair); + write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], vbar); + write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],contextidr); + write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair); + write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl); + write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); + write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); + + write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); + write_sysreg_el1(ctxt->gp_regs.elr_el1, elr); + write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr); } void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt) -- cgit v1.2.3-58-ga151 From d1526e5efc3978efe8c9c37a2396d91e4702251b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 13:59:46 +0000 Subject: arm64: KVM: VHE: Enable minimal sysreg save/restore We're now in a position where we can introduce VHE's minimal save/restore, which is limited to the handful of shared sysregs. Add the required alternative function calls that result in a "do nothing" call on VHE, and the normal save/restore for non-VHE. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/sysreg-sr.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 7d7d75732a62..74b5f81678c2 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -23,6 +23,9 @@ #include "hyp.h" +/* Yes, this does nothing, on purpose */ +static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { } + /* * Non-VHE: Both host and guest must save everything. 
* @@ -67,9 +70,13 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr); } +static hyp_alternate_select(__sysreg_call_save_host_state, + __sysreg_save_state, __sysreg_do_nothing, + ARM64_HAS_VIRT_HOST_EXTN); + void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt) { - __sysreg_save_state(ctxt); + __sysreg_call_save_host_state()(ctxt); __sysreg_save_common_state(ctxt); } @@ -116,9 +123,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr); } +static hyp_alternate_select(__sysreg_call_restore_host_state, + __sysreg_restore_state, __sysreg_do_nothing, + ARM64_HAS_VIRT_HOST_EXTN); + void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt) { - __sysreg_restore_state(ctxt); + __sysreg_call_restore_host_state()(ctxt); __sysreg_restore_common_state(ctxt); } -- cgit v1.2.3-58-ga151 From 328762247cd33b4533f9dd89a4faf40288f359b7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 14:15:45 +0000 Subject: arm64: KVM: VHE: Make __fpsimd_enabled VHE aware As non-VHE and VHE have different ways to express the trapping of FPSIMD registers to EL2, make __fpsimd_enabled a patchable predicate and provide a VHE implementation. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 3 +++ arch/arm64/kvm/hyp/hyp.h | 5 +---- arch/arm64/kvm/hyp/switch.c | 19 +++++++++++++++++++ 3 files changed, 23 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index d201d4b396d1..afa2f4a96210 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -216,4 +216,7 @@ ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \ ECN(BKPT32), ECN(VECTOR32), ECN(BRK64) +#define CPACR_EL1_FPEN (3 << 20) +#define CPACR_EL1_TTA (1 << 28) + #endif /* __ARM64_KVM_ARM_H__ */ diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 5dfa8838a3e1..44eaff70da6a 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -171,10 +171,7 @@ void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); -static inline bool __fpsimd_enabled(void) -{ - return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP); -} +bool __fpsimd_enabled(void); u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); void __noreturn __hyp_do_panic(unsigned long, ...); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 68f3cba25910..0d82ae921b9c 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -17,6 +17,25 @@ #include "hyp.h" +static bool __hyp_text __fpsimd_enabled_nvhe(void) +{ + return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP); +} + +static bool __hyp_text __fpsimd_enabled_vhe(void) +{ + return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN); +} + +static hyp_alternate_select(__fpsimd_is_enabled, + __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + +bool __hyp_text __fpsimd_enabled(void) +{ + return __fpsimd_is_enabled()(); +} + static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) { u64 val; -- cgit v1.2.3-58-ga151 From 68908bf789b7fd376538a4bad8367d5dcb9ec983 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 15:47:55 +0000 Subject: arm64: KVM: VHE: Implement 
VHE activate/deactivate_traps Running the kernel in HYP mode requires the HCR_E2H bit to be set at all times, and the HCR_TGE bit to be set when running as a host (and cleared when running as a guest). At the same time, the vector must be set to the current role of the kernel (either host or hypervisor), and a couple of system registers differ between VHE and non-VHE. We implement these by using another set of alternate functions that get dynamically patched. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 3 +- arch/arm64/include/asm/kvm_emulate.h | 3 ++ arch/arm64/kvm/hyp/switch.c | 53 ++++++++++++++++++++++++++++++++---- 3 files changed, 52 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index afa2f4a96210..b56a0a81e4cb 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -23,6 +23,7 @@ #include /* Hyp Configuration Register (HCR) bits */ +#define HCR_E2H (UL(1) << 34) #define HCR_ID (UL(1) << 33) #define HCR_CD (UL(1) << 32) #define HCR_RW_SHIFT 31 @@ -81,7 +82,7 @@ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW) #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) - +#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) /* Hyp System Control Register (SCTLR_EL2) bits */ #define SCTLR_EL2_EE (1 << 25) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 4df8e7a58c6b..40bc1681b6d5 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -29,6 +29,7 @@ #include #include #include +#include unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num); unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu); @@ -43,6 +44,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; + if (is_kernel_in_hyp_mode()) + vcpu->arch.hcr_el2 |= HCR_E2H; if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) vcpu->arch.hcr_el2 &= ~HCR_RW; } diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 0d82ae921b9c..e609942ef79c 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -15,6 +15,8 @@ * along with this program. If not, see . 
*/ +#include + #include "hyp.h" static bool __hyp_text __fpsimd_enabled_nvhe(void) @@ -36,6 +38,31 @@ bool __hyp_text __fpsimd_enabled(void) return __fpsimd_is_enabled()(); } +static void __hyp_text __activate_traps_vhe(void) +{ + u64 val; + + val = read_sysreg(cpacr_el1); + val |= CPACR_EL1_TTA; + val &= ~CPACR_EL1_FPEN; + write_sysreg(val, cpacr_el1); + + write_sysreg(__kvm_hyp_vector, vbar_el1); +} + +static void __hyp_text __activate_traps_nvhe(void) +{ + u64 val; + + val = CPTR_EL2_DEFAULT; + val |= CPTR_EL2_TTA | CPTR_EL2_TFP; + write_sysreg(val, cptr_el2); +} + +static hyp_alternate_select(__activate_traps_arch, + __activate_traps_nvhe, __activate_traps_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) { u64 val; @@ -55,20 +82,34 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) write_sysreg(val, hcr_el2); /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ write_sysreg(1 << 15, hstr_el2); + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); + __activate_traps_arch()(); +} - val = CPTR_EL2_DEFAULT; - val |= CPTR_EL2_TTA | CPTR_EL2_TFP; - write_sysreg(val, cptr_el2); +static void __hyp_text __deactivate_traps_vhe(void) +{ + extern char vectors[]; /* kernel exception vectors */ - write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); + write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); + write_sysreg(CPACR_EL1_FPEN, cpacr_el1); + write_sysreg(vectors, vbar_el1); } -static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) +static void __hyp_text __deactivate_traps_nvhe(void) { write_sysreg(HCR_RW, hcr_el2); + write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); +} + +static hyp_alternate_select(__deactivate_traps_arch, + __deactivate_traps_nvhe, __deactivate_traps_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + +static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) +{ + __deactivate_traps_arch()(); write_sysreg(0, hstr_el2); write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); - write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); } static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) -- cgit v1.2.3-58-ga151 From 5efe6de13807fe927f0ecc63d83197b5cd3c7782 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 14:36:20 +0000 Subject: arm64: KVM: VHE: Use unified sysreg accessors for timer Switch the timer code to the unified sysreg accessors. 
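For example (a sketch of the effect rather than new code), the guest's
virtual timer context is now reached through the unified accessors, so
the same line reads CNTV_CTL_EL0 on ARMv8.0 and CNTV_CTL_EL02 (the guest
view from EL2) on a VHE host:

    timer->cntv_ctl = read_sysreg_el0(cntv_ctl);   /* save the guest timer */
    write_sysreg_el0(0, cntv_ctl);                 /* then disable it */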
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/timer-sr.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c index 1051e5d7320f..f276d9e74411 100644 --- a/arch/arm64/kvm/hyp/timer-sr.c +++ b/arch/arm64/kvm/hyp/timer-sr.c @@ -31,12 +31,12 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) u64 val; if (kvm->arch.timer.enabled) { - timer->cntv_ctl = read_sysreg(cntv_ctl_el0); - timer->cntv_cval = read_sysreg(cntv_cval_el0); + timer->cntv_ctl = read_sysreg_el0(cntv_ctl); + timer->cntv_cval = read_sysreg_el0(cntv_cval); } /* Disable the virtual timer */ - write_sysreg(0, cntv_ctl_el0); + write_sysreg_el0(0, cntv_ctl); /* Allow physical timer/counter access for the host */ val = read_sysreg(cnthctl_el2); @@ -64,8 +64,8 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) if (kvm->arch.timer.enabled) { write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); - write_sysreg(timer->cntv_cval, cntv_cval_el0); + write_sysreg_el0(timer->cntv_cval, cntv_cval); isb(); - write_sysreg(timer->cntv_ctl, cntv_ctl_el0); + write_sysreg_el0(timer->cntv_ctl, cntv_ctl); } } -- cgit v1.2.3-58-ga151 From 77cb2d91333312d7426055d4369f3821e5e8bda0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 14:42:09 +0000 Subject: arm64: KVM: VHE: Add fpsimd enabling on guest access Despite the fact that a VHE enabled kernel runs at EL2, it uses CPACR_EL1 to trap FPSIMD access. Add the required alternative code to re-enable guest FPSIMD access when it has trapped to EL2. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/entry.S | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index fd0fbe9b7e6a..ce9e5e5f28cf 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -130,9 +130,15 @@ ENDPROC(__guest_exit) ENTRY(__fpsimd_guest_restore) stp x4, lr, [sp, #-16]! +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs x2, cptr_el2 bic x2, x2, #CPTR_EL2_TFP msr cptr_el2, x2 +alternative_else + mrs x2, cpacr_el1 + orr x2, x2, #CPACR_EL1_FPEN + msr cpacr_el1, x2 +alternative_endif isb mrs x3, tpidr_el2 -- cgit v1.2.3-58-ga151 From 253dcbd39adb00890f3c350230ae310fcfeeb760 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 17 Nov 2015 14:07:45 +0000 Subject: arm64: KVM: VHE: Add alternative panic handling As the kernel fully runs in HYP when VHE is enabled, we can directly branch to the kernel's panic() implementation, and not perform an exception return. Add the alternative code to deal with this. 
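A sketch of the two resulting paths (implemented below with the
hyp_alternate_select helper):

    /*
     * non-VHE: translate the format string with hyp_kern_va() and go
     *          through __hyp_do_panic, which exception-returns to EL1.
     * VHE:     call panic() directly; the kernel already runs at EL2 and
     *          shares the HYP VA space, so no translation is needed.
     */
    __hyp_call_panic()(spsr, elr, par);   /* never returns */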
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/switch.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index e609942ef79c..731f0a2ffee0 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -210,11 +210,34 @@ __alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu); static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; -void __hyp_text __noreturn __hyp_panic(void) +static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par) { unsigned long str_va = (unsigned long)__hyp_panic_string; - u64 spsr = read_sysreg(spsr_el2); - u64 elr = read_sysreg(elr_el2); + + __hyp_do_panic(hyp_kern_va(str_va), + spsr, elr, + read_sysreg(esr_el2), read_sysreg_el2(far), + read_sysreg(hpfar_el2), par, + (void *)read_sysreg(tpidr_el2)); +} + +static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par) +{ + panic(__hyp_panic_string, + spsr, elr, + read_sysreg_el2(esr), read_sysreg_el2(far), + read_sysreg(hpfar_el2), par, + (void *)read_sysreg(tpidr_el2)); +} + +static hyp_alternate_select(__hyp_call_panic, + __hyp_call_panic_nvhe, __hyp_call_panic_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + +void __hyp_text __noreturn __hyp_panic(void) +{ + u64 spsr = read_sysreg_el2(spsr); + u64 elr = read_sysreg_el2(elr); u64 par = read_sysreg(par_el1); if (read_sysreg(vttbr_el2)) { @@ -229,11 +252,7 @@ void __hyp_text __noreturn __hyp_panic(void) } /* Call panic for real */ - __hyp_do_panic(hyp_kern_va(str_va), - spsr, elr, - read_sysreg(esr_el2), read_sysreg(far_el2), - read_sysreg(hpfar_el2), par, - (void *)read_sysreg(tpidr_el2)); + __hyp_call_panic()(spsr, elr, par); unreachable(); } -- cgit v1.2.3-58-ga151 From 5f05a72aed023e5824eebb2542b5397cb89188f4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 15:06:47 +0000 Subject: arm64: KVM: Move most of the fault decoding to C The fault decoding process (including computing the IPA in the case of a permission fault) would be much better done in C code, as we have a reasonable infrastructure to deal with the VHE/non-VHE differences. Let's move the whole thing to C, including the workaround for erratum 834220, and just patch the odd ESR_EL2 access remaining in hyp-entry.S. 
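The heart of the C version (sketched here; the complete helper in the diff
below also preserves PAR_EL1 around the walk) resolves the IPA with an
AT S1E1R translation when HPFAR_EL2 cannot be trusted, and converts the
result into HPFAR_EL2 format:

    u64 tmp, hpfar;

    asm volatile("at s1e1r, %0" : : "r" (far));
    isb();
    tmp = read_sysreg(par_el1);
    if (!(tmp & 1))                     /* stage-1 walk succeeded */
            /* PA[47:12] -> HPFAR_EL2[43:4] */
            hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;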
Reviewed-by: Christoffer Dall Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier --- arch/arm64/kernel/asm-offsets.c | 3 -- arch/arm64/kvm/hyp/hyp-entry.S | 69 +++------------------------------ arch/arm64/kvm/hyp/switch.c | 85 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 67 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index fffa4ac6c25a..b0ab4e93db0d 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -110,9 +110,6 @@ int main(void) DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); - DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); - DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); - DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); #endif #ifdef CONFIG_CPU_PM diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 1bdeee70833e..3488894397ff 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -69,7 +68,11 @@ ENDPROC(__vhe_hyp_call) el1_sync: // Guest trapped into EL2 save_x0_to_x3 +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs x1, esr_el2 +alternative_else + mrs x1, esr_el1 +alternative_endif lsr x2, x1, #ESR_ELx_EC_SHIFT cmp x2, #ESR_ELx_EC_HVC64 @@ -105,72 +108,10 @@ el1_trap: cmp x2, #ESR_ELx_EC_FP_ASIMD b.eq __fpsimd_guest_restore - cmp x2, #ESR_ELx_EC_DABT_LOW - mov x0, #ESR_ELx_EC_IABT_LOW - ccmp x2, x0, #4, ne - b.ne 1f // Not an abort we care about - - /* This is an abort. Check for permission fault */ -alternative_if_not ARM64_WORKAROUND_834220 - and x2, x1, #ESR_ELx_FSC_TYPE - cmp x2, #FSC_PERM - b.ne 1f // Not a permission fault -alternative_else - nop // Use the permission fault path to - nop // check for a valid S1 translation, - nop // regardless of the ESR value. -alternative_endif - - /* - * Check for Stage-1 page table walk, which is guaranteed - * to give a valid HPFAR_EL2. - */ - tbnz x1, #7, 1f // S1PTW is set - - /* Preserve PAR_EL1 */ - mrs x3, par_el1 - stp x3, xzr, [sp, #-16]! - - /* - * Permission fault, HPFAR_EL2 is invalid. - * Resolve the IPA the hard way using the guest VA. - * Stage-1 translation already validated the memory access rights. - * As such, we can use the EL1 translation regime, and don't have - * to distinguish between EL0 and EL1 access. - */ - mrs x2, far_el2 - at s1e1r, x2 - isb - - /* Read result */ - mrs x3, par_el1 - ldp x0, xzr, [sp], #16 // Restore PAR_EL1 from the stack - msr par_el1, x0 - tbnz x3, #0, 3f // Bail out if we failed the translation - ubfx x3, x3, #12, #36 // Extract IPA - lsl x3, x3, #4 // and present it like HPFAR - b 2f - -1: mrs x3, hpfar_el2 - mrs x2, far_el2 - -2: mrs x0, tpidr_el2 - str w1, [x0, #VCPU_ESR_EL2] - str x2, [x0, #VCPU_FAR_EL2] - str x3, [x0, #VCPU_HPFAR_EL2] - + mrs x0, tpidr_el2 mov x1, #ARM_EXCEPTION_TRAP b __guest_exit - /* - * Translation failed. Just return to the guest and - * let it fault again. Another CPU is probably playing - * behind our back. 
- */ -3: restore_x0_to_x3 - - eret - el1_irq: save_x0_to_x3 mrs x0, tpidr_el2 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 731f0a2ffee0..ecf5b05d1e16 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -15,6 +15,7 @@ * along with this program. If not, see . */ +#include #include #include "hyp.h" @@ -149,6 +150,86 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) __vgic_call_restore_state()(vcpu); } +static bool __hyp_text __true_value(void) +{ + return true; +} + +static bool __hyp_text __false_value(void) +{ + return false; +} + +static hyp_alternate_select(__check_arm_834220, + __false_value, __true_value, + ARM64_WORKAROUND_834220); + +static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) +{ + u64 par, tmp; + + /* + * Resolve the IPA the hard way using the guest VA. + * + * Stage-1 translation already validated the memory access + * rights. As such, we can use the EL1 translation regime, and + * don't have to distinguish between EL0 and EL1 access. + * + * We do need to save/restore PAR_EL1 though, as we haven't + * saved the guest context yet, and we may return early... + */ + par = read_sysreg(par_el1); + asm volatile("at s1e1r, %0" : : "r" (far)); + isb(); + + tmp = read_sysreg(par_el1); + write_sysreg(par, par_el1); + + if (unlikely(tmp & 1)) + return false; /* Translation failed, back to guest */ + + /* Convert PAR to HPFAR format */ + *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4; + return true; +} + +static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) +{ + u64 esr = read_sysreg_el2(esr); + u8 ec = esr >> ESR_ELx_EC_SHIFT; + u64 hpfar, far; + + vcpu->arch.fault.esr_el2 = esr; + + if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW) + return true; + + far = read_sysreg_el2(far); + + /* + * The HPFAR can be invalid if the stage 2 fault did not + * happen during a stage 1 page table walk (the ESR_EL2.S1PTW + * bit is clear) and one of the two following cases are true: + * 1. The fault was due to a permission fault + * 2. The processor carries errata 834220 + * + * Therefore, for all non S1PTW faults where we either have a + * permission fault or the errata workaround is enabled, we + * resolve the IPA using the AT instruction. + */ + if (!(esr & ESR_ELx_S1PTW) && + (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) { + if (!__translate_far_to_hpfar(far, &hpfar)) + return false; + } else { + hpfar = read_sysreg(hpfar_el2); + } + + vcpu->arch.fault.far_el2 = far; + vcpu->arch.fault.hpfar_el2 = hpfar; + return true; +} + static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; @@ -180,9 +261,13 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); /* Jump in the fire! */ +again: exit_code = __guest_enter(vcpu, host_ctxt); /* And we're baaack! */ + if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) + goto again; + fp_enabled = __fpsimd_enabled(); __sysreg_save_guest_state(guest_ctxt); -- cgit v1.2.3-58-ga151 From d98ecdaca296815b3c4d11032b3699af6b49b70b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 25 Jan 2016 17:31:13 +0000 Subject: arm64: perf: Count EL2 events if the kernel is running in HYP When the kernel is running in HYP (with VHE), it is necessary to include EL2 events if the user requests counting kernel or hypervisor events. 
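Concretely (a sketch of the resulting filter, fragment of
armv8pmu_set_event_filter()): on a VHE host "kernel" and "hypervisor"
both mean EL2, so a request that excludes one but not the other is
rejected, and the EL1 exclusion only applies when the kernel really runs
at EL1:

    if (is_kernel_in_hyp_mode() && attr->exclude_kernel != attr->exclude_hv)
            return -EINVAL;                 /* contradictory on a VHE host */
    if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
            config_base |= ARMV8_EXCLUDE_EL1;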
Reviewed-by: Christoffer Dall Acked-by: Catalin Marinas Acked-by: Will Deacon Signed-off-by: Marc Zyngier --- arch/arm64/kernel/perf_event.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index f7ab14c4d5df..1b52269ffa87 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -20,6 +20,7 @@ */ #include +#include #include #include @@ -691,9 +692,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, if (attr->exclude_idle) return -EPERM; + if (is_kernel_in_hyp_mode() && + attr->exclude_kernel != attr->exclude_hv) + return -EINVAL; if (attr->exclude_user) config_base |= ARMV8_EXCLUDE_EL0; - if (attr->exclude_kernel) + if (!is_kernel_in_hyp_mode() && attr->exclude_kernel) config_base |= ARMV8_EXCLUDE_EL1; if (!attr->exclude_hv) config_base |= ARMV8_INCLUDE_EL2; -- cgit v1.2.3-58-ga151 From ae7e27fe6834d4a78fd8e4576a8e1dd15ae0c008 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 25 Jan 2016 18:50:12 +0000 Subject: arm64: hw_breakpoint: Allow EL2 breakpoints if running in HYP With VHE, we place kernel {watch,break}-points at EL2 to get things like kgdb and "perf -e mem:..." working. This requires a bit of repainting in the low-level encore/decode, but is otherwise pretty simple. Acked-by: Will Deacon Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/hw_breakpoint.h | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index 9732908bfc8a..115ea2a64520 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -18,6 +18,7 @@ #include #include +#include #ifdef __KERNEL__ @@ -35,10 +36,21 @@ struct arch_hw_breakpoint { struct arch_hw_breakpoint_ctrl ctrl; }; +/* Privilege Levels */ +#define AARCH64_BREAKPOINT_EL1 1 +#define AARCH64_BREAKPOINT_EL0 2 + +#define DBG_HMC_HYP (1 << 13) + static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl) { - return (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) | + u32 val = (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) | ctrl.enabled; + + if (is_kernel_in_hyp_mode() && ctrl.privilege == AARCH64_BREAKPOINT_EL1) + val |= DBG_HMC_HYP; + + return val; } static inline void decode_ctrl_reg(u32 reg, @@ -61,10 +73,6 @@ static inline void decode_ctrl_reg(u32 reg, #define ARM_BREAKPOINT_STORE 2 #define AARCH64_ESR_ACCESS_MASK (1 << 6) -/* Privilege Levels */ -#define AARCH64_BREAKPOINT_EL1 1 -#define AARCH64_BREAKPOINT_EL0 2 - /* Lengths */ #define ARM_BREAKPOINT_LEN_1 0x1 #define ARM_BREAKPOINT_LEN_2 0x3 -- cgit v1.2.3-58-ga151 From 1f364c8c48a0767885a18451ee074c64b454157a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 19 Feb 2014 09:33:14 +0000 Subject: arm64: VHE: Add support for running Linux in EL2 mode With ARMv8.1 VHE, the architecture is able to (almost) transparently run the kernel at EL2, despite being written for EL1. This patch takes care of the "almost" part, mostly preventing the kernel from dropping from EL2 to EL1, and setting up the HYP configuration. 
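In rough pseudo-C, the EL2 setup performed in head.S becomes the following
(a sketch only; the real code is the assembly below, "vhe" stands for
ID_AA64MMFR1_EL1.VH being non-zero with CONFIG_ARM64_VHE enabled, and
install_el2_stub_and_drop_to_el1() is a made-up name for the existing
stub path):

    u64 hcr = HCR_RW;                    /* EL1 is AArch64 */
    if (vhe)
            hcr |= HCR_TGE | HCR_E2H;    /* the host itself runs at EL2 */
    write_sysreg(hcr, hcr_el2);
    /* ... generic timers, GICv3 and stage-2 setup as before ... */
    if (vhe)
            return;                      /* stay at EL2, skip the hyp stub */
    install_el2_stub_and_drop_to_el1();  /* unchanged ARMv8.0 behaviour */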
Reviewed-by: Christoffer Dall Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier --- arch/arm64/Kconfig | 13 +++++++++++++ arch/arm64/kernel/head.S | 28 +++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 8cc62289a63e..cf118d93290d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -750,6 +750,19 @@ config ARM64_LSE_ATOMICS not support these instructions and requires the kernel to be built with binutils >= 2.25. +config ARM64_VHE + bool "Enable support for Virtualization Host Extensions (VHE)" + default y + help + Virtualization Host Extensions (VHE) allow the kernel to run + directly at EL2 (instead of EL1) on processors that support + it. This leads to better performance for KVM, as they reduce + the cost of the world switch. + + Selecting this option allows the VHE feature to be detected + at runtime, and does not affect processors that do not + implement this feature. + endmenu endmenu diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 917d98108b3f..6f2f37743d3b 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -464,9 +465,27 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 isb ret +2: +#ifdef CONFIG_ARM64_VHE + /* + * Check for VHE being present. For the rest of the EL2 setup, + * x2 being non-zero indicates that we do have VHE, and that the + * kernel is intended to run at EL2. + */ + mrs x2, id_aa64mmfr1_el1 + ubfx x2, x2, #8, #4 +#else + mov x2, xzr +#endif + /* Hyp configuration. */ -2: mov x0, #(1 << 31) // 64-bit EL1 + mov x0, #HCR_RW // 64-bit EL1 + cbz x2, set_hcr + orr x0, x0, #HCR_TGE // Enable Host Extensions + orr x0, x0, #HCR_E2H +set_hcr: msr hcr_el2, x0 + isb /* Generic timers. */ mrs x0, cnthctl_el2 @@ -526,6 +545,13 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems /* Stage-2 translation */ msr vttbr_el2, xzr + cbz x2, install_el2_stub + + mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 + isb + ret + +install_el2_stub: /* Hypervisor stub */ adrp x0, __hyp_stub_vectors add x0, x0, #:lo12:__hyp_stub_vectors -- cgit v1.2.3-58-ga151 From 13720a56edbd8164fbfa251067dea9776e09f54b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 13:44:07 +0000 Subject: arm64: KVM: Move kvm/hyp/hyp.h to include/asm/kvm_hyp.h In order to be able to move code outside of kvm/hyp, we need to make the global hyp.h file accessible from a standard location. include/asm/kvm_hyp.h seems good enough. 
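After the move, code outside arch/arm64/kvm/hyp/ can simply do the
following (a sketch; the helper name is made up for illustration):

    #include <asm/kvm_hyp.h>

    static void __hyp_text some_hyp_helper(struct kvm_vcpu *vcpu)
    {
            vcpu = kern_hyp_va(vcpu);    /* patched to a nop on VHE */
            /* ... */
    }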
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_hyp.h | 180 +++++++++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/debug-sr.c | 4 +- arch/arm64/kvm/hyp/hyp.h | 180 --------------------------------------- arch/arm64/kvm/hyp/s2-setup.c | 3 +- arch/arm64/kvm/hyp/switch.c | 3 +- arch/arm64/kvm/hyp/sysreg-sr.c | 4 +- arch/arm64/kvm/hyp/timer-sr.c | 4 +- arch/arm64/kvm/hyp/tlb.c | 2 +- arch/arm64/kvm/hyp/vgic-v2-sr.c | 4 +- arch/arm64/kvm/hyp/vgic-v3-sr.c | 4 +- 10 files changed, 188 insertions(+), 200 deletions(-) create mode 100644 arch/arm64/include/asm/kvm_hyp.h delete mode 100644 arch/arm64/kvm/hyp/hyp.h (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h new file mode 100644 index 000000000000..44eaff70da6a --- /dev/null +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ARM64_KVM_HYP_H__ +#define __ARM64_KVM_HYP_H__ + +#include +#include +#include +#include + +#define __hyp_text __section(.hyp.text) notrace + +static inline unsigned long __kern_hyp_va(unsigned long v) +{ + asm volatile(ALTERNATIVE("and %0, %0, %1", + "nop", + ARM64_HAS_VIRT_HOST_EXTN) + : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK)); + return v; +} + +#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) + +static inline unsigned long __hyp_kern_va(unsigned long v) +{ + u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET; + asm volatile(ALTERNATIVE("add %0, %0, %1", + "nop", + ARM64_HAS_VIRT_HOST_EXTN) + : "+r" (v) : "r" (offset)); + return v; +} + +#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v))) + +#define read_sysreg_elx(r,nvh,vh) \ + ({ \ + u64 reg; \ + asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ + "mrs_s %0, " __stringify(r##vh),\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : "=r" (reg)); \ + reg; \ + }) + +#define write_sysreg_elx(v,r,nvh,vh) \ + do { \ + u64 __val = (u64)(v); \ + asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ + "msr_s " __stringify(r##vh) ", %x0",\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : : "rZ" (__val)); \ + } while (0) + +/* + * Unified accessors for registers that have a different encoding + * between VHE and non-VHE. They must be specified without their "ELx" + * encoding. 
+ */ +#define read_sysreg_el2(r) \ + ({ \ + u64 reg; \ + asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\ + "mrs %0, " __stringify(r##_EL1),\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : "=r" (reg)); \ + reg; \ + }) + +#define write_sysreg_el2(v,r) \ + do { \ + u64 __val = (u64)(v); \ + asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\ + "msr " __stringify(r##_EL1) ", %x0",\ + ARM64_HAS_VIRT_HOST_EXTN) \ + : : "rZ" (__val)); \ + } while (0) + +#define read_sysreg_el0(r) read_sysreg_elx(r, _EL0, _EL02) +#define write_sysreg_el0(v,r) write_sysreg_elx(v, r, _EL0, _EL02) +#define read_sysreg_el1(r) read_sysreg_elx(r, _EL1, _EL12) +#define write_sysreg_el1(v,r) write_sysreg_elx(v, r, _EL1, _EL12) + +/* The VHE specific system registers and their encoding */ +#define sctlr_EL12 sys_reg(3, 5, 1, 0, 0) +#define cpacr_EL12 sys_reg(3, 5, 1, 0, 2) +#define ttbr0_EL12 sys_reg(3, 5, 2, 0, 0) +#define ttbr1_EL12 sys_reg(3, 5, 2, 0, 1) +#define tcr_EL12 sys_reg(3, 5, 2, 0, 2) +#define afsr0_EL12 sys_reg(3, 5, 5, 1, 0) +#define afsr1_EL12 sys_reg(3, 5, 5, 1, 1) +#define esr_EL12 sys_reg(3, 5, 5, 2, 0) +#define far_EL12 sys_reg(3, 5, 6, 0, 0) +#define mair_EL12 sys_reg(3, 5, 10, 2, 0) +#define amair_EL12 sys_reg(3, 5, 10, 3, 0) +#define vbar_EL12 sys_reg(3, 5, 12, 0, 0) +#define contextidr_EL12 sys_reg(3, 5, 13, 0, 1) +#define cntkctl_EL12 sys_reg(3, 5, 14, 1, 0) +#define cntp_tval_EL02 sys_reg(3, 5, 14, 2, 0) +#define cntp_ctl_EL02 sys_reg(3, 5, 14, 2, 1) +#define cntp_cval_EL02 sys_reg(3, 5, 14, 2, 2) +#define cntv_tval_EL02 sys_reg(3, 5, 14, 3, 0) +#define cntv_ctl_EL02 sys_reg(3, 5, 14, 3, 1) +#define cntv_cval_EL02 sys_reg(3, 5, 14, 3, 2) +#define spsr_EL12 sys_reg(3, 5, 4, 0, 0) +#define elr_EL12 sys_reg(3, 5, 4, 0, 1) + +/** + * hyp_alternate_select - Generates patchable code sequences that are + * used to switch between two implementations of a function, depending + * on the availability of a feature. 
+ * + * @fname: a symbol name that will be defined as a function returning a + * function pointer whose type will match @orig and @alt + * @orig: A pointer to the default function, as returned by @fname when + * @cond doesn't hold + * @alt: A pointer to the alternate function, as returned by @fname + * when @cond holds + * @cond: a CPU feature (as described in asm/cpufeature.h) + */ +#define hyp_alternate_select(fname, orig, alt, cond) \ +typeof(orig) * __hyp_text fname(void) \ +{ \ + typeof(alt) *val = orig; \ + asm volatile(ALTERNATIVE("nop \n", \ + "mov %0, %1 \n", \ + cond) \ + : "+r" (val) : "r" (alt)); \ + return val; \ +} + +void __vgic_v2_save_state(struct kvm_vcpu *vcpu); +void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); + +void __vgic_v3_save_state(struct kvm_vcpu *vcpu); +void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); + +void __timer_save_state(struct kvm_vcpu *vcpu); +void __timer_restore_state(struct kvm_vcpu *vcpu); + +void __sysreg_save_host_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt); +void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt); +void __sysreg32_save_state(struct kvm_vcpu *vcpu); +void __sysreg32_restore_state(struct kvm_vcpu *vcpu); + +void __debug_save_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt); +void __debug_restore_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt); +void __debug_cond_save_host_state(struct kvm_vcpu *vcpu); +void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); + +void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); +void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); +bool __fpsimd_enabled(void); + +u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); +void __noreturn __hyp_do_panic(unsigned long, ...); + +#endif /* __ARM64_KVM_HYP_H__ */ + diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index c9c1e97501a9..053cf8b057c1 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include #define read_debug(r,n) read_sysreg(r##n##_el1) #define write_debug(v,r,n) write_sysreg(v, r##n##_el1) diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h deleted file mode 100644 index 44eaff70da6a..000000000000 --- a/arch/arm64/kvm/hyp/hyp.h +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright (C) 2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef __ARM64_KVM_HYP_H__ -#define __ARM64_KVM_HYP_H__ - -#include -#include -#include -#include - -#define __hyp_text __section(.hyp.text) notrace - -static inline unsigned long __kern_hyp_va(unsigned long v) -{ - asm volatile(ALTERNATIVE("and %0, %0, %1", - "nop", - ARM64_HAS_VIRT_HOST_EXTN) - : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK)); - return v; -} - -#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) - -static inline unsigned long __hyp_kern_va(unsigned long v) -{ - u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET; - asm volatile(ALTERNATIVE("add %0, %0, %1", - "nop", - ARM64_HAS_VIRT_HOST_EXTN) - : "+r" (v) : "r" (offset)); - return v; -} - -#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v))) - -#define read_sysreg_elx(r,nvh,vh) \ - ({ \ - u64 reg; \ - asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ - "mrs_s %0, " __stringify(r##vh),\ - ARM64_HAS_VIRT_HOST_EXTN) \ - : "=r" (reg)); \ - reg; \ - }) - -#define write_sysreg_elx(v,r,nvh,vh) \ - do { \ - u64 __val = (u64)(v); \ - asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ - "msr_s " __stringify(r##vh) ", %x0",\ - ARM64_HAS_VIRT_HOST_EXTN) \ - : : "rZ" (__val)); \ - } while (0) - -/* - * Unified accessors for registers that have a different encoding - * between VHE and non-VHE. They must be specified without their "ELx" - * encoding. - */ -#define read_sysreg_el2(r) \ - ({ \ - u64 reg; \ - asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\ - "mrs %0, " __stringify(r##_EL1),\ - ARM64_HAS_VIRT_HOST_EXTN) \ - : "=r" (reg)); \ - reg; \ - }) - -#define write_sysreg_el2(v,r) \ - do { \ - u64 __val = (u64)(v); \ - asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\ - "msr " __stringify(r##_EL1) ", %x0",\ - ARM64_HAS_VIRT_HOST_EXTN) \ - : : "rZ" (__val)); \ - } while (0) - -#define read_sysreg_el0(r) read_sysreg_elx(r, _EL0, _EL02) -#define write_sysreg_el0(v,r) write_sysreg_elx(v, r, _EL0, _EL02) -#define read_sysreg_el1(r) read_sysreg_elx(r, _EL1, _EL12) -#define write_sysreg_el1(v,r) write_sysreg_elx(v, r, _EL1, _EL12) - -/* The VHE specific system registers and their encoding */ -#define sctlr_EL12 sys_reg(3, 5, 1, 0, 0) -#define cpacr_EL12 sys_reg(3, 5, 1, 0, 2) -#define ttbr0_EL12 sys_reg(3, 5, 2, 0, 0) -#define ttbr1_EL12 sys_reg(3, 5, 2, 0, 1) -#define tcr_EL12 sys_reg(3, 5, 2, 0, 2) -#define afsr0_EL12 sys_reg(3, 5, 5, 1, 0) -#define afsr1_EL12 sys_reg(3, 5, 5, 1, 1) -#define esr_EL12 sys_reg(3, 5, 5, 2, 0) -#define far_EL12 sys_reg(3, 5, 6, 0, 0) -#define mair_EL12 sys_reg(3, 5, 10, 2, 0) -#define amair_EL12 sys_reg(3, 5, 10, 3, 0) -#define vbar_EL12 sys_reg(3, 5, 12, 0, 0) -#define contextidr_EL12 sys_reg(3, 5, 13, 0, 1) -#define cntkctl_EL12 sys_reg(3, 5, 14, 1, 0) -#define cntp_tval_EL02 sys_reg(3, 5, 14, 2, 0) -#define cntp_ctl_EL02 sys_reg(3, 5, 14, 2, 1) -#define cntp_cval_EL02 sys_reg(3, 5, 14, 2, 2) -#define cntv_tval_EL02 sys_reg(3, 5, 14, 3, 0) -#define cntv_ctl_EL02 sys_reg(3, 5, 14, 3, 1) -#define cntv_cval_EL02 sys_reg(3, 5, 14, 3, 2) -#define spsr_EL12 sys_reg(3, 5, 4, 0, 0) -#define elr_EL12 sys_reg(3, 5, 4, 0, 1) - -/** - * hyp_alternate_select - Generates patchable code sequences that are - * used to switch between two implementations of a function, depending - * on the availability of a feature. 
- * - * @fname: a symbol name that will be defined as a function returning a - * function pointer whose type will match @orig and @alt - * @orig: A pointer to the default function, as returned by @fname when - * @cond doesn't hold - * @alt: A pointer to the alternate function, as returned by @fname - * when @cond holds - * @cond: a CPU feature (as described in asm/cpufeature.h) - */ -#define hyp_alternate_select(fname, orig, alt, cond) \ -typeof(orig) * __hyp_text fname(void) \ -{ \ - typeof(alt) *val = orig; \ - asm volatile(ALTERNATIVE("nop \n", \ - "mov %0, %1 \n", \ - cond) \ - : "+r" (val) : "r" (alt)); \ - return val; \ -} - -void __vgic_v2_save_state(struct kvm_vcpu *vcpu); -void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); - -void __vgic_v3_save_state(struct kvm_vcpu *vcpu); -void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); - -void __timer_save_state(struct kvm_vcpu *vcpu); -void __timer_restore_state(struct kvm_vcpu *vcpu); - -void __sysreg_save_host_state(struct kvm_cpu_context *ctxt); -void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt); -void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt); -void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt); -void __sysreg32_save_state(struct kvm_vcpu *vcpu); -void __sysreg32_restore_state(struct kvm_vcpu *vcpu); - -void __debug_save_state(struct kvm_vcpu *vcpu, - struct kvm_guest_debug_arch *dbg, - struct kvm_cpu_context *ctxt); -void __debug_restore_state(struct kvm_vcpu *vcpu, - struct kvm_guest_debug_arch *dbg, - struct kvm_cpu_context *ctxt); -void __debug_cond_save_host_state(struct kvm_vcpu *vcpu); -void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); - -void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); -void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); -bool __fpsimd_enabled(void); - -u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); -void __noreturn __hyp_do_panic(unsigned long, ...); - -#endif /* __ARM64_KVM_HYP_H__ */ - diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c index 17e8cc09a1d8..bfc54fd82797 100644 --- a/arch/arm64/kvm/hyp/s2-setup.c +++ b/arch/arm64/kvm/hyp/s2-setup.c @@ -18,8 +18,7 @@ #include #include #include - -#include "hyp.h" +#include void __hyp_text __init_stage2_translation(void) { diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index ecf5b05d1e16..7b81e56111ab 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -17,8 +17,7 @@ #include #include - -#include "hyp.h" +#include static bool __hyp_text __fpsimd_enabled_nvhe(void) { diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 74b5f81678c2..0f7c40eb3f53 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* Yes, this does nothing, on purpose */ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { } diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c index f276d9e74411..ea00d69e7078 100644 --- a/arch/arm64/kvm/hyp/timer-sr.c +++ b/arch/arm64/kvm/hyp/timer-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 2a7e0d838698..be8177cdd3bf 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -15,7 +15,7 @@ * 
along with this program. If not, see . */ -#include "hyp.h" +#include static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c index e71761238cfc..9514a7d90d71 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v2-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 5dd2a26444ec..0035b2d3fb6d 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include #define vtr_to_max_lr_idx(v) ((v) & 0xf) #define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) -- cgit v1.2.3-58-ga151 From 6d50d54cd80cb60d701e70fb0633250115606459 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:24:39 +0000 Subject: arm64: KVM: Move vgic-v2 and timer save/restore to virt/kvm/arm/hyp We already have virt/kvm/arm/ containing timer and vgic stuff. Add yet another subdirectory to contain the hyp-specific files (timer and vgic again). Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/Makefile | 7 +++- arch/arm64/kvm/hyp/timer-sr.c | 69 ---------------------------------- arch/arm64/kvm/hyp/vgic-v2-sr.c | 82 ----------------------------------------- virt/kvm/arm/hyp/timer-sr.c | 69 ++++++++++++++++++++++++++++++++++ virt/kvm/arm/hyp/vgic-v2-sr.c | 82 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 156 insertions(+), 153 deletions(-) delete mode 100644 arch/arm64/kvm/hyp/timer-sr.c delete mode 100644 arch/arm64/kvm/hyp/vgic-v2-sr.c create mode 100644 virt/kvm/arm/hyp/timer-sr.c create mode 100644 virt/kvm/arm/hyp/vgic-v2-sr.c (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 5326e664fd41..b6a8fc5ad1af 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -2,9 +2,12 @@ # Makefile for Kernel-based Virtual Machine module, HYP part # -obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o +KVM=../../../../virt/kvm + +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o + obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c deleted file mode 100644 index ea00d69e7078..000000000000 --- a/arch/arm64/kvm/hyp/timer-sr.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2012-2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - -#include - -/* vcpu is already in the HYP VA space */ -void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - if (kvm->arch.timer.enabled) { - timer->cntv_ctl = read_sysreg_el0(cntv_ctl); - timer->cntv_cval = read_sysreg_el0(cntv_cval); - } - - /* Disable the virtual timer */ - write_sysreg_el0(0, cntv_ctl); - - /* Allow physical timer/counter access for the host */ - val = read_sysreg(cnthctl_el2); - val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; - write_sysreg(val, cnthctl_el2); - - /* Clear cntvoff for the host */ - write_sysreg(0, cntvoff_el2); -} - -void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - /* - * Disallow physical timer access for the guest - * Physical counter access is allowed - */ - val = read_sysreg(cnthctl_el2); - val &= ~CNTHCTL_EL1PCEN; - val |= CNTHCTL_EL1PCTEN; - write_sysreg(val, cnthctl_el2); - - if (kvm->arch.timer.enabled) { - write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); - write_sysreg_el0(timer->cntv_cval, cntv_cval); - isb(); - write_sysreg_el0(timer->cntv_ctl, cntv_ctl); - } -} diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c deleted file mode 100644 index 9514a7d90d71..000000000000 --- a/arch/arm64/kvm/hyp/vgic-v2-sr.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2012-2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - -#include - -/* vcpu is already in the HYP VA space */ -void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - struct vgic_dist *vgic = &kvm->arch.vgic; - void __iomem *base = kern_hyp_va(vgic->vctrl_base); - u32 eisr0, eisr1, elrsr0, elrsr1; - int i, nr_lr; - - if (!base) - return; - - nr_lr = vcpu->arch.vgic_cpu.nr_lr; - cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR); - cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR); - eisr0 = readl_relaxed(base + GICH_EISR0); - elrsr0 = readl_relaxed(base + GICH_ELRSR0); - if (unlikely(nr_lr > 32)) { - eisr1 = readl_relaxed(base + GICH_EISR1); - elrsr1 = readl_relaxed(base + GICH_ELRSR1); - } else { - eisr1 = elrsr1 = 0; - } -#ifdef CONFIG_CPU_BIG_ENDIAN - cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1; - cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; -#else - cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0; - cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; -#endif - cpu_if->vgic_apr = readl_relaxed(base + GICH_APR); - - writel_relaxed(0, base + GICH_HCR); - - for (i = 0; i < nr_lr; i++) - cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); -} - -/* vcpu is already in the HYP VA space */ -void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - struct vgic_dist *vgic = &kvm->arch.vgic; - void __iomem *base = kern_hyp_va(vgic->vctrl_base); - int i, nr_lr; - - if (!base) - return; - - writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR); - writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); - writel_relaxed(cpu_if->vgic_apr, base + GICH_APR); - - nr_lr = vcpu->arch.vgic_cpu.nr_lr; - for (i = 0; i < nr_lr; i++) - writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4)); -} diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c new file mode 100644 index 000000000000..ea00d69e7078 --- /dev/null +++ b/virt/kvm/arm/hyp/timer-sr.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include + +/* vcpu is already in the HYP VA space */ +void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + if (kvm->arch.timer.enabled) { + timer->cntv_ctl = read_sysreg_el0(cntv_ctl); + timer->cntv_cval = read_sysreg_el0(cntv_cval); + } + + /* Disable the virtual timer */ + write_sysreg_el0(0, cntv_ctl); + + /* Allow physical timer/counter access for the host */ + val = read_sysreg(cnthctl_el2); + val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + write_sysreg(val, cnthctl_el2); + + /* Clear cntvoff for the host */ + write_sysreg(0, cntvoff_el2); +} + +void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + /* + * Disallow physical timer access for the guest + * Physical counter access is allowed + */ + val = read_sysreg(cnthctl_el2); + val &= ~CNTHCTL_EL1PCEN; + val |= CNTHCTL_EL1PCTEN; + write_sysreg(val, cnthctl_el2); + + if (kvm->arch.timer.enabled) { + write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); + write_sysreg_el0(timer->cntv_cval, cntv_cval); + isb(); + write_sysreg_el0(timer->cntv_ctl, cntv_ctl); + } +} diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c new file mode 100644 index 000000000000..9514a7d90d71 --- /dev/null +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + u32 eisr0, eisr1, elrsr0, elrsr1; + int i, nr_lr; + + if (!base) + return; + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR); + cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR); + eisr0 = readl_relaxed(base + GICH_EISR0); + elrsr0 = readl_relaxed(base + GICH_ELRSR0); + if (unlikely(nr_lr > 32)) { + eisr1 = readl_relaxed(base + GICH_EISR1); + elrsr1 = readl_relaxed(base + GICH_ELRSR1); + } else { + eisr1 = elrsr1 = 0; + } +#ifdef CONFIG_CPU_BIG_ENDIAN + cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1; + cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; +#else + cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0; + cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; +#endif + cpu_if->vgic_apr = readl_relaxed(base + GICH_APR); + + writel_relaxed(0, base + GICH_HCR); + + for (i = 0; i < nr_lr; i++) + cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); +} + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + int i, nr_lr; + + if (!base) + return; + + writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR); + writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); + writel_relaxed(cpu_if->vgic_apr, base + GICH_APR); + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + for (i = 0; i < nr_lr; i++) + writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4)); +} -- cgit v1.2.3-58-ga151 From f1c9cad7c508f59fedd9f77eb36e5859e11ce5ab Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:31:37 +0000 Subject: ARM: KVM: Move kvm/hyp/hyp.h to include/asm/kvm_hyp.h In order to be able to use the code located in virt/kvm/arm/hyp, we need to make the global hyp.h file accessible from include/asm, similar to what we did for arm64. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_hyp.h | 130 +++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/banked-sr.c | 2 +- arch/arm/kvm/hyp/cp15-sr.c | 2 +- arch/arm/kvm/hyp/hyp.h | 130 ----------------------------------------- arch/arm/kvm/hyp/s2-setup.c | 3 +- arch/arm/kvm/hyp/switch.c | 2 +- arch/arm/kvm/hyp/timer-sr.c | 4 +- arch/arm/kvm/hyp/tlb.c | 2 +- arch/arm/kvm/hyp/vgic-v2-sr.c | 4 +- 9 files changed, 137 insertions(+), 142 deletions(-) create mode 100644 arch/arm/include/asm/kvm_hyp.h delete mode 100644 arch/arm/kvm/hyp/hyp.h (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h new file mode 100644 index 000000000000..ff6de6a3af2d --- /dev/null +++ b/arch/arm/include/asm/kvm_hyp.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ARM_KVM_HYP_H__ +#define __ARM_KVM_HYP_H__ + +#include +#include +#include +#include + +#define __hyp_text __section(.hyp.text) notrace + +#define kern_hyp_va(v) (v) +#define hyp_kern_va(v) (v) + +#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ + "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 +#define __ACCESS_CP15_64(Op1, CRm) \ + "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 +#define __ACCESS_VFP(CRn) \ + "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32 + +#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) +#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) + +#define __read_sysreg(r, w, c, t) ({ \ + t __val; \ + asm volatile(r " " c : "=r" (__val)); \ + __val; \ +}) +#define read_sysreg(...) __read_sysreg(__VA_ARGS__) + +#define write_special(v, r) \ + asm volatile("msr " __stringify(r) ", %0" : : "r" (v)) +#define read_special(r) ({ \ + u32 __val; \ + asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ + __val; \ +}) + +#define TTBR0 __ACCESS_CP15_64(0, c2) +#define TTBR1 __ACCESS_CP15_64(1, c2) +#define VTTBR __ACCESS_CP15_64(6, c2) +#define PAR __ACCESS_CP15_64(0, c7) +#define CNTV_CVAL __ACCESS_CP15_64(3, c14) +#define CNTVOFF __ACCESS_CP15_64(4, c14) + +#define MIDR __ACCESS_CP15(c0, 0, c0, 0) +#define CSSELR __ACCESS_CP15(c0, 2, c0, 0) +#define VPIDR __ACCESS_CP15(c0, 4, c0, 0) +#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) +#define SCTLR __ACCESS_CP15(c1, 0, c0, 0) +#define CPACR __ACCESS_CP15(c1, 0, c0, 2) +#define HCR __ACCESS_CP15(c1, 4, c1, 0) +#define HDCR __ACCESS_CP15(c1, 4, c1, 1) +#define HCPTR __ACCESS_CP15(c1, 4, c1, 2) +#define HSTR __ACCESS_CP15(c1, 4, c1, 3) +#define TTBCR __ACCESS_CP15(c2, 0, c0, 2) +#define HTCR __ACCESS_CP15(c2, 4, c0, 2) +#define VTCR __ACCESS_CP15(c2, 4, c1, 2) +#define DACR __ACCESS_CP15(c3, 0, c0, 0) +#define DFSR __ACCESS_CP15(c5, 0, c0, 0) +#define IFSR __ACCESS_CP15(c5, 0, c0, 1) +#define ADFSR __ACCESS_CP15(c5, 0, c1, 0) +#define AIFSR __ACCESS_CP15(c5, 0, c1, 1) +#define HSR __ACCESS_CP15(c5, 4, c2, 0) +#define DFAR __ACCESS_CP15(c6, 0, c0, 0) +#define IFAR __ACCESS_CP15(c6, 0, c0, 2) +#define HDFAR __ACCESS_CP15(c6, 4, c0, 0) +#define HIFAR __ACCESS_CP15(c6, 4, c0, 2) +#define HPFAR __ACCESS_CP15(c6, 4, c0, 4) +#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) +#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) +#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) +#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) +#define PRRR __ACCESS_CP15(c10, 0, c2, 0) +#define NMRR __ACCESS_CP15(c10, 0, c2, 1) +#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0) +#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1) +#define VBAR __ACCESS_CP15(c12, 0, c0, 0) +#define CID __ACCESS_CP15(c13, 0, c0, 1) +#define TID_URW __ACCESS_CP15(c13, 0, c0, 2) +#define TID_URO __ACCESS_CP15(c13, 0, c0, 3) +#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) +#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2) +#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) +#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) +#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) + +#define VFP_FPEXC __ACCESS_VFP(FPEXC) + +void __timer_save_state(struct kvm_vcpu *vcpu); +void 
__timer_restore_state(struct kvm_vcpu *vcpu); + +void __vgic_v2_save_state(struct kvm_vcpu *vcpu); +void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); + +void __sysreg_save_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_state(struct kvm_cpu_context *ctxt); + +void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); +void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); +static inline bool __vfp_enabled(void) +{ + return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); +} + +void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); +void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); + +int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, + struct kvm_cpu_context *host); +int asmlinkage __hyp_do_panic(const char *, int, u32); + +#endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c index d02dc804f611..111bda8cdebd 100644 --- a/arch/arm/kvm/hyp/banked-sr.c +++ b/arch/arm/kvm/hyp/banked-sr.c @@ -18,7 +18,7 @@ * along with this program. If not, see . */ -#include "hyp.h" +#include __asm__(".arch_extension virt"); diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c index 732abbc34bd0..c4782812714c 100644 --- a/arch/arm/kvm/hyp/cp15-sr.c +++ b/arch/arm/kvm/hyp/cp15-sr.c @@ -18,7 +18,7 @@ * along with this program. If not, see . */ -#include "hyp.h" +#include static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx) { diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h deleted file mode 100644 index ff6de6a3af2d..000000000000 --- a/arch/arm/kvm/hyp/hyp.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM_KVM_HYP_H__ -#define __ARM_KVM_HYP_H__ - -#include -#include -#include -#include - -#define __hyp_text __section(.hyp.text) notrace - -#define kern_hyp_va(v) (v) -#define hyp_kern_va(v) (v) - -#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ - "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 -#define __ACCESS_CP15_64(Op1, CRm) \ - "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 -#define __ACCESS_VFP(CRn) \ - "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32 - -#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) -#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) - -#define __read_sysreg(r, w, c, t) ({ \ - t __val; \ - asm volatile(r " " c : "=r" (__val)); \ - __val; \ -}) -#define read_sysreg(...) 
__read_sysreg(__VA_ARGS__) - -#define write_special(v, r) \ - asm volatile("msr " __stringify(r) ", %0" : : "r" (v)) -#define read_special(r) ({ \ - u32 __val; \ - asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ - __val; \ -}) - -#define TTBR0 __ACCESS_CP15_64(0, c2) -#define TTBR1 __ACCESS_CP15_64(1, c2) -#define VTTBR __ACCESS_CP15_64(6, c2) -#define PAR __ACCESS_CP15_64(0, c7) -#define CNTV_CVAL __ACCESS_CP15_64(3, c14) -#define CNTVOFF __ACCESS_CP15_64(4, c14) - -#define MIDR __ACCESS_CP15(c0, 0, c0, 0) -#define CSSELR __ACCESS_CP15(c0, 2, c0, 0) -#define VPIDR __ACCESS_CP15(c0, 4, c0, 0) -#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) -#define SCTLR __ACCESS_CP15(c1, 0, c0, 0) -#define CPACR __ACCESS_CP15(c1, 0, c0, 2) -#define HCR __ACCESS_CP15(c1, 4, c1, 0) -#define HDCR __ACCESS_CP15(c1, 4, c1, 1) -#define HCPTR __ACCESS_CP15(c1, 4, c1, 2) -#define HSTR __ACCESS_CP15(c1, 4, c1, 3) -#define TTBCR __ACCESS_CP15(c2, 0, c0, 2) -#define HTCR __ACCESS_CP15(c2, 4, c0, 2) -#define VTCR __ACCESS_CP15(c2, 4, c1, 2) -#define DACR __ACCESS_CP15(c3, 0, c0, 0) -#define DFSR __ACCESS_CP15(c5, 0, c0, 0) -#define IFSR __ACCESS_CP15(c5, 0, c0, 1) -#define ADFSR __ACCESS_CP15(c5, 0, c1, 0) -#define AIFSR __ACCESS_CP15(c5, 0, c1, 1) -#define HSR __ACCESS_CP15(c5, 4, c2, 0) -#define DFAR __ACCESS_CP15(c6, 0, c0, 0) -#define IFAR __ACCESS_CP15(c6, 0, c0, 2) -#define HDFAR __ACCESS_CP15(c6, 4, c0, 0) -#define HIFAR __ACCESS_CP15(c6, 4, c0, 2) -#define HPFAR __ACCESS_CP15(c6, 4, c0, 4) -#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) -#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) -#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) -#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) -#define PRRR __ACCESS_CP15(c10, 0, c2, 0) -#define NMRR __ACCESS_CP15(c10, 0, c2, 1) -#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0) -#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1) -#define VBAR __ACCESS_CP15(c12, 0, c0, 0) -#define CID __ACCESS_CP15(c13, 0, c0, 1) -#define TID_URW __ACCESS_CP15(c13, 0, c0, 2) -#define TID_URO __ACCESS_CP15(c13, 0, c0, 3) -#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) -#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2) -#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) -#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) -#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) - -#define VFP_FPEXC __ACCESS_VFP(FPEXC) - -void __timer_save_state(struct kvm_vcpu *vcpu); -void __timer_restore_state(struct kvm_vcpu *vcpu); - -void __vgic_v2_save_state(struct kvm_vcpu *vcpu); -void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); - -void __sysreg_save_state(struct kvm_cpu_context *ctxt); -void __sysreg_restore_state(struct kvm_cpu_context *ctxt); - -void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); -void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); -static inline bool __vfp_enabled(void) -{ - return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); -} - -void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); -void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); - -int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, - struct kvm_cpu_context *host); -int asmlinkage __hyp_do_panic(const char *, int, u32); - -#endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c index f5f49c53be28..7be39af2ed6c 100644 --- a/arch/arm/kvm/hyp/s2-setup.c +++ b/arch/arm/kvm/hyp/s2-setup.c @@ -18,8 +18,7 @@ #include #include #include - -#include "hyp.h" +#include void __hyp_text __init_stage2_translation(void) { diff --git a/arch/arm/kvm/hyp/switch.c 
b/arch/arm/kvm/hyp/switch.c index f11ede159080..b13caa90cd44 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c @@ -16,7 +16,7 @@ */ #include -#include "hyp.h" +#include __asm__(".arch_extension virt"); diff --git a/arch/arm/kvm/hyp/timer-sr.c b/arch/arm/kvm/hyp/timer-sr.c index d7535fd0784e..2bb0c926e01c 100644 --- a/arch/arm/kvm/hyp/timer-sr.c +++ b/arch/arm/kvm/hyp/timer-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c index 82958b8f6a74..a2636001e616 100644 --- a/arch/arm/kvm/hyp/tlb.c +++ b/arch/arm/kvm/hyp/tlb.c @@ -18,7 +18,7 @@ * along with this program. If not, see . */ -#include "hyp.h" +#include /** * Flush per-VMID TLBs diff --git a/arch/arm/kvm/hyp/vgic-v2-sr.c b/arch/arm/kvm/hyp/vgic-v2-sr.c index e71761238cfc..9514a7d90d71 100644 --- a/arch/arm/kvm/hyp/vgic-v2-sr.c +++ b/arch/arm/kvm/hyp/vgic-v2-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) -- cgit v1.2.3-58-ga151 From b5fa5d3e628bd301b89937ee4f7814297d8e2e31 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:33:05 +0000 Subject: ARM: KVM: Use common version of vgic-v2-sr.c No need to keep our own private version, the common one is strictly identical. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 5 ++- arch/arm/kvm/hyp/vgic-v2-sr.c | 82 ------------------------------------------- 2 files changed, 4 insertions(+), 83 deletions(-) delete mode 100644 arch/arm/kvm/hyp/vgic-v2-sr.c (limited to 'arch') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 7152369504a6..8f735d970ef1 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -2,10 +2,13 @@ # Makefile for Kernel-based Virtual Machine module, HYP part # +KVM=../../../../virt/kvm + +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o + obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm/kvm/hyp/vgic-v2-sr.c b/arch/arm/kvm/hyp/vgic-v2-sr.c deleted file mode 100644 index 9514a7d90d71..000000000000 --- a/arch/arm/kvm/hyp/vgic-v2-sr.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2012-2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - -#include - -/* vcpu is already in the HYP VA space */ -void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - struct vgic_dist *vgic = &kvm->arch.vgic; - void __iomem *base = kern_hyp_va(vgic->vctrl_base); - u32 eisr0, eisr1, elrsr0, elrsr1; - int i, nr_lr; - - if (!base) - return; - - nr_lr = vcpu->arch.vgic_cpu.nr_lr; - cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR); - cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR); - eisr0 = readl_relaxed(base + GICH_EISR0); - elrsr0 = readl_relaxed(base + GICH_ELRSR0); - if (unlikely(nr_lr > 32)) { - eisr1 = readl_relaxed(base + GICH_EISR1); - elrsr1 = readl_relaxed(base + GICH_ELRSR1); - } else { - eisr1 = elrsr1 = 0; - } -#ifdef CONFIG_CPU_BIG_ENDIAN - cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1; - cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; -#else - cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0; - cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; -#endif - cpu_if->vgic_apr = readl_relaxed(base + GICH_APR); - - writel_relaxed(0, base + GICH_HCR); - - for (i = 0; i < nr_lr; i++) - cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); -} - -/* vcpu is already in the HYP VA space */ -void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - struct vgic_dist *vgic = &kvm->arch.vgic; - void __iomem *base = kern_hyp_va(vgic->vctrl_base); - int i, nr_lr; - - if (!base) - return; - - writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR); - writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); - writel_relaxed(cpu_if->vgic_apr, base + GICH_APR); - - nr_lr = vcpu->arch.vgic_cpu.nr_lr; - for (i = 0; i < nr_lr; i++) - writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4)); -} -- cgit v1.2.3-58-ga151 From 68130cb5db09cb8a285a59f70ac72d2bfa8685fd Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:48:42 +0000 Subject: ARM: KVM: Use common version of timer-sr.c Using the common HYP timer code is a bit more tricky, since we use system register names. Nothing a set of macros cannot work around... 
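
In practice the shim is just a handful of defines; roughly (the real
definitions are in the kvm_hyp.h hunk below):

	/* AArch64-style accessors mapped onto the 32-bit CP15 ones */
	#define read_sysreg_el0(r)	read_sysreg(r##_el0)
	#define write_sysreg_el0(v, r)	write_sysreg(v, r##_el0)

	#define cntv_ctl_el0	CNTV_CTL
	#define cntv_cval_el0	CNTV_CVAL
	#define cntvoff_el2	CNTVOFF
	#define cnthctl_el2	CNTHCTL

With these in place, the shared timer code keeps using the arm64
register spellings unchanged.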
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_hyp.h | 9 ++++++ arch/arm/kvm/hyp/Makefile | 2 +- arch/arm/kvm/hyp/timer-sr.c | 69 ------------------------------------------ 3 files changed, 10 insertions(+), 70 deletions(-) delete mode 100644 arch/arm/kvm/hyp/timer-sr.c (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h index ff6de6a3af2d..f0e860761380 100644 --- a/arch/arm/include/asm/kvm_hyp.h +++ b/arch/arm/include/asm/kvm_hyp.h @@ -104,6 +104,15 @@ #define VFP_FPEXC __ACCESS_VFP(FPEXC) +/* AArch64 compatibility macros, only for the timer so far */ +#define read_sysreg_el0(r) read_sysreg(r##_el0) +#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0) + +#define cntv_ctl_el0 CNTV_CTL +#define cntv_cval_el0 CNTV_CVAL +#define cntvoff_el2 CNTVOFF +#define cnthctl_el2 CNTHCTL + void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 8f735d970ef1..8dfa5f7f9290 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -5,10 +5,10 @@ KVM=../../../../virt/kvm obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm/kvm/hyp/timer-sr.c b/arch/arm/kvm/hyp/timer-sr.c deleted file mode 100644 index 2bb0c926e01c..000000000000 --- a/arch/arm/kvm/hyp/timer-sr.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2012-2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - -#include - -/* vcpu is already in the HYP VA space */ -void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - if (kvm->arch.timer.enabled) { - timer->cntv_ctl = read_sysreg(CNTV_CTL); - timer->cntv_cval = read_sysreg(CNTV_CVAL); - } - - /* Disable the virtual timer */ - write_sysreg(0, CNTV_CTL); - - /* Allow physical timer/counter access for the host */ - val = read_sysreg(CNTHCTL); - val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; - write_sysreg(val, CNTHCTL); - - /* Clear cntvoff for the host */ - write_sysreg(0, CNTVOFF); -} - -void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - /* - * Disallow physical timer access for the guest - * Physical counter access is allowed - */ - val = read_sysreg(CNTHCTL); - val &= ~CNTHCTL_EL1PCEN; - val |= CNTHCTL_EL1PCTEN; - write_sysreg(val, CNTHCTL); - - if (kvm->arch.timer.enabled) { - write_sysreg(kvm->arch.timer.cntvoff, CNTVOFF); - write_sysreg(timer->cntv_cval, CNTV_CVAL); - isb(); - write_sysreg(timer->cntv_ctl, CNTV_CTL); - } -} -- cgit v1.2.3-58-ga151 From 21a4179ce0a127ea96c66d37ac571ac4ceeb992f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Feb 2016 10:57:30 +0000 Subject: arm64: KVM: Move __cpu_init_stage2 after kvm_call_hyp In order to ease the merge with the rest of the arm64 tree, move the definition of __cpu_init_stage2() after what will be the new kvm_call_hyp. Hopefully the resolution of the merge conflict will be obvious. Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 43688d93c756..31fe7d6f32de 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -333,11 +333,6 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, hyp_stack_ptr, vector_ptr); } -static inline void __cpu_init_stage2(void) -{ - kvm_call_hyp(__init_stage2_translation); -} - static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} @@ -349,4 +344,11 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); +/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */ + +static inline void __cpu_init_stage2(void) +{ + kvm_call_hyp(__init_stage2_translation); +} + #endif /* __ARM64_KVM_HOST_H__ */ -- cgit v1.2.3-58-ga151 From ad8821377384a2dfe3eae17dcf287b437f10ae03 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 29 Feb 2016 11:25:04 +0000 Subject: arm64: KVM: Add temporary kvm_perf_event.h In order to merge the KVM/ARM PMU patches without creating a conflict mess, let's have a temporary include file that won't conflict with anything. Subsequent patches will clean that up. 
Acked-by: Will Deacon Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/kvm_perf_event.h | 55 +++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 arch/arm64/include/asm/kvm_perf_event.h (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 31fe7d6f32de..15851f52096b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -27,6 +27,7 @@ #include #include #include +#include #define __KVM_HAVE_ARCH_INTC_INITIALIZED diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h new file mode 100644 index 000000000000..d1c9d504f928 --- /dev/null +++ b/arch/arm64/include/asm/kvm_perf_event.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ASM_KVM_PERF_EVENT_H +#define __ASM_KVM_PERF_EVENT_H + +#define ARMV8_PMU_MAX_COUNTERS 32 +#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) + +/* + * Per-CPU PMCR: config reg + */ +#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */ +#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */ +#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */ +#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ +#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ +#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ +#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ +#define ARMV8_PMU_PMCR_N_MASK 0x1f +#define ARMV8_PMU_PMCR_MASK 0x3f /* Mask for writable bits */ + +/* + * PMOVSR: counters overflow flag status reg + */ +#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ +#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK + +/* + * PMXEVTYPER: Event selection reg + */ +#define ARMV8_PMU_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */ +#define ARMV8_PMU_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */ + +/* + * Event filters for PMUv3 + */ +#define ARMV8_PMU_EXCLUDE_EL1 (1 << 31) +#define ARMV8_PMU_EXCLUDE_EL0 (1 << 30) +#define ARMV8_PMU_INCLUDE_EL2 (1 << 27) + +#endif -- cgit v1.2.3-58-ga151 From 04fe472615d0216ec0bdd66d9f3f1812b642ada6 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Fri, 11 Sep 2015 09:38:32 +0800 Subject: arm64: KVM: Define PMU data structure for each vcpu Here we plan to support virtual PMU for guest by full software emulation, so define some basic structs and functions preparing for futher steps. Define struct kvm_pmc for performance monitor counter and struct kvm_pmu for performance monitor unit for each vcpu. According to ARMv8 spec, the PMU contains at most 32(ARMV8_PMU_MAX_COUNTERS) counters. Since this only supports ARM64 (or PMUv3), add a separate config symbol for it. 
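
As a rough usage sketch (not part of this patch; the helper name below is
made up for illustration), the per-vcpu PMU state hangs off kvm_vcpu_arch,
so a counter is reached as:

	/* illustrative only: look up one counter of a vcpu */
	static inline struct kvm_pmc *kvm_vcpu_pmc(struct kvm_vcpu *vcpu, u8 idx)
	{
		struct kvm_pmu *pmu = &vcpu->arch.pmu;

		/* idx must be below ARMV8_PMU_MAX_COUNTERS */
		return &pmu->pmc[idx];
	}

Later patches in the series access the counters in exactly this way when
they wire them up to perf events.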
Signed-off-by: Shannon Zhao Acked-by: Marc Zyngier Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/Kconfig | 7 +++++++ include/kvm/arm_pmu.h | 42 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) create mode 100644 include/kvm/arm_pmu.h (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 15851f52096b..fb57fdc6a433 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -38,6 +38,7 @@ #include #include +#include #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS @@ -213,6 +214,7 @@ struct kvm_vcpu_arch { /* VGIC state */ struct vgic_cpu vgic_cpu; struct arch_timer_cpu timer_cpu; + struct kvm_pmu pmu; /* * Anything that is not used directly from assembly code goes diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index a5272c07d1cb..de7450df7629 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -36,6 +36,7 @@ config KVM select HAVE_KVM_EVENTFD select HAVE_KVM_IRQFD select KVM_ARM_VGIC_V3 + select KVM_ARM_PMU if HW_PERF_EVENTS ---help--- Support hosting virtualized guest machines. We don't support KVM with 16K page tables yet, due to the multiple @@ -48,6 +49,12 @@ config KVM_ARM_HOST ---help--- Provides host support for ARM processors. +config KVM_ARM_PMU + bool + ---help--- + Adds support for a virtual Performance Monitoring Unit (PMU) in + virtual machines. + source drivers/vhost/Kconfig endif # VIRTUALIZATION diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h new file mode 100644 index 000000000000..3c2fd568e0a8 --- /dev/null +++ b/include/kvm/arm_pmu.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2015 Linaro Ltd. + * Author: Shannon Zhao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ASM_ARM_KVM_PMU_H +#define __ASM_ARM_KVM_PMU_H + +#ifdef CONFIG_KVM_ARM_PMU + +#include +#include + +struct kvm_pmc { + u8 idx; /* index into the pmu->pmc array */ + struct perf_event *perf_event; + u64 bitmask; +}; + +struct kvm_pmu { + int irq_num; + struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; + bool ready; +}; +#else +struct kvm_pmu { +}; +#endif + +#endif -- cgit v1.2.3-58-ga151 From ab9468340d2bcc2a837b8b536fa819a0fc05a32e Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Thu, 18 Jun 2015 16:01:53 +0800 Subject: arm64: KVM: Add access handler for PMCR register Add reset handler which gets host value of PMCR_EL0 and make writable bits architecturally UNKNOWN except PMCR.E which is zero. Add an access handler for PMCR. 
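
Spelled out, the reset computation in reset_pmcr() below amounts to the
following (annotated sketch of the same steps):

	u64 pmcr, val;

	asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));	/* host value */
	val  = pmcr & ~ARMV8_PMU_PMCR_MASK;		/* keep RO fields such as PMCR.N */
	val |= ARMV8_PMU_PMCR_MASK & 0xdecafbad;	/* writable bits: any pattern, i.e. UNKNOWN */
	val &= ~ARMV8_PMU_PMCR_E;			/* ...except E, so counters start disabled */
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;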
Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/sys_regs.c | 42 +++++++++++++++++++++++++++++++++++++-- include/kvm/arm_pmu.h | 4 ++++ 3 files changed, 47 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index fb57fdc6a433..5def605b4525 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -117,6 +117,9 @@ enum vcpu_sysreg { MDSCR_EL1, /* Monitor Debug System Control Register */ MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */ + /* Performance Monitors Registers */ + PMCR_EL0, /* Control Register */ + /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ IFSR32_EL2, /* Instruction Fault Status Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2e90371cfb37..e88ae2d809a5 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -439,6 +440,43 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr; } +static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) +{ + u64 pmcr, val; + + asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr)); + /* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) is reset to UNKNOWN + * except PMCR.E resetting to zero. + */ + val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) + | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); + vcpu_sys_reg(vcpu, PMCR_EL0) = val; +} + +static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 val; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + /* Only update writeable bits of PMCR */ + val = vcpu_sys_reg(vcpu, PMCR_EL0); + val &= ~ARMV8_PMU_PMCR_MASK; + val |= p->regval & ARMV8_PMU_PMCR_MASK; + vcpu_sys_reg(vcpu, PMCR_EL0) = val; + } else { + /* PMCR.P & PMCR.C are RAZ */ + val = vcpu_sys_reg(vcpu, PMCR_EL0) + & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); + p->regval = val; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -623,7 +661,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* PMCR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), - trap_raz_wi }, + access_pmcr, reset_pmcr, }, /* PMCNTENSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), trap_raz_wi }, @@ -885,7 +923,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, /* PMU */ - { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 3c2fd568e0a8..8157fe5bcbb0 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -34,9 +34,13 @@ struct kvm_pmu { struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; bool ready; }; + +#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) #else struct kvm_pmu { }; + +#define kvm_arm_pmu_v3_ready(v) (false) #endif #endif -- cgit v1.2.3-58-ga151 From 
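
Consumers of PMSELR then only ever look at SEL; for instance a later
patch in this series resolves PMXEVCNTR accesses with:

	/* pick the counter currently selected by the guest */
	idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;

so the UNKNOWN upper bits left behind by reset_unknown never leak out.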
3965c3ce751ab5a97618a2818eec4497576f4654 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 31 Aug 2015 17:20:22 +0800 Subject: arm64: KVM: Add access handler for PMSELR register Since the reset value of PMSELR_EL0 is UNKNOWN, use reset_unknown for its reset handler. When reading PMSELR, return the PMSELR.SEL field to guest. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/sys_regs.c | 20 ++++++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5def605b4525..57a2d8f76c2f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -119,6 +119,7 @@ enum vcpu_sysreg { /* Performance Monitors Registers */ PMCR_EL0, /* Control Register */ + PMSELR_EL0, /* Event Counter Selection Register */ /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e88ae2d809a5..b05e20f8a3b9 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -477,6 +477,22 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) + vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; + else + /* return PMSELR.SEL field */ + p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0) + & ARMV8_PMU_COUNTER_MASK; + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -676,7 +692,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { trap_raz_wi }, /* PMSELR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), - trap_raz_wi }, + access_pmselr, reset_unknown, PMSELR_EL0 }, /* PMCEID0_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), trap_raz_wi }, @@ -927,7 +943,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, -- cgit v1.2.3-58-ga151 From a86b5505304404dc5fc5e62a6dc294706e525003 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 7 Sep 2015 16:11:12 +0800 Subject: arm64: KVM: Add access handler for PMCEID0 and PMCEID1 register Add access handler which gets host value of PMCEID0 or PMCEID1 when guest access these registers. Writing action to PMCEID0 or PMCEID1 is UNDEFINED. 
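
The net effect is that the guest sees the host's common-event bitmap.
A hypothetical guest-side probe (illustrative only, not from this patch)
would look like:

	u64 ceid0;
	bool have_cycles;

	/* this MRS traps and is satisfied by the handler added below */
	asm volatile("mrs %0, pmceid0_el0" : "=r" (ceid0));
	have_cycles = ceid0 & (1UL << 0x11);	/* common event 0x11: CPU_CYCLES */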
Signed-off-by: Shannon Zhao Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index b05e20f8a3b9..ca8cdf6d83cf 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -493,6 +493,26 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 pmceid; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + BUG_ON(p->is_write); + + if (!(p->Op2 & 1)) + asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid)); + else + asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid)); + + p->regval = pmceid; + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -695,10 +715,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmselr, reset_unknown, PMSELR_EL0 }, /* PMCEID0_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), - trap_raz_wi }, + access_pmceid }, /* PMCEID1_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), - trap_raz_wi }, + access_pmceid }, /* PMCCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), trap_raz_wi }, @@ -944,8 +964,8 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, - { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, + { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi }, -- cgit v1.2.3-58-ga151 From 051ff581ce70e822729e9474941f3c206cbf7436 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Dec 2015 15:29:06 +0800 Subject: arm64: KVM: Add access handler for event counter register These kind of registers include PMEVCNTRn, PMCCNTR and PMXEVCNTR which is mapped to PMEVCNTRn. The access handler translates all aarch32 register offsets to aarch64 ones and uses vcpu_sys_reg() to access their values to avoid taking care of big endian. When reading these registers, return the sum of register value and the value perf event counts. 
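
A sketch of the read path, approximating the kvm_pmu_get_counter_value()
helper that the new virt/kvm/arm/pmu.c provides (details may differ from
the actual implementation in that file):

	u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
	{
		u64 counter, reg, enabled, running;
		struct kvm_pmc *pmc = &vcpu->arch.pmu.pmc[select_idx];

		reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
		counter = vcpu_sys_reg(vcpu, reg);

		/* add whatever the backing perf event has counted so far */
		if (pmc->perf_event)
			counter += perf_event_read_value(pmc->perf_event,
							 &enabled, &running);

		return counter & pmc->bitmask;
	}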
Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 + arch/arm64/kvm/Makefile | 1 + arch/arm64/kvm/sys_regs.c | 139 ++++++++++++++++++++++++++++++++++++-- include/kvm/arm_pmu.h | 11 +++ virt/kvm/arm/pmu.c | 63 +++++++++++++++++ 5 files changed, 213 insertions(+), 4 deletions(-) create mode 100644 virt/kvm/arm/pmu.c (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 57a2d8f76c2f..4ae27fe34240 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -120,6 +120,9 @@ enum vcpu_sysreg { /* Performance Monitors Registers */ PMCR_EL0, /* Control Register */ PMSELR_EL0, /* Event Counter Selection Register */ + PMEVCNTR0_EL0, /* Event Counter Register (0-30) */ + PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30, + PMCCNTR_EL0, /* Cycle Counter Register */ /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index caee9ee8e12a..122cff482ac4 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -26,3 +26,4 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o +kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ca8cdf6d83cf..ff3214b6fbc8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -513,6 +513,56 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) +{ + u64 pmcr, val; + + pmcr = vcpu_sys_reg(vcpu, PMCR_EL0); + val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; + if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) + return false; + + return true; +} + +static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 idx; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (r->CRn == 9 && r->CRm == 13) { + if (r->Op2 == 2) { + /* PMXEVCNTR_EL0 */ + idx = vcpu_sys_reg(vcpu, PMSELR_EL0) + & ARMV8_PMU_COUNTER_MASK; + } else if (r->Op2 == 0) { + /* PMCCNTR_EL0 */ + idx = ARMV8_PMU_CYCLE_IDX; + } else { + BUG(); + } + } else if (r->CRn == 14 && (r->CRm & 12) == 8) { + /* PMEVCNTRn_EL0 */ + idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); + } else { + BUG(); + } + + if (!pmu_counter_idx_valid(vcpu, idx)) + return false; + + if (p->is_write) + kvm_pmu_set_counter_value(vcpu, idx, p->regval); + else + p->regval = kvm_pmu_get_counter_value(vcpu, idx); + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -528,6 +578,13 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \ trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } +/* Macro to expand the PMEVCNTRn_EL0 register */ +#define PMU_PMEVCNTR_EL0(n) \ + /* PMEVCNTRn_EL0 */ \ + { Op0(0b11), Op1(0b011), CRn(0b1110), \ + CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ + access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), } + /* * Architected system registers. 
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 @@ -721,13 +778,13 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmceid }, /* PMCCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), - trap_raz_wi }, + access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 }, /* PMXEVTYPER_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), trap_raz_wi }, /* PMXEVCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), - trap_raz_wi }, + access_pmu_evcntr }, /* PMUSERENR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), trap_raz_wi }, @@ -742,6 +799,39 @@ static const struct sys_reg_desc sys_reg_descs[] = { { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), NULL, reset_unknown, TPIDRRO_EL0 }, + /* PMEVCNTRn_EL0 */ + PMU_PMEVCNTR_EL0(0), + PMU_PMEVCNTR_EL0(1), + PMU_PMEVCNTR_EL0(2), + PMU_PMEVCNTR_EL0(3), + PMU_PMEVCNTR_EL0(4), + PMU_PMEVCNTR_EL0(5), + PMU_PMEVCNTR_EL0(6), + PMU_PMEVCNTR_EL0(7), + PMU_PMEVCNTR_EL0(8), + PMU_PMEVCNTR_EL0(9), + PMU_PMEVCNTR_EL0(10), + PMU_PMEVCNTR_EL0(11), + PMU_PMEVCNTR_EL0(12), + PMU_PMEVCNTR_EL0(13), + PMU_PMEVCNTR_EL0(14), + PMU_PMEVCNTR_EL0(15), + PMU_PMEVCNTR_EL0(16), + PMU_PMEVCNTR_EL0(17), + PMU_PMEVCNTR_EL0(18), + PMU_PMEVCNTR_EL0(19), + PMU_PMEVCNTR_EL0(20), + PMU_PMEVCNTR_EL0(21), + PMU_PMEVCNTR_EL0(22), + PMU_PMEVCNTR_EL0(23), + PMU_PMEVCNTR_EL0(24), + PMU_PMEVCNTR_EL0(25), + PMU_PMEVCNTR_EL0(26), + PMU_PMEVCNTR_EL0(27), + PMU_PMEVCNTR_EL0(28), + PMU_PMEVCNTR_EL0(29), + PMU_PMEVCNTR_EL0(30), + /* DACR32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), NULL, reset_unknown, DACR32_EL2 }, @@ -931,6 +1021,13 @@ static const struct sys_reg_desc cp14_64_regs[] = { { Op1( 0), CRm( 2), .access = trap_raz_wi }, }; +/* Macro to expand the PMEVCNTRn register */ +#define PMU_PMEVCNTR(n) \ + /* PMEVCNTRn */ \ + { Op1(0), CRn(0b1110), \ + CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ + access_pmu_evcntr } + /* * Trapped cp15 registers. 
TTBR0/TTBR1 get a double encoding, * depending on the way they are accessed (as a 32bit or a 64bit @@ -966,9 +1063,9 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, - { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi }, @@ -982,10 +1079,44 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi }, { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, + + /* PMEVCNTRn */ + PMU_PMEVCNTR(0), + PMU_PMEVCNTR(1), + PMU_PMEVCNTR(2), + PMU_PMEVCNTR(3), + PMU_PMEVCNTR(4), + PMU_PMEVCNTR(5), + PMU_PMEVCNTR(6), + PMU_PMEVCNTR(7), + PMU_PMEVCNTR(8), + PMU_PMEVCNTR(9), + PMU_PMEVCNTR(10), + PMU_PMEVCNTR(11), + PMU_PMEVCNTR(12), + PMU_PMEVCNTR(13), + PMU_PMEVCNTR(14), + PMU_PMEVCNTR(15), + PMU_PMEVCNTR(16), + PMU_PMEVCNTR(17), + PMU_PMEVCNTR(18), + PMU_PMEVCNTR(19), + PMU_PMEVCNTR(20), + PMU_PMEVCNTR(21), + PMU_PMEVCNTR(22), + PMU_PMEVCNTR(23), + PMU_PMEVCNTR(24), + PMU_PMEVCNTR(25), + PMU_PMEVCNTR(26), + PMU_PMEVCNTR(27), + PMU_PMEVCNTR(28), + PMU_PMEVCNTR(29), + PMU_PMEVCNTR(30), }; static const struct sys_reg_desc cp15_64_regs[] = { { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, + { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, }; diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 8157fe5bcbb0..bcb769805839 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -23,6 +23,8 @@ #include #include +#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) + struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ struct perf_event *perf_event; @@ -36,11 +38,20 @@ struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); +void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); #else struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) (false) +static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx) +{ + return 0; +} +static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx, u64 val) {} #endif #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c new file mode 100644 index 000000000000..cd74e6367cd6 --- /dev/null +++ b/virt/kvm/arm/pmu.c @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2015 Linaro Ltd. + * Author: Shannon Zhao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +/** + * kvm_pmu_get_counter_value - get PMU counter value + * @vcpu: The vcpu pointer + * @select_idx: The counter index + */ +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) +{ + u64 counter, reg, enabled, running; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + struct kvm_pmc *pmc = &pmu->pmc[select_idx]; + + reg = (select_idx == ARMV8_PMU_CYCLE_IDX) + ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx; + counter = vcpu_sys_reg(vcpu, reg); + + /* The real counter value is equal to the value of counter register plus + * the value perf event counts. + */ + if (pmc->perf_event) + counter += perf_event_read_value(pmc->perf_event, &enabled, + &running); + + return counter & pmc->bitmask; +} + +/** + * kvm_pmu_set_counter_value - set PMU counter value + * @vcpu: The vcpu pointer + * @select_idx: The counter index + * @val: The counter value + */ +void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) +{ + u64 reg; + + reg = (select_idx == ARMV8_PMU_CYCLE_IDX) + ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx; + vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx); +} -- cgit v1.2.3-58-ga151 From 96b0eebcc6a14e3bdb9ff0e7176fbfc225bdde94 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 12:26:13 +0800 Subject: arm64: KVM: Add access handler for PMCNTENSET and PMCNTENCLR register Since the reset value of PMCNTENSET and PMCNTENCLR is UNKNOWN, use reset_unknown for its reset handler. Add a handler to emulate writing PMCNTENSET or PMCNTENCLR register. When writing to PMCNTENSET, call perf_event_enable to enable the perf event. When writing to PMCNTENCLR, call perf_event_disable to disable the perf event. Signed-off-by: Shannon Zhao Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/sys_regs.c | 35 ++++++++++++++++++--- include/kvm/arm_pmu.h | 9 ++++++ virt/kvm/arm/pmu.c | 66 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 107 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4ae27fe34240..993793b422aa 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -123,6 +123,7 @@ enum vcpu_sysreg { PMEVCNTR0_EL0, /* Event Counter Register (0-30) */ PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30, PMCCNTR_EL0, /* Cycle Counter Register */ + PMCNTENSET_EL0, /* Count Enable Set Register */ /* 32bit specific registers. 
Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ff3214b6fbc8..d4b6ae3c09b5 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -563,6 +563,33 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, return true; } +static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 val, mask; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + mask = kvm_pmu_valid_counter_mask(vcpu); + if (p->is_write) { + val = p->regval & mask; + if (r->Op2 & 0x1) { + /* accessing PMCNTENSET_EL0 */ + vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; + kvm_pmu_enable_counter(vcpu, val); + } else { + /* accessing PMCNTENCLR_EL0 */ + vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; + kvm_pmu_disable_counter(vcpu, val); + } + } else { + p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -757,10 +784,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmcr, reset_pmcr, }, /* PMCNTENSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), - trap_raz_wi }, + access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, /* PMCNTENCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), - trap_raz_wi }, + access_pmcnten, NULL, PMCNTENSET_EL0 }, /* PMOVSCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), trap_raz_wi }, @@ -1057,8 +1084,8 @@ static const struct sys_reg_desc cp15_regs[] = { /* PMU */ { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, - { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, + { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index bcb769805839..b70058ef1dd6 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -40,6 +40,9 @@ struct kvm_pmu { #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); +u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); #else struct kvm_pmu { }; @@ -52,6 +55,12 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, } static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) {} +static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} #endif #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index cd74e6367cd6..f8dc17430813 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -61,3 +61,69 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) ? 
PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx; vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx); } + +u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) +{ + u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT; + + val &= ARMV8_PMU_PMCR_N_MASK; + if (val == 0) + return BIT(ARMV8_PMU_CYCLE_IDX); + else + return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX); +} + +/** + * kvm_pmu_enable_counter - enable selected PMU counter + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMCNTENSET register + * + * Call perf_event_enable to start counting the perf event + */ +void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) +{ + int i; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + struct kvm_pmc *pmc; + + if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val) + return; + + for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + if (!(val & BIT(i))) + continue; + + pmc = &pmu->pmc[i]; + if (pmc->perf_event) { + perf_event_enable(pmc->perf_event); + if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE) + kvm_debug("fail to enable perf event\n"); + } + } +} + +/** + * kvm_pmu_disable_counter - disable selected PMU counter + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMCNTENCLR register + * + * Call perf_event_disable to stop counting the perf event + */ +void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) +{ + int i; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + struct kvm_pmc *pmc; + + if (!val) + return; + + for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + if (!(val & BIT(i))) + continue; + + pmc = &pmu->pmc[i]; + if (pmc->perf_event) + perf_event_disable(pmc->perf_event); + } +} -- cgit v1.2.3-58-ga151 From 9feb21ac57d53003557ddc01f9aee496269996c7 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 23 Feb 2016 11:11:27 +0800 Subject: arm64: KVM: Add access handler for event type register These kind of registers include PMEVTYPERn, PMCCFILTR and PMXEVTYPER which is mapped to PMEVTYPERn or PMCCFILTR. The access handler translates all aarch32 register offsets to aarch64 ones and uses vcpu_sys_reg() to access their values to avoid taking care of big endian. When writing to these registers, create a perf_event for the selected event type. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 + arch/arm64/kvm/sys_regs.c | 126 +++++++++++++++++++++++++++++++++++++- 2 files changed, 127 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 993793b422aa..121182dd0947 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -123,6 +123,9 @@ enum vcpu_sysreg { PMEVCNTR0_EL0, /* Event Counter Register (0-30) */ PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30, PMCCNTR_EL0, /* Cycle Counter Register */ + PMEVTYPER0_EL0, /* Event Type Register (0-30) */ + PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30, + PMCCFILTR_EL0, /* Cycle Count Filter Register */ PMCNTENSET_EL0, /* Count Enable Set Register */ /* 32bit specific registers. 
Keep them at the end of the range */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d4b6ae3c09b5..4faf324c9be9 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -563,6 +563,42 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, return true; } +static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 idx, reg; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { + /* PMXEVTYPER_EL0 */ + idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; + reg = PMEVTYPER0_EL0 + idx; + } else if (r->CRn == 14 && (r->CRm & 12) == 12) { + idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); + if (idx == ARMV8_PMU_CYCLE_IDX) + reg = PMCCFILTR_EL0; + else + /* PMEVTYPERn_EL0 */ + reg = PMEVTYPER0_EL0 + idx; + } else { + BUG(); + } + + if (!pmu_counter_idx_valid(vcpu, idx)) + return false; + + if (p->is_write) { + kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); + vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; + } else { + p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; + } + + return true; +} + static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { @@ -612,6 +648,13 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), } +/* Macro to expand the PMEVTYPERn_EL0 register */ +#define PMU_PMEVTYPER_EL0(n) \ + /* PMEVTYPERn_EL0 */ \ + { Op0(0b11), Op1(0b011), CRn(0b1110), \ + CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ + access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } + /* * Architected system registers. * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 @@ -808,7 +851,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 }, /* PMXEVTYPER_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), - trap_raz_wi }, + access_pmu_evtyper }, /* PMXEVCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), access_pmu_evcntr }, @@ -858,6 +901,44 @@ static const struct sys_reg_desc sys_reg_descs[] = { PMU_PMEVCNTR_EL0(28), PMU_PMEVCNTR_EL0(29), PMU_PMEVCNTR_EL0(30), + /* PMEVTYPERn_EL0 */ + PMU_PMEVTYPER_EL0(0), + PMU_PMEVTYPER_EL0(1), + PMU_PMEVTYPER_EL0(2), + PMU_PMEVTYPER_EL0(3), + PMU_PMEVTYPER_EL0(4), + PMU_PMEVTYPER_EL0(5), + PMU_PMEVTYPER_EL0(6), + PMU_PMEVTYPER_EL0(7), + PMU_PMEVTYPER_EL0(8), + PMU_PMEVTYPER_EL0(9), + PMU_PMEVTYPER_EL0(10), + PMU_PMEVTYPER_EL0(11), + PMU_PMEVTYPER_EL0(12), + PMU_PMEVTYPER_EL0(13), + PMU_PMEVTYPER_EL0(14), + PMU_PMEVTYPER_EL0(15), + PMU_PMEVTYPER_EL0(16), + PMU_PMEVTYPER_EL0(17), + PMU_PMEVTYPER_EL0(18), + PMU_PMEVTYPER_EL0(19), + PMU_PMEVTYPER_EL0(20), + PMU_PMEVTYPER_EL0(21), + PMU_PMEVTYPER_EL0(22), + PMU_PMEVTYPER_EL0(23), + PMU_PMEVTYPER_EL0(24), + PMU_PMEVTYPER_EL0(25), + PMU_PMEVTYPER_EL0(26), + PMU_PMEVTYPER_EL0(27), + PMU_PMEVTYPER_EL0(28), + PMU_PMEVTYPER_EL0(29), + PMU_PMEVTYPER_EL0(30), + /* PMCCFILTR_EL0 + * This register resets as unknown in 64bit mode while it resets as zero + * in 32bit mode. Here we choose to reset it as zero for consistency. 
+ */ + { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111), + access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 }, /* DACR32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), @@ -1055,6 +1136,13 @@ static const struct sys_reg_desc cp14_64_regs[] = { CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evcntr } +/* Macro to expand the PMEVTYPERn register */ +#define PMU_PMEVTYPER(n) \ + /* PMEVTYPERn */ \ + { Op1(0), CRn(0b1110), \ + CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ + access_pmu_evtyper } + /* * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, * depending on the way they are accessed (as a 32bit or a 64bit @@ -1091,7 +1179,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, - { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, @@ -1139,6 +1227,40 @@ static const struct sys_reg_desc cp15_regs[] = { PMU_PMEVCNTR(28), PMU_PMEVCNTR(29), PMU_PMEVCNTR(30), + /* PMEVTYPERn */ + PMU_PMEVTYPER(0), + PMU_PMEVTYPER(1), + PMU_PMEVTYPER(2), + PMU_PMEVTYPER(3), + PMU_PMEVTYPER(4), + PMU_PMEVTYPER(5), + PMU_PMEVTYPER(6), + PMU_PMEVTYPER(7), + PMU_PMEVTYPER(8), + PMU_PMEVTYPER(9), + PMU_PMEVTYPER(10), + PMU_PMEVTYPER(11), + PMU_PMEVTYPER(12), + PMU_PMEVTYPER(13), + PMU_PMEVTYPER(14), + PMU_PMEVTYPER(15), + PMU_PMEVTYPER(16), + PMU_PMEVTYPER(17), + PMU_PMEVTYPER(18), + PMU_PMEVTYPER(19), + PMU_PMEVTYPER(20), + PMU_PMEVTYPER(21), + PMU_PMEVTYPER(22), + PMU_PMEVTYPER(23), + PMU_PMEVTYPER(24), + PMU_PMEVTYPER(25), + PMU_PMEVTYPER(26), + PMU_PMEVTYPER(27), + PMU_PMEVTYPER(28), + PMU_PMEVTYPER(29), + PMU_PMEVTYPER(30), + /* PMCCFILTR */ + { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper }, }; static const struct sys_reg_desc cp15_64_regs[] = { -- cgit v1.2.3-58-ga151 From 9db52c78cd43c7fe69992cb7d57cffa991b36ced Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 14:40:20 +0800 Subject: arm64: KVM: Add access handler for PMINTENSET and PMINTENCLR register Since the reset value of PMINTENSET and PMINTENCLR is UNKNOWN, use reset_unknown for its reset handler. Add a handler to emulate writing PMINTENSET or PMINTENCLR register. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/sys_regs.c | 32 ++++++++++++++++++++++++++++---- 2 files changed, 29 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 121182dd0947..da59f44f0c84 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -127,6 +127,7 @@ enum vcpu_sysreg { PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30, PMCCFILTR_EL0, /* Cycle Count Filter Register */ PMCNTENSET_EL0, /* Count Enable Set Register */ + PMINTENSET_EL1, /* Interrupt Enable Set Register */ /* 32bit specific registers. 
Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 4faf324c9be9..bfc70b2529cd 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -626,6 +626,30 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 mask = kvm_pmu_valid_counter_mask(vcpu); + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + u64 val = p->regval & mask; + + if (r->Op2 & 0x1) + /* accessing PMINTENSET_EL1 */ + vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val; + else + /* accessing PMINTENCLR_EL1 */ + vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val; + } else { + p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -784,10 +808,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* PMINTENSET_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), - trap_raz_wi }, + access_pminten, reset_unknown, PMINTENSET_EL1 }, /* PMINTENCLR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), - trap_raz_wi }, + access_pminten, NULL, PMINTENSET_EL1 }, /* MAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), @@ -1182,8 +1206,8 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, - { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, + { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, -- cgit v1.2.3-58-ga151 From 76d883c4e6401b98ea26d40c437ff62719a517ad Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 15:03:26 +0800 Subject: arm64: KVM: Add access handler for PMOVSSET and PMOVSCLR register Since the reset value of PMOVSSET and PMOVSCLR is UNKNOWN, use reset_unknown for its reset handler. Add a handler to emulate writing PMOVSSET or PMOVSCLR register. When writing non-zero value to PMOVSSET, the counter and its interrupt is enabled, kick this vcpu to sync PMU interrupt. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/sys_regs.c | 29 ++++++++++++++++++++++++++--- include/kvm/arm_pmu.h | 2 ++ virt/kvm/arm/pmu.c | 31 +++++++++++++++++++++++++++++++ 4 files changed, 60 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index da59f44f0c84..6c61a2bda6de 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -128,6 +128,7 @@ enum vcpu_sysreg { PMCCFILTR_EL0, /* Cycle Count Filter Register */ PMCNTENSET_EL0, /* Count Enable Set Register */ PMINTENSET_EL1, /* Interrupt Enable Set Register */ + PMOVSSET_EL0, /* Overflow Flag Status Set Register */ /* 32bit specific registers. 
Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index bfc70b2529cd..6a774f9b9cca 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -650,6 +650,28 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 mask = kvm_pmu_valid_counter_mask(vcpu); + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + if (r->CRm & 0x2) + /* accessing PMOVSSET_EL0 */ + kvm_pmu_overflow_set(vcpu, p->regval & mask); + else + /* accessing PMOVSCLR_EL0 */ + vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); + } else { + p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -857,7 +879,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmcnten, NULL, PMCNTENSET_EL0 }, /* PMOVSCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), - trap_raz_wi }, + access_pmovs, NULL, PMOVSSET_EL0 }, /* PMSWINC_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), trap_raz_wi }, @@ -884,7 +906,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { trap_raz_wi }, /* PMOVSSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), - trap_raz_wi }, + access_pmovs, reset_unknown, PMOVSSET_EL0 }, /* TPIDR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), @@ -1198,7 +1220,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, - { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, @@ -1208,6 +1230,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, + { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index c57377970d4e..60061dabe881 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -43,6 +43,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); #else @@ -63,6 +64,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) } static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} 
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 591a11d1bd13..023286101fef 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -149,6 +149,37 @@ void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) } } +static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) +{ + u64 reg = 0; + + if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) + reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0); + reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0); + reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1); + reg &= kvm_pmu_valid_counter_mask(vcpu); + + return reg; +} + +/** + * kvm_pmu_overflow_set - set PMU overflow interrupt + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMOVSSET register + */ +void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) +{ + u64 reg; + + if (val == 0) + return; + + vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val; + reg = kvm_pmu_overflow_status(vcpu); + if (reg != 0) + kvm_vcpu_kick(vcpu); +} + static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) { return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && -- cgit v1.2.3-58-ga151 From 7a0adc7064b88609e2917446af8789fac1d4fdd1 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 15:49:39 +0800 Subject: arm64: KVM: Add access handler for PMSWINC register Add access handler which emulates writing and reading PMSWINC register and add support for creating software increment event. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/kvm_perf_event.h | 2 ++ arch/arm64/kvm/sys_regs.c | 20 ++++++++++++++++++- include/kvm/arm_pmu.h | 2 ++ virt/kvm/arm/pmu.c | 34 +++++++++++++++++++++++++++++++++ 5 files changed, 58 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 6c61a2bda6de..4001e85b4818 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -129,6 +129,7 @@ enum vcpu_sysreg { PMCNTENSET_EL0, /* Count Enable Set Register */ PMINTENSET_EL1, /* Interrupt Enable Set Register */ PMOVSSET_EL0, /* Overflow Flag Status Set Register */ + PMSWINC_EL0, /* Software Increment Register */ /* 32bit specific registers. 
Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h index d1c9d504f928..62fa60fbc0b3 100644 --- a/arch/arm64/include/asm/kvm_perf_event.h +++ b/arch/arm64/include/asm/kvm_perf_event.h @@ -45,6 +45,8 @@ #define ARMV8_PMU_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */ #define ARMV8_PMU_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */ +#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ + /* * Event filters for PMUv3 */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 6a774f9b9cca..10e53796926c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -672,6 +672,23 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 mask; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + mask = kvm_pmu_valid_counter_mask(vcpu); + kvm_pmu_software_increment(vcpu, p->regval & mask); + return true; + } + + return false; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -882,7 +899,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_pmovs, NULL, PMOVSSET_EL0 }, /* PMSWINC_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), - trap_raz_wi }, + access_pmswinc, reset_unknown, PMSWINC_EL0 }, /* PMSELR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), access_pmselr, reset_unknown, PMSELR_EL0 }, @@ -1221,6 +1238,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, + { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 60061dabe881..348c4c9d763a 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -44,6 +44,7 @@ u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); #else @@ -65,6 +66,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 023286101fef..9fc775ef03ec 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -180,6 +180,36 @@ void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) kvm_vcpu_kick(vcpu); } +/** + * kvm_pmu_software_increment - do software 
increment + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMSWINC register + */ +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) +{ + int i; + u64 type, enable, reg; + + if (val == 0) + return; + + enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0); + for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) { + if (!(val & BIT(i))) + continue; + type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) + & ARMV8_PMU_EVTYPE_EVENT; + if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) + && (enable & BIT(i))) { + reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; + reg = lower_32_bits(reg); + vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; + if (!reg) + kvm_pmu_overflow_set(vcpu, BIT(i)); + } + } +} + static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) { return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && @@ -208,6 +238,10 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, kvm_pmu_stop_counter(vcpu, pmc); eventsel = data & ARMV8_PMU_EVTYPE_EVENT; + /* Software increment event does't need to be backed by a perf event */ + if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) + return; + memset(&attr, 0, sizeof(struct perf_event_attr)); attr.type = PERF_TYPE_RAW; attr.size = sizeof(attr); -- cgit v1.2.3-58-ga151 From 76993739cd6f5b42e881fe3332b9f8eb98cd6907 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Wed, 28 Oct 2015 12:10:30 +0800 Subject: arm64: KVM: Add helper to handle PMCR register bits According to ARMv8 spec, when writing 1 to PMCR.E, all counters are enabled by PMCNTENSET, while writing 0 to PMCR.E, all counters are disabled. When writing 1 to PMCR.P, reset all event counters, not including PMCCNTR, to zero. When writing 1 to PMCR.C, reset PMCCNTR to zero. Signed-off-by: Shannon Zhao Reviewed-by: Marc Zyngier Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_perf_event.h | 4 +++- arch/arm64/kvm/sys_regs.c | 1 + include/kvm/arm_pmu.h | 2 ++ virt/kvm/arm/pmu.c | 34 +++++++++++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h index 62fa60fbc0b3..6d080c07873b 100644 --- a/arch/arm64/include/asm/kvm_perf_event.h +++ b/arch/arm64/include/asm/kvm_perf_event.h @@ -29,9 +29,11 @@ #define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ #define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ +/* Determines which bit of PMCCNTR_EL0 generates an overflow */ +#define ARMV8_PMU_PMCR_LC (1 << 6) #define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ #define ARMV8_PMU_PMCR_N_MASK 0x1f -#define ARMV8_PMU_PMCR_MASK 0x3f /* Mask for writable bits */ +#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */ /* * PMOVSR: counters overflow flag status reg diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 10e53796926c..12f36ef8caa0 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -467,6 +467,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, val &= ~ARMV8_PMU_PMCR_MASK; val |= p->regval & ARMV8_PMU_PMCR_MASK; vcpu_sys_reg(vcpu, PMCR_EL0) = val; + kvm_pmu_handle_pmcr(vcpu, val); } else { /* PMCR.P & PMCR.C are RAZ */ val = vcpu_sys_reg(vcpu, PMCR_EL0) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 348c4c9d763a..8bc92d119713 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -45,6 +45,7 @@ void 
kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); #else @@ -67,6 +68,7 @@ static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} #endif diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 9fc775ef03ec..cda869c609dd 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -210,6 +210,40 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) } } +/** + * kvm_pmu_handle_pmcr - handle PMCR register + * @vcpu: The vcpu pointer + * @val: the value guest writes to PMCR register + */ +void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) +{ + struct kvm_pmu *pmu = &vcpu->arch.pmu; + struct kvm_pmc *pmc; + u64 mask; + int i; + + mask = kvm_pmu_valid_counter_mask(vcpu); + if (val & ARMV8_PMU_PMCR_E) { + kvm_pmu_enable_counter(vcpu, + vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask); + } else { + kvm_pmu_disable_counter(vcpu, mask); + } + + if (val & ARMV8_PMU_PMCR_C) + kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0); + + if (val & ARMV8_PMU_PMCR_P) { + for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) + kvm_pmu_set_counter_value(vcpu, i, 0); + } + + if (val & ARMV8_PMU_PMCR_LC) { + pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX]; + pmc->bitmask = 0xffffffffffffffffUL; + } +} + static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) { return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && -- cgit v1.2.3-58-ga151 From d692b8ad6ec4814ddd9a37ce5c9c9d971e741088 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Tue, 8 Sep 2015 15:15:56 +0800 Subject: arm64: KVM: Add access handler for PMUSERENR register This register resets as unknown in 64bit mode while it resets as zero in 32bit mode. Here we choose to reset it as zero for consistency. PMUSERENR_EL0 holds some bits which decide whether PMU registers can be accessed from EL0. Add some check helpers to handle the access from EL0. When these bits are zero, only reading PMUSERENR will trap to EL2 and writing PMUSERENR or reading/writing other PMU registers will trap to EL1 other than EL2 when HCR.TGE==0. To current KVM configuration (HCR.TGE==0) there is no way to get these traps. Here we write 0xf to physical PMUSERENR register on VM entry, so that it will trap PMU access from EL0 to EL2. Within the register access handler we check the real value of guest PMUSERENR register to decide whether this access is allowed. If not allowed, return false to inject UND to guest. 
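
The access rules described above reduce to a small predicate: an EL0 access is allowed when the vcpu is privileged, when PMUSERENR.EN is set, or when the class-specific bit (SW, CR or ER) is set. The stand-alone sketch below models that check using the bit layout added by this patch; el0_access_allowed and the USERENR_* names are illustrative only, not kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit layout as defined by the patch (ARMV8_PMU_USERENR_*). */
#define USERENR_EN (1u << 0) /* all PMU regs accessible at EL0 */
#define USERENR_SW (1u << 1) /* PMSWINC writable at EL0 */
#define USERENR_CR (1u << 2) /* cycle counter readable at EL0 */
#define USERENR_ER (1u << 3) /* event counters readable at EL0 */

/* An EL0 access is allowed if the vcpu is privileged, the global enable bit
 * is set, or the class-specific bit for this access is set. */
static bool el0_access_allowed(uint32_t userenr, bool privileged, uint32_t class_bit)
{
	return privileged || (userenr & (USERENR_EN | class_bit));
}

int main(void)
{
	uint32_t userenr = USERENR_CR; /* guest allows EL0 cycle-counter reads only */

	printf("cycle read from EL0: %d\n", el0_access_allowed(userenr, false, USERENR_CR)); /* 1 */
	printf("event read from EL0: %d\n", el0_access_allowed(userenr, false, USERENR_ER)); /* 0 */
	printf("event read from EL1: %d\n", el0_access_allowed(userenr, true, USERENR_ER));  /* 1 */
	return 0;
}
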
Signed-off-by: Shannon Zhao Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/kvm_hyp.h | 1 + arch/arm64/include/asm/kvm_perf_event.h | 9 +++ arch/arm64/kvm/hyp/switch.c | 3 + arch/arm64/kvm/sys_regs.c | 101 ++++++++++++++++++++++++++++++-- 5 files changed, 110 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4001e85b4818..a819c6debce4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -130,6 +130,7 @@ enum vcpu_sysreg { PMINTENSET_EL1, /* Interrupt Enable Set Register */ PMOVSSET_EL0, /* Overflow Flag Status Set Register */ PMSWINC_EL0, /* Software Increment Register */ + PMUSERENR_EL0, /* User Enable Register */ /* 32bit specific registers. Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 44eaff70da6a..a46b019ebcf5 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #define __hyp_text __section(.hyp.text) notrace diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h index 6d080c07873b..c18fdebb8f66 100644 --- a/arch/arm64/include/asm/kvm_perf_event.h +++ b/arch/arm64/include/asm/kvm_perf_event.h @@ -56,4 +56,13 @@ #define ARMV8_PMU_EXCLUDE_EL0 (1 << 30) #define ARMV8_PMU_INCLUDE_EL2 (1 << 27) +/* + * PMUSERENR: user enable reg + */ +#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ +#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ +#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ +#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ +#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ + #endif diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 7b81e56111ab..437cfad5e3d8 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -82,6 +82,8 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) write_sysreg(val, hcr_el2); /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ write_sysreg(1 << 15, hstr_el2); + /* Make sure we trap PMU access from EL0 to EL2 */ + write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0); write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); __activate_traps_arch()(); } @@ -110,6 +112,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) __deactivate_traps_arch()(); write_sysreg(0, hstr_el2); write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); + write_sysreg(0, pmuserenr_el0); } static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 12f36ef8caa0..fe15c2310a65 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -453,6 +453,37 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) vcpu_sys_reg(vcpu, PMCR_EL0) = val; } +static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu) +{ + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); + + return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu)); +} + +static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu) +{ + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); + + return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN)) + || vcpu_mode_priv(vcpu)); +} + +static bool 
pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu) +{ + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); + + return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN)) + || vcpu_mode_priv(vcpu)); +} + +static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu) +{ + u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); + + return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN)) + || vcpu_mode_priv(vcpu)); +} + static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { @@ -461,6 +492,9 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_el0_disabled(vcpu)) + return false; + if (p->is_write) { /* Only update writeable bits of PMCR */ val = vcpu_sys_reg(vcpu, PMCR_EL0); @@ -484,6 +518,9 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_event_counter_el0_disabled(vcpu)) + return false; + if (p->is_write) vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; else @@ -504,6 +541,9 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, BUG_ON(p->is_write); + if (pmu_access_el0_disabled(vcpu)) + return false; + if (!(p->Op2 & 1)) asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid)); else @@ -538,16 +578,25 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, if (r->CRn == 9 && r->CRm == 13) { if (r->Op2 == 2) { /* PMXEVCNTR_EL0 */ + if (pmu_access_event_counter_el0_disabled(vcpu)) + return false; + idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; } else if (r->Op2 == 0) { /* PMCCNTR_EL0 */ + if (pmu_access_cycle_counter_el0_disabled(vcpu)) + return false; + idx = ARMV8_PMU_CYCLE_IDX; } else { BUG(); } } else if (r->CRn == 14 && (r->CRm & 12) == 8) { /* PMEVCNTRn_EL0 */ + if (pmu_access_event_counter_el0_disabled(vcpu)) + return false; + idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); } else { BUG(); @@ -556,10 +605,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, if (!pmu_counter_idx_valid(vcpu, idx)) return false; - if (p->is_write) + if (p->is_write) { + if (pmu_access_el0_disabled(vcpu)) + return false; + kvm_pmu_set_counter_value(vcpu, idx, p->regval); - else + } else { p->regval = kvm_pmu_get_counter_value(vcpu, idx); + } return true; } @@ -572,6 +625,9 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_el0_disabled(vcpu)) + return false; + if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { /* PMXEVTYPER_EL0 */ idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; @@ -608,6 +664,9 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_el0_disabled(vcpu)) + return false; + mask = kvm_pmu_valid_counter_mask(vcpu); if (p->is_write) { val = p->regval & mask; @@ -635,6 +694,9 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (!vcpu_mode_priv(vcpu)) + return false; + if (p->is_write) { u64 val = p->regval & mask; @@ -659,6 +721,9 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_access_el0_disabled(vcpu)) + return false; + if (p->is_write) { if (r->CRm & 0x2) /* 
accessing PMOVSSET_EL0 */ @@ -681,6 +746,9 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); + if (pmu_write_swinc_el0_disabled(vcpu)) + return false; + if (p->is_write) { mask = kvm_pmu_valid_counter_mask(vcpu); kvm_pmu_software_increment(vcpu, p->regval & mask); @@ -690,6 +758,26 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return false; } +static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (!kvm_arm_pmu_v3_ready(vcpu)) + return trap_raz_wi(vcpu, p, r); + + if (p->is_write) { + if (!vcpu_mode_priv(vcpu)) + return false; + + vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval + & ARMV8_PMU_USERENR_MASK; + } else { + p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0) + & ARMV8_PMU_USERENR_MASK; + } + + return true; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ @@ -919,9 +1007,12 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* PMXEVCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), access_pmu_evcntr }, - /* PMUSERENR_EL0 */ + /* PMUSERENR_EL0 + * This register resets as unknown in 64bit mode while it resets as zero + * in 32bit mode. Here we choose to reset it as zero for consistency. + */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), - trap_raz_wi }, + access_pmuserenr, reset_val, PMUSERENR_EL0, 0 }, /* PMOVSSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), access_pmovs, reset_unknown, PMOVSSET_EL0 }, @@ -1246,7 +1337,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, - { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, + { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, -- cgit v1.2.3-58-ga151 From b02386eb7dac7555a208d81aef2a0e5c6f0f8085 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Fri, 26 Feb 2016 19:29:19 +0800 Subject: arm64: KVM: Add PMU overflow interrupt routing When calling perf_event_create_kernel_counter to create perf_event, assign a overflow handler. Then when the perf event overflows, set the corresponding bit of guest PMOVSSET register. If this counter is enabled and its interrupt is enabled as well, kick the vcpu to sync the interrupt. On VM entry, if there is counter overflowed and interrupt level is changed, inject the interrupt with corresponding level. On VM exit, sync the interrupt level as well if it has been changed. 
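
The interrupt condition described above is a pure function of the guest registers: the line is asserted only if PMCR.E is set and a counter's bit is set in PMOVSSET, PMCNTENSET and PMINTENSET at the same time, and it is only (re)signalled when that level actually changes. A simplified stand-alone model follows; it ignores the valid-counter mask and uses illustrative names, so it is a sketch of the logic, not the kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vpmu {
	bool     pmcr_e;     /* global enable (PMCR.E) */
	uint64_t ovsset;     /* overflow status (PMOVSSET) */
	uint64_t cntenset;   /* enabled counters (PMCNTENSET) */
	uint64_t intenset;   /* counters with interrupt enabled (PMINTENSET) */
	bool     irq_level;  /* last level signalled to the interrupt controller */
};

static uint64_t overflow_status(const struct vpmu *p)
{
	if (!p->pmcr_e)
		return 0;
	return p->ovsset & p->cntenset & p->intenset;
}

/* Called on VM entry and exit in the model: change the (modelled) PMU
 * interrupt line only when the computed level differs from the last one. */
static void update_irq(struct vpmu *p)
{
	bool level = overflow_status(p) != 0;

	if (level != p->irq_level) {
		p->irq_level = level;
		printf("PMU irq level -> %d\n", level);
	}
}

int main(void)
{
	struct vpmu p = { .pmcr_e = true, .cntenset = 0x1, .intenset = 0x1 };

	update_irq(&p);   /* no overflow yet: nothing happens */
	p.ovsset |= 0x1;  /* counter 0 overflowed */
	update_irq(&p);   /* level -> 1 */
	update_irq(&p);   /* unchanged: no second injection */
	return 0;
}

Keeping the last signalled level around is what prevents re-injecting the same interrupt on every entry/exit while the overflow condition persists.
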
Signed-off-by: Shannon Zhao Reviewed-by: Marc Zyngier Reviewed-by: Andrew Jones Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/arm.c | 8 ++++-- include/kvm/arm_pmu.h | 5 ++++ virt/kvm/arm/pmu.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 79 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 686350d05174..c5e959187abd 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -28,6 +28,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -577,6 +578,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) * non-preemptible context. */ preempt_disable(); + kvm_pmu_flush_hwstate(vcpu); kvm_timer_flush_hwstate(vcpu); kvm_vgic_flush_hwstate(vcpu); @@ -593,6 +595,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) || vcpu->arch.power_off || vcpu->arch.pause) { local_irq_enable(); + kvm_pmu_sync_hwstate(vcpu); kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); preempt_enable(); @@ -642,10 +645,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); /* - * We must sync the timer state before the vgic state so that - * the vgic can properly sample the updated state of the + * We must sync the PMU and timer state before the vgic state so + * that the vgic can properly sample the updated state of the * interrupt line. */ + kvm_pmu_sync_hwstate(vcpu); kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 8bc92d119713..9c184edb8e07 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -35,6 +35,7 @@ struct kvm_pmu { int irq_num; struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; bool ready; + bool irq_level; }; #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) @@ -44,6 +45,8 @@ u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu); +void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu); void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, @@ -67,6 +70,8 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index cda869c609dd..74e858c42ae1 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -21,6 +21,7 @@ #include #include #include +#include /** * kvm_pmu_get_counter_value - get PMU counter value @@ -180,6 +181,71 @@ void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 
val) kvm_vcpu_kick(vcpu); } +static void kvm_pmu_update_state(struct kvm_vcpu *vcpu) +{ + struct kvm_pmu *pmu = &vcpu->arch.pmu; + bool overflow; + + if (!kvm_arm_pmu_v3_ready(vcpu)) + return; + + overflow = !!kvm_pmu_overflow_status(vcpu); + if (pmu->irq_level != overflow) { + pmu->irq_level = overflow; + kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, + pmu->irq_num, overflow); + } +} + +/** + * kvm_pmu_flush_hwstate - flush pmu state to cpu + * @vcpu: The vcpu pointer + * + * Check if the PMU has overflowed while we were running in the host, and inject + * an interrupt if that was the case. + */ +void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) +{ + kvm_pmu_update_state(vcpu); +} + +/** + * kvm_pmu_sync_hwstate - sync pmu state from cpu + * @vcpu: The vcpu pointer + * + * Check if the PMU has overflowed while we were running in the guest, and + * inject an interrupt if that was the case. + */ +void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) +{ + kvm_pmu_update_state(vcpu); +} + +static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc) +{ + struct kvm_pmu *pmu; + struct kvm_vcpu_arch *vcpu_arch; + + pmc -= pmc->idx; + pmu = container_of(pmc, struct kvm_pmu, pmc[0]); + vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu); + return container_of(vcpu_arch, struct kvm_vcpu, arch); +} + +/** + * When perf event overflows, call kvm_pmu_overflow_set to set overflow status. + */ +static void kvm_pmu_perf_overflow(struct perf_event *perf_event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct kvm_pmc *pmc = perf_event->overflow_handler_context; + struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); + int idx = pmc->idx; + + kvm_pmu_overflow_set(vcpu, BIT(idx)); +} + /** * kvm_pmu_software_increment - do software increment * @vcpu: The vcpu pointer @@ -291,7 +357,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, /* The initial sample period (overflow count) of an event. */ attr.sample_period = (-counter) & pmc->bitmask; - event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc); + event = perf_event_create_kernel_counter(&attr, -1, current, + kvm_pmu_perf_overflow, pmc); if (IS_ERR(event)) { pr_err_once("kvm: pmu event creation failed %ld\n", PTR_ERR(event)); -- cgit v1.2.3-58-ga151 From 2aa36e9840d71710f06b3c29634f044fde8bcbe5 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Fri, 11 Sep 2015 11:30:22 +0800 Subject: arm64: KVM: Reset PMU state when resetting vcpu When resetting vcpu, it needs to reset the PMU state to initial status. 
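
A minimal stand-alone sketch of the reset step: every counter goes back to a known initial state, i.e. no backing event and a 32-bit wide counter (PMCR.LC clear). The structure and names below are illustrative; NR_COUNTERS is assumed to be 32 (31 event counters plus the cycle counter, matching ARMV8_PMU_MAX_COUNTERS), and the architectural register values themselves are reset separately via the reset_unknown handlers.

#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS 32  /* assumption: 31 event counters + cycle counter */

struct vpmc {
	uint8_t  idx;            /* counter index */
	uint64_t bitmask;        /* effective counter width */
	void    *backing_event;  /* stands in for the perf_event pointer */
};

/* Put every counter back into its initial state: no backing event and a
 * 32-bit wide counter, as the vcpu reset path does. */
static void vpmu_reset(struct vpmc pmc[NR_COUNTERS])
{
	for (int i = 0; i < NR_COUNTERS; i++) {
		pmc[i].backing_event = NULL;   /* kernel: release via kvm_pmu_stop_counter() */
		pmc[i].idx = (uint8_t)i;
		pmc[i].bitmask = 0xffffffffUL;
	}
}

int main(void)
{
	struct vpmc pmc[NR_COUNTERS];

	vpmu_reset(pmc);
	printf("counter 5: idx=%u bitmask=%#llx\n",
	       (unsigned)pmc[5].idx, (unsigned long long)pmc[5].bitmask);
	return 0;
}
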
Signed-off-by: Shannon Zhao Reviewed-by: Marc Zyngier Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/kvm/reset.c | 3 +++ include/kvm/arm_pmu.h | 2 ++ virt/kvm/arm/pmu.c | 17 +++++++++++++++++ 3 files changed, 22 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f34745cb3d23..dfbce781d284 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -120,6 +120,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset system registers */ kvm_reset_sys_regs(vcpu); + /* Reset PMU */ + kvm_pmu_vcpu_reset(vcpu); + /* Reset timer */ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); } diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 9c184edb8e07..b4993eb76aa1 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -42,6 +42,7 @@ struct kvm_pmu { u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); @@ -67,6 +68,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { return 0; } +static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 74e858c42ae1..1dbbc2c51559 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -84,6 +84,23 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) } } +/** + * kvm_pmu_vcpu_reset - reset pmu state for cpu + * @vcpu: The vcpu pointer + * + */ +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) +{ + int i; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + + for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); + pmu->pmc[i].idx = i; + pmu->pmc[i].bitmask = 0xffffffffUL; + } +} + u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT; -- cgit v1.2.3-58-ga151 From 5f0a714a2b63c25ffba5d832773f3ca4f0d02e21 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Fri, 11 Sep 2015 15:18:05 +0800 Subject: arm64: KVM: Free perf event of PMU when destroying vcpu When KVM frees VCPU, it needs to free the perf_event of PMU. 
Signed-off-by: Shannon Zhao Reviewed-by: Marc Zyngier Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm/kvm/arm.c | 1 + include/kvm/arm_pmu.h | 2 ++ virt/kvm/arm/pmu.c | 21 +++++++++++++++++++++ 3 files changed, 24 insertions(+) (limited to 'arch') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index c5e959187abd..9d133df2da53 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -266,6 +266,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) kvm_mmu_free_memory_caches(vcpu); kvm_timer_vcpu_terminate(vcpu); kvm_vgic_vcpu_destroy(vcpu); + kvm_pmu_vcpu_destroy(vcpu); kmem_cache_free(kvm_vcpu_cache, vcpu); } diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index b4993eb76aa1..9f87d717ef84 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -43,6 +43,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); @@ -69,6 +70,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) return 0; } static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 1dbbc2c51559..9b83857da195 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -101,6 +101,27 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) } } +/** + * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu + * @vcpu: The vcpu pointer + * + */ +void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + int i; + struct kvm_pmu *pmu = &vcpu->arch.pmu; + + for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + struct kvm_pmc *pmc = &pmu->pmc[i]; + + if (pmc->perf_event) { + perf_event_disable(pmc->perf_event); + perf_event_release_kernel(pmc->perf_event); + pmc->perf_event = NULL; + } + } +} + u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT; -- cgit v1.2.3-58-ga151 From 808e738142e7086ef793ebf9797099c392894e65 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 11 Jan 2016 22:46:15 +0800 Subject: arm64: KVM: Add a new feature bit for PMUv3 To support guest PMUv3, use one bit of the VCPU INIT feature array. Initialize the PMU when initializing the vcpu with that bit and PMU overflow interrupt set.
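As an illustration (not part of this patch), a minimal userspace sketch of how the new feature bit might be requested when initializing a vcpu; it assumes a vm fd and vcpu fd obtained through the usual KVM ioctls and relies on the standard convention that feature bit N lives in features[N / 32]:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int vcpu_init_with_pmu(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));
	/* Ask KVM for the preferred target, then add the PMUv3 feature bit. */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;
	init.features[KVM_ARM_VCPU_PMU_V3 / 32] |= 1U << (KVM_ARM_VCPU_PMU_V3 % 32);

	/* The host must advertise KVM_CAP_ARM_PMU_V3 for this to be usable. */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}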
Signed-off-by: Shannon Zhao Acked-by: Peter Maydell Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- Documentation/virtual/kvm/api.txt | 2 ++ arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/include/uapi/asm/kvm.h | 1 + arch/arm64/kvm/reset.c | 3 +++ include/kvm/arm_pmu.h | 2 ++ include/uapi/linux/kvm.h | 1 + virt/kvm/arm/pmu.c | 10 ++++++++++ 7 files changed, 20 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 07e4cdf02407..9684f8dc6bb2 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2577,6 +2577,8 @@ Possible features: Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only). - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU. Depends on KVM_CAP_ARM_PSCI_0_2. + - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU. + Depends on KVM_CAP_ARM_PMU_V3. 4.83 KVM_ARM_PREFERRED_TARGET diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a819c6debce4..b02ef0828f22 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -42,7 +42,7 @@ #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS -#define KVM_VCPU_MAX_FEATURES 3 +#define KVM_VCPU_MAX_FEATURES 4 int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 2d4ca4bb0dd3..6aedbe314432 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -94,6 +94,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ #define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ +#define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */ struct kvm_vcpu_init { __u32 target; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index dfbce781d284..cf4f28a7a514 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -77,6 +77,9 @@ int kvm_arch_dev_ioctl_check_extension(long ext) case KVM_CAP_GUEST_DEBUG_HW_WPS: r = get_num_wrps(); break; + case KVM_CAP_ARM_PMU_V3: + r = kvm_arm_support_pmu_v3(); + break; case KVM_CAP_SET_GUEST_DEBUG: r = 1; break; diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 9f87d717ef84..ee62497d46f7 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -53,6 +53,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); +bool kvm_arm_support_pmu_v3(void); #else struct kvm_pmu { }; @@ -80,6 +81,7 @@ static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} +static inline bool kvm_arm_support_pmu_v3(void) { return false; } #endif #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 9da905157cee..dc16d3084d4a 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -850,6 +850,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_IOEVENTFD_ANY_LENGTH 122 #define KVM_CAP_HYPERV_SYNIC 123 #define KVM_CAP_S390_RI 124 +#define KVM_CAP_ARM_PMU_V3 125 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 9b83857da195..6e28f4f86cc6 100644 --- a/virt/kvm/arm/pmu.c +++ 
b/virt/kvm/arm/pmu.c @@ -405,3 +405,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, pmc->perf_event = event; } + +bool kvm_arm_support_pmu_v3(void) +{ + /* + * Check if HW_PERF_EVENTS are supported by checking the number of + * hardware performance counters. This could ensure the presence of + * a physical PMU and CONFIG_PERF_EVENT is selected. + */ + return (perf_num_counters() > 0); +} -- cgit v1.2.3-58-ga151 From f577f6c2a6a5ccabe98061f256a1e2ff468d5e93 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 11 Jan 2016 20:56:17 +0800 Subject: arm64: KVM: Introduce per-vcpu kvm device controls In some cases it needs to get/set attributes specific to a vcpu and so needs something else than ONE_REG. Let's copy the KVM_DEVICE approach, and define the respective ioctls for the vcpu file descriptor. Signed-off-by: Shannon Zhao Reviewed-by: Andrew Jones Acked-by: Peter Maydell Signed-off-by: Marc Zyngier --- Documentation/virtual/kvm/api.txt | 10 +++--- Documentation/virtual/kvm/devices/vcpu.txt | 8 +++++ arch/arm/kvm/arm.c | 55 ++++++++++++++++++++++++++++++ arch/arm64/kvm/reset.c | 1 + include/uapi/linux/kvm.h | 1 + 5 files changed, 71 insertions(+), 4 deletions(-) create mode 100644 Documentation/virtual/kvm/devices/vcpu.txt (limited to 'arch') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 9684f8dc6bb2..cb2ef0bcdcb5 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2507,8 +2507,9 @@ struct kvm_create_device { 4.80 KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR -Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device -Type: device ioctl, vm ioctl +Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device, + KVM_CAP_VCPU_ATTRIBUTES for vcpu device +Type: device ioctl, vm ioctl, vcpu ioctl Parameters: struct kvm_device_attr Returns: 0 on success, -1 on error Errors: @@ -2533,8 +2534,9 @@ struct kvm_device_attr { 4.81 KVM_HAS_DEVICE_ATTR -Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device -Type: device ioctl, vm ioctl +Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device, + KVM_CAP_VCPU_ATTRIBUTES for vcpu device +Type: device ioctl, vm ioctl, vcpu ioctl Parameters: struct kvm_device_attr Returns: 0 on success, -1 on error Errors: diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt new file mode 100644 index 000000000000..3cc59c5e44ce --- /dev/null +++ b/Documentation/virtual/kvm/devices/vcpu.txt @@ -0,0 +1,8 @@ +Generic vcpu interface +==================================== + +The virtual cpu "device" also accepts the ioctls KVM_SET_DEVICE_ATTR, +KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct +kvm_device_attr as other devices, but targets VCPU-wide settings and controls. + +The groups and attributes per virtual cpu, if any, are architecture specific. 
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 9d133df2da53..166232356291 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -828,11 +828,51 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, return 0; } +static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + default: + break; + } + + return ret; +} + +static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + default: + break; + } + + return ret; +} + +static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + default: + break; + } + + return ret; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; + struct kvm_device_attr attr; switch (ioctl) { case KVM_ARM_VCPU_INIT: { @@ -875,6 +915,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return -E2BIG; return kvm_arm_copy_reg_indices(vcpu, user_list->reg); } + case KVM_SET_DEVICE_ATTR: { + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + return kvm_arm_vcpu_set_attr(vcpu, &attr); + } + case KVM_GET_DEVICE_ATTR: { + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + return kvm_arm_vcpu_get_attr(vcpu, &attr); + } + case KVM_HAS_DEVICE_ATTR: { + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + return kvm_arm_vcpu_has_attr(vcpu, &attr); + } default: return -EINVAL; } diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index cf4f28a7a514..9677bf069bcc 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -81,6 +81,7 @@ int kvm_arch_dev_ioctl_check_extension(long ext) r = kvm_arm_support_pmu_v3(); break; case KVM_CAP_SET_GUEST_DEBUG: + case KVM_CAP_VCPU_ATTRIBUTES: r = 1; break; default: diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index dc16d3084d4a..50f44a229212 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -851,6 +851,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_HYPERV_SYNIC 123 #define KVM_CAP_S390_RI 124 #define KVM_CAP_ARM_PMU_V3 125 +#define KVM_CAP_VCPU_ATTRIBUTES 126 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-58-ga151 From bb0c70bcca6ba3c84afc2da7426f3b923bbe6825 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Mon, 11 Jan 2016 21:35:32 +0800 Subject: arm64: KVM: Add a new vcpu device control group for PMUv3 To configure the virtual PMUv3 overflow interrupt number, we use the vcpu kvm_device ioctl, encapsulating the KVM_ARM_VCPU_PMU_V3_IRQ attribute within the KVM_ARM_VCPU_PMU_V3_CTRL group. After configuring the PMUv3, call the vcpu ioctl with attribute KVM_ARM_VCPU_PMU_V3_INIT to initialize the PMUv3. 
Signed-off-by: Shannon Zhao Acked-by: Peter Maydell Reviewed-by: Andrew Jones Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- Documentation/virtual/kvm/devices/vcpu.txt | 25 +++++++ arch/arm/include/asm/kvm_host.h | 15 ++++ arch/arm/kvm/arm.c | 3 + arch/arm64/include/asm/kvm_host.h | 6 ++ arch/arm64/include/uapi/asm/kvm.h | 5 ++ arch/arm64/kvm/guest.c | 51 +++++++++++++ include/kvm/arm_pmu.h | 23 ++++++ virt/kvm/arm/pmu.c | 112 +++++++++++++++++++++++++++++ 8 files changed, 240 insertions(+) (limited to 'arch') diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt index 3cc59c5e44ce..c04165868faf 100644 --- a/Documentation/virtual/kvm/devices/vcpu.txt +++ b/Documentation/virtual/kvm/devices/vcpu.txt @@ -6,3 +6,28 @@ KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct kvm_device_attr as other devices, but targets VCPU-wide settings and controls. The groups and attributes per virtual cpu, if any, are architecture specific. + +1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL +Architectures: ARM64 + +1.1. ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ +Parameters: in kvm_device_attr.addr the address for PMU overflow interrupt is a + pointer to an int +Returns: -EBUSY: The PMU overflow interrupt is already set + -ENXIO: The overflow interrupt not set when attempting to get it + -ENODEV: PMUv3 not supported + -EINVAL: Invalid PMU overflow interrupt number supplied + +A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt +number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt +type must be same for each vcpu. As a PPI, the interrupt number is the same for +all vcpus, while as an SPI it must be a separate number per vcpu. + +1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT +Parameters: no additional parameter in kvm_device_attr.addr +Returns: -ENODEV: PMUv3 not supported + -ENXIO: PMUv3 not properly configured as required prior to calling this + attribute + -EBUSY: PMUv3 already initialized + +Request the initialization of the PMUv3. 
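Taken together, a minimal userspace sketch of the flow documented above — set the overflow interrupt, then initialize the PMU. The usual <linux/kvm.h> and <sys/ioctl.h> includes are assumed, the interrupt number is whatever PPI or SPI the VM reserves for the PMU, and the vcpu is assumed to have been created with the KVM_ARM_VCPU_PMU_V3 feature bit:

static int vcpu_enable_pmu(int vcpu_fd, int pmu_irq)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr  = (__u64)(unsigned long)&pmu_irq,
	};

	/* Tell KVM which interrupt the virtual PMU should raise... */
	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
		return -1;

	/* ...then request initialization of the PMU. */
	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
	attr.addr = 0;
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}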
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 19e9aba85463..385070180c25 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -287,5 +287,20 @@ static inline void kvm_arm_init_debug(void) {} static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {} +static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 166232356291..75c7fed5d14c 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -835,6 +835,7 @@ static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, switch (attr->group) { default: + ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr); break; } @@ -848,6 +849,7 @@ static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, switch (attr->group) { default: + ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr); break; } @@ -861,6 +863,7 @@ static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, switch (attr->group) { default: + ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr); break; } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b02ef0828f22..71fa6fe9d54a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -361,6 +361,12 @@ void kvm_arm_init_debug(void); void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); +int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); /* #define kvm_call_hyp(f, ...) 
__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 6aedbe314432..f209ea151dca 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -205,6 +205,11 @@ struct kvm_arch_memory_slot { #define KVM_DEV_ARM_VGIC_GRP_CTRL 4 #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 +/* Device Control API on vcpu fd */ +#define KVM_ARM_VCPU_PMU_V3_CTRL 0 +#define KVM_ARM_VCPU_PMU_V3_IRQ 0 +#define KVM_ARM_VCPU_PMU_V3_INIT 1 + /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 #define KVM_ARM_IRQ_TYPE_MASK 0xff diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index fcb778899a38..dbe45c364bbb 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -380,3 +380,54 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, } return 0; } + +int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->group) { + case KVM_ARM_VCPU_PMU_V3_CTRL: + ret = kvm_arm_pmu_v3_set_attr(vcpu, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; +} + +int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->group) { + case KVM_ARM_VCPU_PMU_V3_CTRL: + ret = kvm_arm_pmu_v3_get_attr(vcpu, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; +} + +int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->group) { + case KVM_ARM_VCPU_PMU_V3_CTRL: + ret = kvm_arm_pmu_v3_has_attr(vcpu, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; +} diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index ee62497d46f7..fe389ac31489 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -39,6 +39,7 @@ struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) +#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); @@ -54,11 +55,18 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); bool kvm_arm_support_pmu_v3(void); +int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); #else struct kvm_pmu { }; #define kvm_arm_pmu_v3_ready(v) (false) +#define kvm_arm_pmu_irq_initialized(v) (false) static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) { @@ -82,6 +90,21 @@ static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) {} static inline bool kvm_arm_support_pmu_v3(void) { return false; } +static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} #endif #endif diff --git 
a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 6e28f4f86cc6..b5754c6c5508 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -415,3 +416,114 @@ bool kvm_arm_support_pmu_v3(void) */ return (perf_num_counters() > 0); } + +static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) +{ + if (!kvm_arm_support_pmu_v3()) + return -ENODEV; + + if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) || + !kvm_arm_pmu_irq_initialized(vcpu)) + return -ENXIO; + + if (kvm_arm_pmu_v3_ready(vcpu)) + return -EBUSY; + + kvm_pmu_vcpu_reset(vcpu); + vcpu->arch.pmu.ready = true; + + return 0; +} + +static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi) +{ + int i; + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!kvm_arm_pmu_irq_initialized(vcpu)) + continue; + + if (is_ppi) { + if (vcpu->arch.pmu.irq_num != irq) + return false; + } else { + if (vcpu->arch.pmu.irq_num == irq) + return false; + } + } + + return true; +} + + +int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_ARM_VCPU_PMU_V3_IRQ: { + int __user *uaddr = (int __user *)(long)attr->addr; + int irq; + + if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) + return -ENODEV; + + if (get_user(irq, uaddr)) + return -EFAULT; + + /* + * The PMU overflow interrupt could be a PPI or SPI, but for one + * VM the interrupt type must be same for each vcpu. As a PPI, + * the interrupt number is the same for all vcpus, while as an + * SPI it must be a separate number per vcpu. + */ + if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs || + !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS)) + return -EINVAL; + + if (kvm_arm_pmu_irq_initialized(vcpu)) + return -EBUSY; + + kvm_debug("Set kvm ARM PMU irq: %d\n", irq); + vcpu->arch.pmu.irq_num = irq; + return 0; + } + case KVM_ARM_VCPU_PMU_V3_INIT: + return kvm_arm_pmu_v3_init(vcpu); + } + + return -ENXIO; +} + +int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_ARM_VCPU_PMU_V3_IRQ: { + int __user *uaddr = (int __user *)(long)attr->addr; + int irq; + + if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) + return -ENODEV; + + if (!kvm_arm_pmu_irq_initialized(vcpu)) + return -ENXIO; + + irq = vcpu->arch.pmu.irq_num; + return put_user(irq, uaddr); + } + } + + return -ENXIO; +} + +int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_ARM_VCPU_PMU_V3_IRQ: + case KVM_ARM_VCPU_PMU_V3_INIT: + if (kvm_arm_support_pmu_v3() && + test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) + return 0; + } + + return -ENXIO; +} -- cgit v1.2.3-58-ga151 From 623eefa8d04c6c3df69a0630989f10b3762b3b00 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Jan 2016 18:27:04 +0000 Subject: arm64: KVM: Switch the sys_reg search to be a binary search Our 64bit sys_reg table is about 90 entries long (so far, and the PMU support is likely to increase this). This means that on average, it takes 45 comparisons to find the right entry (and actually the full 90 if we have to search the invariant table). Not the most efficient thing. Especially when you think that this table is already sorted. Switching to a binary search effectively reduces the search to about 7 comparisons. Slightly better! As an added bonus, the comparison is done by comparing all the fields at once, instead of one at a time. 
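To see why comparing a single packed value is equivalent to the field-by-field comparison, here is a small standalone illustration of the same pattern with a made-up two-field key (the field widths are arbitrary and are not the actual sys_reg encoding). As long as the fields are packed from most to least significant and the table is sorted by the packed key, bsearch() lands on the same entry a linear walk would:

#include <stdio.h>
#include <stdlib.h>

struct reg { unsigned int op1, op2; const char *name; };

/* Pack so that numeric order of the key equals (op1, op2) lexicographic order. */
static unsigned long key_of(unsigned int op1, unsigned int op2)
{
	return ((unsigned long)op1 << 3) | op2;	/* assumes op2 < 8 */
}

static int match_reg(const void *key, const void *elt)
{
	unsigned long pval = (unsigned long)key;
	const struct reg *r = elt;
	unsigned long rval = key_of(r->op1, r->op2);

	return (pval > rval) - (pval < rval);
}

int main(void)
{
	/* Already sorted by the packed key, as bsearch() requires. */
	static const struct reg table[] = {
		{ 0, 1, "A" }, { 0, 5, "B" }, { 2, 0, "C" }, { 3, 7, "D" },
	};
	const struct reg *r = bsearch((void *)key_of(2, 0), table,
				      sizeof(table) / sizeof(table[0]),
				      sizeof(table[0]), match_reg);

	printf("%s\n", r ? r->name : "not found");
	return 0;
}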
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index fe15c2310a65..61ba59104845 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -20,6 +20,7 @@ * along with this program. If not, see . */ +#include #include #include #include @@ -1453,29 +1454,32 @@ static const struct sys_reg_desc *get_target_table(unsigned target, } } +#define reg_to_match_value(x) \ + ({ \ + unsigned long val; \ + val = (x)->Op0 << 14; \ + val |= (x)->Op1 << 11; \ + val |= (x)->CRn << 7; \ + val |= (x)->CRm << 3; \ + val |= (x)->Op2; \ + val; \ + }) + +static int match_sys_reg(const void *key, const void *elt) +{ + const unsigned long pval = (unsigned long)key; + const struct sys_reg_desc *r = elt; + + return pval - reg_to_match_value(r); +} + static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[], unsigned int num) { - unsigned int i; - - for (i = 0; i < num; i++) { - const struct sys_reg_desc *r = &table[i]; + unsigned long pval = reg_to_match_value(params); - if (params->Op0 != r->Op0) - continue; - if (params->Op1 != r->Op1) - continue; - if (params->CRn != r->CRn) - continue; - if (params->CRm != r->CRm) - continue; - if (params->Op2 != r->Op2) - continue; - - return r; - } - return NULL; + return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg); } int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) -- cgit v1.2.3-58-ga151 From 504bfce18a76c9fb6ad5a5f894750f4fca6cde39 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Jan 2016 15:37:03 +0000 Subject: ARM: KVM: Properly sort the invariant table Not having the invariant table properly sorted is an oddity, and may get in the way of future optimisations. Let's fix it. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/coproc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index e3e86c4cfed2..9aa462ec9c56 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -645,6 +645,9 @@ static struct coproc_reg invariant_cp15[] = { { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, + { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, + { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, + { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, @@ -660,9 +663,6 @@ static struct coproc_reg invariant_cp15[] = { { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, - - { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, - { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, }; /* -- cgit v1.2.3-58-ga151 From b613f59dd2628937860f37dbfbe315d9edcb1668 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Jan 2016 15:34:35 +0000 Subject: ARM: KVM: Enforce sorting of all CP tables Since we're obviously terrible at sorting the CP tables, make sure we're going to do it properly (or fail to boot). 
arm64 has had the same mechanism for a while, and nobody ever broke it... Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/coproc.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 9aa462ec9c56..40d6db1ca4a8 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -381,17 +381,26 @@ static const struct coproc_reg cp15_regs[] = { { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, }; +static int check_reg_table(const struct coproc_reg *table, unsigned int n) +{ + unsigned int i; + + for (i = 1; i < n; i++) { + if (cmp_reg(&table[i-1], &table[i]) >= 0) { + kvm_err("reg table %p out of order (%d)\n", table, i - 1); + return 1; + } + } + + return 0; +} + /* Target specific emulation tables */ static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) { - unsigned int i; - - for (i = 1; i < table->num; i++) - BUG_ON(cmp_reg(&table->table[i-1], - &table->table[i]) >= 0); - + BUG_ON(check_reg_table(table->table, table->num)); target_tables[table->target] = table; } @@ -1210,8 +1219,8 @@ void kvm_coproc_table_init(void) unsigned int i; /* Make sure tables are unique and in order. */ - for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) - BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); + BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs))); + BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15))); /* We abuse the reset function to overwrite the table itself. */ for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) -- cgit v1.2.3-58-ga151 From f1d67d4ac79aef6de709d7a21b35851685a1d3ee Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Jan 2016 17:04:52 +0000 Subject: ARM: KVM: Rename struct coproc_reg::is_64 to is_64bit As we're going to play some tricks on the struct coproc_reg, make sure its 64bit indicator field matches that of coproc_params. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/coproc.c | 4 ++-- arch/arm/kvm/coproc.h | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 40d6db1ca4a8..bb0690271dd2 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -423,7 +423,7 @@ static const struct coproc_reg *find_reg(const struct coproc_params *params, for (i = 0; i < num; i++) { const struct coproc_reg *r = &table[i]; - if (params->is_64bit != r->is_64) + if (params->is_64bit != r->is_64bit) continue; if (params->CRn != r->CRn) continue; @@ -1105,7 +1105,7 @@ static int write_demux_regids(u64 __user *uindices) static u64 cp15_to_index(const struct coproc_reg *reg) { u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); - if (reg->is_64) { + if (reg->is_64bit) { val |= KVM_REG_SIZE_U64; val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); /* diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 27351323871d..eef1759c2b65 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -37,7 +37,7 @@ struct coproc_reg { unsigned long Op1; unsigned long Op2; - bool is_64; + bool is_64bit; /* Trapped access from guest, if non-NULL. 
*/ bool (*access)(struct kvm_vcpu *, @@ -141,7 +141,7 @@ static inline int cmp_reg(const struct coproc_reg *i1, return i1->Op1 - i2->Op1; if (i1->Op2 != i2->Op2) return i1->Op2 - i2->Op2; - return i2->is_64 - i1->is_64; + return i2->is_64bit - i1->is_64bit; } @@ -150,8 +150,8 @@ static inline int cmp_reg(const struct coproc_reg *i1, #define CRm64(_x) .CRn = _x, .CRm = 0 #define Op1(_x) .Op1 = _x #define Op2(_x) .Op2 = _x -#define is64 .is_64 = true -#define is32 .is_64 = false +#define is64 .is_64bit = true +#define is32 .is_64bit = false bool access_vm_reg(struct kvm_vcpu *vcpu, const struct coproc_params *p, -- cgit v1.2.3-58-ga151 From d06a5440a02cf8ff67b1cd4ee75a30b1b1c66cff Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Jan 2016 17:34:22 +0000 Subject: ARM: KVM: Switch the CP reg search to be a binary search Doing a linear search is a bit silly when we can do a binary search. Not that we trap that so many things that it has become a burden yet, but it makes sense to align it with the arm64 code. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/coproc.c | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index bb0690271dd2..1bb2b79c01ff 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -16,6 +16,8 @@ * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + +#include #include #include #include @@ -414,29 +416,32 @@ static const struct coproc_reg *get_target_table(unsigned target, size_t *num) return table->table; } +#define reg_to_match_value(x) \ + ({ \ + unsigned long val; \ + val = (x)->CRn << 11; \ + val |= (x)->CRm << 7; \ + val |= (x)->Op1 << 4; \ + val |= (x)->Op2 << 1; \ + val |= !(x)->is_64bit; \ + val; \ + }) + +static int match_reg(const void *key, const void *elt) +{ + const unsigned long pval = (unsigned long)key; + const struct coproc_reg *r = elt; + + return pval - reg_to_match_value(r); +} + static const struct coproc_reg *find_reg(const struct coproc_params *params, const struct coproc_reg table[], unsigned int num) { - unsigned int i; - - for (i = 0; i < num; i++) { - const struct coproc_reg *r = &table[i]; + unsigned long pval = reg_to_match_value(params); - if (params->is_64bit != r->is_64bit) - continue; - if (params->CRn != r->CRn) - continue; - if (params->CRm != r->CRm) - continue; - if (params->Op1 != r->Op1) - continue; - if (params->Op2 != r->Op2) - continue; - - return r; - } - return NULL; + return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg); } static int emulate_cp15(struct kvm_vcpu *vcpu, -- cgit v1.2.3-58-ga151 From 9b4a3004439d5be680faf41f4267968ca11bb9f6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 29 Jan 2016 19:04:48 +0000 Subject: KVM: arm/arm64: timer: Add active state caching Programming the active state in the (re)distributor can be an expensive operation so it makes some sense to try and reduce the number of accesses as much as possible. So far, we program the active state on each VM entry, but there is some opportunity to do less. An obvious solution is to cache the active state in memory, and only program it in the HW when conditions change. 
But because the HW can also change things under our feet (the active state can transition from 1 to 0 when the guest does an EOI), some precautions have to be taken, which amount to only caching an "inactive" state, and always programming it otherwise. With this in place, we observe a reduction of around 700 cycles on a 2GHz GICv2 platform for a NULL hypercall. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/arm.c | 1 + include/kvm/arm_arch_timer.h | 5 +++++ virt/kvm/arm/arch_timer.c | 31 +++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+) (limited to 'arch') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 75c7fed5d14c..9ca653e34d8c 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -322,6 +322,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) vcpu->cpu = -1; kvm_arm_set_running_vcpu(NULL); + kvm_timer_vcpu_put(vcpu); } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 1800227af9d6..b651aed9dc6b 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -55,6 +55,9 @@ struct arch_timer_cpu { /* VGIC mapping */ struct irq_phys_map *map; + + /* Active IRQ state caching */ + bool active_cleared_last; }; int kvm_timer_hyp_init(void); @@ -74,4 +77,6 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu); void kvm_timer_schedule(struct kvm_vcpu *vcpu); void kvm_timer_unschedule(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); + #endif diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index ea6064696fe4..a9ad4fe3f68f 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -34,6 +34,11 @@ static struct timecounter *timecounter; static struct workqueue_struct *wqueue; static unsigned int host_vtimer_irq; +void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) +{ + vcpu->arch.timer_cpu.active_cleared_last = false; +} + static cycle_t kvm_phys_timer_read(void) { return timecounter->cc->read(timecounter->cc); @@ -130,6 +135,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level) BUG_ON(!vgic_initialized(vcpu->kvm)); + timer->active_cleared_last = false; timer->irq.level = new_level; trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq, timer->irq.level); @@ -245,10 +251,35 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) else phys_active = false; + /* + * We want to avoid hitting the (re)distributor as much as + * possible, as this is a potentially expensive MMIO access + * (not to mention locks in the irq layer), and a solution for + * this is to cache the "active" state in memory. + * + * Things to consider: we cannot cache an "active set" state, + * because the HW can change this behind our back (it becomes + * "clear" in the HW). We must then restrict the caching to + * the "clear" state. 
+ * + * The cache is invalidated on: + * - vcpu put, indicating that the HW cannot be trusted to be + * in a sane state on the next vcpu load, + * - any change in the interrupt state + * + * Usage conditions: + * - cached value is "active clear" + * - value to be programmed is "active clear" + */ + if (timer->active_cleared_last && !phys_active) + return; + ret = irq_set_irqchip_state(timer->map->irq, IRQCHIP_STATE_ACTIVE, phys_active); WARN_ON(ret); + + timer->active_cleared_last = !phys_active; } /** -- cgit v1.2.3-58-ga151 From fe26e52712ccab6648df17ecc029a68a69a01a85 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 1 Mar 2016 17:54:38 +1100 Subject: KVM: PPC: Add @page_shift to kvmppc_spapr_tce_table At the moment the kvmppc_spapr_tce_table struct can only describe 4GB windows and handle fixed size (4K) pages. Dynamic DMA windows support more so these limits need to be extended. This replaces window_size (in bytes, 4GB max) with page_shift (32bit) and size (64bit, in pages). This should cause no behavioural change as this is changing the internal structures only - the user interface still only allows one to create a 32-bit table with 4KiB pages at this stage. Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_host.h | 3 ++- arch/powerpc/kvm/book3s_64_vio.c | 22 +++++++++++----------- arch/powerpc/kvm/book3s_64_vio_hv.c | 21 ++++++++++----------- 3 files changed, 23 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index ffdbc2dc18f9..edf66f770498 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -182,8 +182,9 @@ struct kvmppc_spapr_tce_table { struct list_head list; struct kvm *kvm; u64 liobn; - u32 window_size; struct rcu_head rcu; + u32 page_shift; + u64 size; /* window size in pages */ struct page *pages[0]; }; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 94c8e7e9b58c..61cbc449d0a8 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -40,10 +40,9 @@ #include #include -static unsigned long kvmppc_tce_pages(unsigned long window_size) +static unsigned long kvmppc_tce_pages(unsigned long iommu_pages) { - return ALIGN((window_size >> IOMMU_PAGE_SHIFT_4K) - * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; + return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; } static unsigned long kvmppc_stt_pages(unsigned long tce_pages) @@ -95,8 +94,7 @@ static void release_spapr_tce_table(struct rcu_head *head) { struct kvmppc_spapr_tce_table *stt = container_of(head, struct kvmppc_spapr_tce_table, rcu); - int i; - unsigned long npages = kvmppc_tce_pages(stt->window_size); + unsigned long i, npages = kvmppc_tce_pages(stt->size); for (i = 0; i < npages; i++) __free_page(stt->pages[i]); @@ -109,7 +107,7 @@ static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data; struct page *page; - if (vmf->pgoff >= kvmppc_tce_pages(stt->window_size)) + if (vmf->pgoff >= kvmppc_tce_pages(stt->size)) return VM_FAULT_SIGBUS; page = stt->pages[vmf->pgoff]; @@ -137,7 +135,7 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) kvm_put_kvm(stt->kvm); kvmppc_account_memlimit( - kvmppc_stt_pages(kvmppc_tce_pages(stt->window_size)), false); + kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false); call_rcu(&stt->rcu, 
release_spapr_tce_table); return 0; @@ -152,7 +150,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce *args) { struct kvmppc_spapr_tce_table *stt = NULL; - unsigned long npages; + unsigned long npages, size; int ret = -ENOMEM; int i; @@ -162,7 +160,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, return -EBUSY; } - npages = kvmppc_tce_pages(args->window_size); + size = args->window_size >> IOMMU_PAGE_SHIFT_4K; + npages = kvmppc_tce_pages(size); ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true); if (ret) { stt = NULL; @@ -175,7 +174,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, goto fail; stt->liobn = args->liobn; - stt->window_size = args->window_size; + stt->page_shift = IOMMU_PAGE_SHIFT_4K; + stt->size = size; stt->kvm = kvm; for (i = 0; i < npages; i++) { @@ -218,7 +218,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (!stt) return H_TOO_HARD; - entry = ioba >> IOMMU_PAGE_SHIFT_4K; + entry = ioba >> stt->page_shift; /* * SPAPR spec says that the maximum size of the list is 512 TCEs * so the whole table fits in 4K page diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 0486aa2329ee..c786a58c28a7 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -72,11 +72,10 @@ EXPORT_SYMBOL_GPL(kvmppc_find_table); long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, unsigned long ioba, unsigned long npages) { - unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1; - unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K; - unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K; + unsigned long mask = (1ULL << stt->page_shift) - 1; + unsigned long idx = ioba >> stt->page_shift; - if ((ioba & mask) || (idx + npages > size) || (idx + npages < idx)) + if ((ioba & mask) || (idx + npages > stt->size) || (idx + npages < idx)) return H_PARAMETER; return H_SUCCESS; @@ -96,8 +95,8 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate); */ long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) { - unsigned long mask = - ~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ); + unsigned long page_mask = ~((1ULL << stt->page_shift) - 1); + unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ); if (tce & mask) return H_PARAMETER; @@ -198,7 +197,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - kvmppc_tce_put(stt, ioba >> IOMMU_PAGE_SHIFT_4K, tce); + kvmppc_tce_put(stt, ioba >> stt->page_shift, tce); return H_SUCCESS; } @@ -244,7 +243,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (!stt) return H_TOO_HARD; - entry = ioba >> IOMMU_PAGE_SHIFT_4K; + entry = ioba >> stt->page_shift; /* * The spec says that the maximum size of the list is 512 TCEs * so the whole table addressed resides in 4K page @@ -313,8 +312,8 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ)) return H_PARAMETER; - for (i = 0; i < npages; ++i, ioba += IOMMU_PAGE_SIZE_4K) - kvmppc_tce_put(stt, ioba >> IOMMU_PAGE_SHIFT_4K, tce_value); + for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) + kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); return H_SUCCESS; } @@ -336,7 +335,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - idx = ioba >> IOMMU_PAGE_SHIFT_4K; + idx = ioba >> stt->page_shift; page = stt->pages[idx / TCES_PER_PAGE]; tbl = (u64 *)page_address(page); -- cgit 
v1.2.3-58-ga151 From 14f853f1b257b69cf0213ad8c49c01038ccf7ef9 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 1 Mar 2016 17:54:39 +1100 Subject: KVM: PPC: Add @offset to kvmppc_spapr_tce_table This enables userspace view of TCE tables to start from non-zero offset on a bus. This will be used for huge DMA windows. This only changes the internal structure, the user interface needs to change in order to use an offset. Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kvm/book3s_64_vio_hv.c | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index edf66f770498..2e7c79101652 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -184,6 +184,7 @@ struct kvmppc_spapr_tce_table { u64 liobn; struct rcu_head rcu; u32 page_shift; + u64 offset; /* in pages */ u64 size; /* window size in pages */ struct page *pages[0]; }; diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index c786a58c28a7..44be73e6aa26 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -75,7 +75,9 @@ long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, unsigned long mask = (1ULL << stt->page_shift) - 1; unsigned long idx = ioba >> stt->page_shift; - if ((ioba & mask) || (idx + npages > stt->size) || (idx + npages < idx)) + if ((ioba & mask) || (idx < stt->offset) || + (idx - stt->offset + npages > stt->size) || + (idx + npages < idx)) return H_PARAMETER; return H_SUCCESS; @@ -147,6 +149,7 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, struct page *page; u64 *tbl; + idx -= stt->offset; page = stt->pages[idx / TCES_PER_PAGE]; tbl = kvmppc_page_address(page); @@ -335,7 +338,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - idx = ioba >> stt->page_shift; + idx = (ioba >> stt->page_shift) - stt->offset; page = stt->pages[idx / TCES_PER_PAGE]; tbl = (u64 *)page_address(page); -- cgit v1.2.3-58-ga151 From 58ded4201ff028b15f6b317228faa5f154a0663f Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 1 Mar 2016 17:54:40 +1100 Subject: KVM: PPC: Add support for 64bit TCE windows The existing KVM_CREATE_SPAPR_TCE only supports 32bit windows which is not enough for directly mapped windows as the guest can get more than 4GB. This adds KVM_CREATE_SPAPR_TCE_64 ioctl and advertises it via KVM_CAP_SPAPR_TCE_64 capability. The table size is checked against the locked memory limit. Since 64bit windows are to support Dynamic DMA windows (DDW), let's add @bus_offset and @page_shift which are also required by DDW. 
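As a rough illustration (assuming headers from a kernel that carries this patch), userspace could create such a 64bit window along these lines; the LIOBN, page shift and window geometry below are arbitrary example values rather than anything mandated by the interface:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int create_tce64_window(int vm_fd)
{
	struct kvm_create_spapr_tce_64 args = {
		.liobn      = 0x80000001ULL,		/* example LIOBN */
		.page_shift = 16,			/* 64KiB IOMMU pages */
		.flags      = 0,			/* must be zero for now */
		.offset     = (1ULL << 32) >> 16,	/* window starts at 4GiB on the bus */
		.size       = 1ULL << 16,		/* 64K pages = 4GiB of DMA space */
	};

	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SPAPR_TCE_64) <= 0)
		return -1;

	/* On success this returns a file descriptor for the new TCE table. */
	return ioctl(vm_fd, KVM_CREATE_SPAPR_TCE_64, &args);
}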
Signed-off-by: Alexey Kardashevskiy Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/api.txt | 32 ++++++++++++++++++++++++++++++++ arch/powerpc/include/asm/kvm_ppc.h | 2 +- arch/powerpc/include/uapi/asm/kvm.h | 9 +++++++++ arch/powerpc/kvm/book3s_64_vio.c | 10 +++++++--- arch/powerpc/kvm/powerpc.c | 25 ++++++++++++++++++++++++- include/uapi/linux/kvm.h | 2 ++ 6 files changed, 75 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index da3943586a2b..bc78652b0d07 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -3060,6 +3060,38 @@ an implementation for these despite the in kernel acceleration. This capability is always enabled. +4.98 KVM_CREATE_SPAPR_TCE_64 + +Capability: KVM_CAP_SPAPR_TCE_64 +Architectures: powerpc +Type: vm ioctl +Parameters: struct kvm_create_spapr_tce_64 (in) +Returns: file descriptor for manipulating the created TCE table + +This is an extension for KVM_CAP_SPAPR_TCE which only supports 32bit +windows, described in 4.62 KVM_CREATE_SPAPR_TCE + +This capability uses extended struct in ioctl interface: + +/* for KVM_CAP_SPAPR_TCE_64 */ +struct kvm_create_spapr_tce_64 { + __u64 liobn; + __u32 page_shift; + __u32 flags; + __u64 offset; /* in pages */ + __u64 size; /* in pages */ +}; + +The aim of extension is to support an additional bigger DMA window with +a variable page size. +KVM_CREATE_SPAPR_TCE_64 receives a 64bit window size, an IOMMU page shift and +a bus offset of the corresponding DMA window, @size and @offset are numbers +of IOMMU pages. + +@flags are not used at the moment. + +The rest of functionality is identical to KVM_CREATE_SPAPR_TCE. + 5. The kvm_run structure ------------------------ diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 197a8aca2871..2544edabe7f3 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -165,7 +165,7 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu, extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, - struct kvm_create_spapr_tce *args); + struct kvm_create_spapr_tce_64 *args); extern struct kvmppc_spapr_tce_table *kvmppc_find_table( struct kvm_vcpu *vcpu, unsigned long liobn); extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index ab4d4732c492..c93cf35ce379 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -333,6 +333,15 @@ struct kvm_create_spapr_tce { __u32 window_size; }; +/* for KVM_CAP_SPAPR_TCE_64 */ +struct kvm_create_spapr_tce_64 { + __u64 liobn; + __u32 page_shift; + __u32 flags; + __u64 offset; /* in pages */ + __u64 size; /* in pages */ +}; + /* for KVM_ALLOCATE_RMA */ struct kvm_allocate_rma { __u64 rma_size; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 61cbc449d0a8..2c2d1030843a 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -147,20 +147,23 @@ static const struct file_operations kvm_spapr_tce_fops = { }; long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, - struct kvm_create_spapr_tce *args) + struct kvm_create_spapr_tce_64 *args) { struct kvmppc_spapr_tce_table *stt = NULL; unsigned long npages, size; int ret = -ENOMEM; int i; + if (!args->size) + return -EINVAL; + /* Check this LIOBN hasn't been previously 
allocated */ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { if (stt->liobn == args->liobn) return -EBUSY; } - size = args->window_size >> IOMMU_PAGE_SHIFT_4K; + size = args->size; npages = kvmppc_tce_pages(size); ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true); if (ret) { @@ -174,7 +177,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, goto fail; stt->liobn = args->liobn; - stt->page_shift = IOMMU_PAGE_SHIFT_4K; + stt->page_shift = args->page_shift; + stt->offset = args->offset; stt->size = size; stt->kvm = kvm; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 9258675e2ff7..19aa59b0850c 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "timing.h" #include "irq.h" #include "../mm/mmu_decl.h" @@ -519,6 +520,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: + case KVM_CAP_SPAPR_TCE_64: case KVM_CAP_PPC_ALLOC_HTAB: case KVM_CAP_PPC_RTAS: case KVM_CAP_PPC_FIXUP_HCALL: @@ -1344,13 +1346,34 @@ long kvm_arch_vm_ioctl(struct file *filp, break; } #ifdef CONFIG_PPC_BOOK3S_64 + case KVM_CREATE_SPAPR_TCE_64: { + struct kvm_create_spapr_tce_64 create_tce_64; + + r = -EFAULT; + if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64))) + goto out; + if (create_tce_64.flags) { + r = -EINVAL; + goto out; + } + r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); + goto out; + } case KVM_CREATE_SPAPR_TCE: { struct kvm_create_spapr_tce create_tce; + struct kvm_create_spapr_tce_64 create_tce_64; r = -EFAULT; if (copy_from_user(&create_tce, argp, sizeof(create_tce))) goto out; - r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); + + create_tce_64.liobn = create_tce.liobn; + create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K; + create_tce_64.offset = 0; + create_tce_64.size = create_tce.window_size >> + IOMMU_PAGE_SHIFT_4K; + create_tce_64.flags = 0; + r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); goto out; } case KVM_PPC_GET_SMMU_INFO: { diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 8ce5f643d078..b06208b2669c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1143,6 +1143,8 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_PPC_ALLOC_HTAB */ #define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32) #define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce) +#define KVM_CREATE_SPAPR_TCE_64 _IOW(KVMIO, 0xa8, \ + struct kvm_create_spapr_tce_64) /* Available with KVM_CAP_RMA */ #define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma) /* Available with KVM_CAP_PPC_HTAB_FD */ -- cgit v1.2.3-58-ga151 From 9e4aabe2bb3454c83dac8139cf9974503ee044db Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 29 Feb 2016 16:04:43 +0100 Subject: kvm: x86: Convert ioapic->rtc_status.dest_map to a struct Currently this is a bitmap which tracks which CPUs we expect an EOI from. Move this bitmap to a struct so that we can track additional information there. 
Signed-off-by: Joerg Roedel Signed-off-by: Paolo Bonzini --- arch/x86/kvm/ioapic.c | 13 +++++++------ arch/x86/kvm/ioapic.h | 10 ++++++++-- arch/x86/kvm/irq_comm.c | 2 +- arch/x86/kvm/lapic.c | 10 +++++----- arch/x86/kvm/lapic.h | 7 +++++-- 5 files changed, 26 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 1facfd60b04a..f2c9906c5849 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) { ioapic->rtc_status.pending_eoi = 0; - bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS); + bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS); } static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); @@ -117,16 +117,16 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) return; new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector); - old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); + old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map); if (new_val == old_val) return; if (new_val) { - __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); + __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map); ioapic->rtc_status.pending_eoi++; } else { - __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); + __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map); ioapic->rtc_status.pending_eoi--; rtc_status_pending_eoi_check_valid(ioapic); } @@ -156,7 +156,8 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic) static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu) { - if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) { + if (test_and_clear_bit(vcpu->vcpu_id, + ioapic->rtc_status.dest_map.map)) { --ioapic->rtc_status.pending_eoi; rtc_status_pending_eoi_check_valid(ioapic); } @@ -346,7 +347,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status) */ BUG_ON(ioapic->rtc_status.pending_eoi != 0); ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, - ioapic->rtc_status.dest_map); + &ioapic->rtc_status.dest_map); ioapic->rtc_status.pending_eoi = (ret < 0 ? 
0 : ret); } else ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL); diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index 2d16dc251d81..af729890f6f5 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h @@ -40,9 +40,14 @@ struct kvm_vcpu; #define RTC_GSI -1U #endif +struct dest_map { + DECLARE_BITMAP(map, KVM_MAX_VCPUS); +}; + + struct rtc_status { int pending_eoi; - DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS); + struct dest_map dest_map; }; union kvm_ioapic_redirect_entry { @@ -118,7 +123,8 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, int level, bool line_status); void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id); int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, - struct kvm_lapic_irq *irq, unsigned long *dest_map); + struct kvm_lapic_irq *irq, + struct dest_map *dest_map); int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state); void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 37217363887d..54ead79e444b 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -54,7 +54,7 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, } int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, - struct kvm_lapic_irq *irq, unsigned long *dest_map) + struct kvm_lapic_irq *irq, struct dest_map *dest_map) { int i, r = -1; struct kvm_vcpu *vcpu, *lowest = NULL; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index cf74404230ca..e085695e6b5a 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -485,10 +485,10 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, int vector, int level, int trig_mode, - unsigned long *dest_map); + struct dest_map *dest_map); int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, - unsigned long *dest_map) + struct dest_map *dest_map) { struct kvm_lapic *apic = vcpu->arch.apic; @@ -695,7 +695,7 @@ static void kvm_apic_disabled_lapic_found(struct kvm *kvm) } bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, - struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map) + struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map) { struct kvm_apic_map *map; unsigned long bitmap = 1; @@ -894,7 +894,7 @@ out: */ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, int vector, int level, int trig_mode, - unsigned long *dest_map) + struct dest_map *dest_map) { int result = 0; struct kvm_vcpu *vcpu = apic->vcpu; @@ -915,7 +915,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, result = 1; if (dest_map) - __set_bit(vcpu->vcpu_id, dest_map); + __set_bit(vcpu->vcpu_id, dest_map->map); if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) { if (trig_mode) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 59610099af04..f71183e502ee 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -42,6 +42,9 @@ struct kvm_lapic { unsigned long pending_events; unsigned int sipi_vector; }; + +struct dest_map; + int kvm_create_lapic(struct kvm_vcpu *vcpu); void kvm_free_lapic(struct kvm_vcpu *vcpu); @@ -60,11 +63,11 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu); void __kvm_apic_update_irr(u32 *pir, void *regs); void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir); int 
kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, - unsigned long *dest_map); + struct dest_map *dest_map); int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type); bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, - struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map); + struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map); u64 kvm_get_apic_base(struct kvm_vcpu *vcpu); int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info); -- cgit v1.2.3-58-ga151 From 9daa50076f585854f0040aa8403eac020d6f5d64 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 29 Feb 2016 16:04:44 +0100 Subject: kvm: x86: Track irq vectors in ioapic->rtc_status.dest_map This allows backtracking later in case the rtc irq has been moved to another vcpu/vector. Signed-off-by: Joerg Roedel Signed-off-by: Paolo Bonzini --- arch/x86/kvm/ioapic.h | 7 +++++++ arch/x86/kvm/lapic.c | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index af729890f6f5..7d2692a49657 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h @@ -41,7 +41,14 @@ struct kvm_vcpu; #endif struct dest_map { + /* vcpu bitmap where IRQ has been sent */ DECLARE_BITMAP(map, KVM_MAX_VCPUS); + + /* + * Vector sent to a given vcpu, only valid when + * the vcpu's bit in map is set + */ + u8 vectors[KVM_MAX_VCPUS]; }; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index e085695e6b5a..d9ae1ce2a6a0 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -914,8 +914,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, result = 1; - if (dest_map) + if (dest_map) { __set_bit(vcpu->vcpu_id, dest_map->map); + dest_map->vectors[vcpu->vcpu_id] = vector; + } if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) { if (trig_mode) -- cgit v1.2.3-58-ga151 From 4d99ba898dd0c521ca6cdfdde55c9b58aea3cb3d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 29 Feb 2016 16:04:45 +0100 Subject: kvm: x86: Check dest_map->vector to match eoi signals for rtc Using the vector stored at interrupt delivery makes the EOI matching safe against irq migration in the ioapic.
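With the vector recorded at delivery time, the EOI side can match on it directly. Condensed from the __kvm_ioapic_update_eoi() hunk in the diff below (the per-pin scan that follows it is unchanged):

static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
				    struct kvm_ioapic *ioapic,
				    int vector, int trigger_mode)
{
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;

	/* RTC special handling: compare the vector stored at delivery
	 * time instead of checking whether the pin is RTC_GSI. */
	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
	    vector == dest_map->vectors[vcpu->vcpu_id])
		rtc_irq_eoi(ioapic, vcpu);

	/* ... walk the redirection table entries as before ... */
}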
Signed-off-by: Joerg Roedel Signed-off-by: Paolo Bonzini --- arch/x86/kvm/ioapic.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index f2c9906c5849..9db47090ead0 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -237,10 +237,17 @@ static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr) void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors) { struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; + struct dest_map *dest_map = &ioapic->rtc_status.dest_map; union kvm_ioapic_redirect_entry *e; int index; spin_lock(&ioapic->lock); + + /* Make sure we see any missing RTC EOI */ + if (test_bit(vcpu->vcpu_id, dest_map->map)) + __set_bit(dest_map->vectors[vcpu->vcpu_id], + ioapic_handled_vectors); + for (index = 0; index < IOAPIC_NUM_PINS; index++) { e = &ioapic->redirtbl[index]; if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || @@ -408,8 +415,14 @@ static void kvm_ioapic_eoi_inject_work(struct work_struct *work) static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, struct kvm_ioapic *ioapic, int vector, int trigger_mode) { - int i; + struct dest_map *dest_map = &ioapic->rtc_status.dest_map; struct kvm_lapic *apic = vcpu->arch.apic; + int i; + + /* RTC special handling */ + if (test_bit(vcpu->vcpu_id, dest_map->map) && + vector == dest_map->vectors[vcpu->vcpu_id]) + rtc_irq_eoi(ioapic, vcpu); for (i = 0; i < IOAPIC_NUM_PINS; i++) { union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; @@ -417,8 +430,6 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, if (ent->fields.vector != vector) continue; - if (i == RTC_GSI) - rtc_irq_eoi(ioapic, vcpu); /* * We are dropping lock while calling ack notifiers because ack * notifier callbacks for assigned devices call into IOAPIC -- cgit v1.2.3-58-ga151 From 92f94f1e9e509caa564353c516c904278999e350 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:06 +0800 Subject: KVM: MMU: rename has_wrprotected_page to mmu_gfn_lpage_is_disallowed kvm_lpage_info->write_count is used to detect if the large page mapping for the gfn on the specified level is allowed, rename it to disallow_lpage to reflect its purpose, also we rename has_wrprotected_page() to mmu_gfn_lpage_is_disallowed() to make the code more clearer Later we will extend this mechanism for page tracking: if the gfn is tracked then large mapping for that gfn on any level is not allowed. The new name is more straightforward Reviewed-by: Paolo Bonzini Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/mmu.txt | 6 +++--- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/mmu.c | 25 +++++++++++++------------ arch/x86/kvm/x86.c | 14 ++++++++------ 4 files changed, 25 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt index daf9c0f742d2..dda2e9316701 100644 --- a/Documentation/virtual/kvm/mmu.txt +++ b/Documentation/virtual/kvm/mmu.txt @@ -391,11 +391,11 @@ To instantiate a large spte, four constraints must be satisfied: write-protected pages - the guest page must be wholly contained by a single memory slot -To check the last two conditions, the mmu maintains a ->write_count set of +To check the last two conditions, the mmu maintains a ->disallow_lpage set of arrays for each memory slot and large page size. 
Every write protected page -causes its write_count to be incremented, thus preventing instantiation of +causes its disallow_lpage to be incremented, thus preventing instantiation of a large spte. The frames at the end of an unaligned memory slot have -artificially inflated ->write_counts so they can never be instantiated. +artificially inflated ->disallow_lpages so they can never be instantiated. Zapping all pages (page generation count) ========================================= diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7b5459982433..1f7fed5f35fc 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -644,7 +644,7 @@ struct kvm_vcpu_arch { }; struct kvm_lpage_info { - int write_count; + int disallow_lpage; }; struct kvm_arch_memory_slot { diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 07f4c26a10d3..06dd4abfeaa3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -789,7 +789,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) slot = __gfn_to_memslot(slots, gfn); for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { linfo = lpage_info_slot(gfn, slot, i); - linfo->write_count += 1; + linfo->disallow_lpage += 1; } kvm->arch.indirect_shadow_pages++; } @@ -807,31 +807,32 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) slot = __gfn_to_memslot(slots, gfn); for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { linfo = lpage_info_slot(gfn, slot, i); - linfo->write_count -= 1; - WARN_ON(linfo->write_count < 0); + linfo->disallow_lpage -= 1; + WARN_ON(linfo->disallow_lpage < 0); } kvm->arch.indirect_shadow_pages--; } -static int __has_wrprotected_page(gfn_t gfn, int level, - struct kvm_memory_slot *slot) +static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level, + struct kvm_memory_slot *slot) { struct kvm_lpage_info *linfo; if (slot) { linfo = lpage_info_slot(gfn, slot, level); - return linfo->write_count; + return !!linfo->disallow_lpage; } - return 1; + return true; } -static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level) +static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn, + int level) { struct kvm_memory_slot *slot; slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - return __has_wrprotected_page(gfn, level, slot); + return __mmu_gfn_lpage_is_disallowed(gfn, level, slot); } static int host_mapping_level(struct kvm *kvm, gfn_t gfn) @@ -897,7 +898,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn, max_level = min(kvm_x86_ops->get_lpage_level(), host_level); for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) - if (__has_wrprotected_page(large_gfn, level, slot)) + if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot)) break; return level - 1; @@ -2503,7 +2504,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, * be fixed if guest refault. 
*/ if (level > PT_PAGE_TABLE_LEVEL && - has_wrprotected_page(vcpu, gfn, level)) + mmu_gfn_lpage_is_disallowed(vcpu, gfn, level)) goto done; spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; @@ -2768,7 +2769,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && PageTransCompound(pfn_to_page(pfn)) && - !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) { + !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { unsigned long mask; /* * mmu_notifier_retry was successful and we hold the diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2fb92c0af803..668625e47102 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7879,6 +7879,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { + struct kvm_lpage_info *linfo; unsigned long ugfn; int lpages; int level = i + 1; @@ -7893,15 +7894,16 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, if (i == 0) continue; - slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * - sizeof(*slot->arch.lpage_info[i - 1])); - if (!slot->arch.lpage_info[i - 1]) + linfo = kvm_kvzalloc(lpages * sizeof(*linfo)); + if (!linfo) goto out_free; + slot->arch.lpage_info[i - 1] = linfo; + if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) - slot->arch.lpage_info[i - 1][0].write_count = 1; + linfo[0].disallow_lpage = 1; if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) - slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; + linfo[lpages - 1].disallow_lpage = 1; ugfn = slot->userspace_addr >> PAGE_SHIFT; /* * If the gfn and userspace address are not aligned wrt each @@ -7913,7 +7915,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long j; for (j = 0; j < lpages; ++j) - slot->arch.lpage_info[i - 1][j].write_count = 1; + linfo[j].disallow_lpage = 1; } } -- cgit v1.2.3-58-ga151 From 547ffaed871af7db8713972eaf630802cac47cb1 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:07 +0800 Subject: KVM: MMU: introduce kvm_mmu_gfn_{allow,disallow}_lpage Abstract the common operations from account_shadowed() and unaccount_shadowed(), then introduce kvm_mmu_gfn_disallow_lpage() and kvm_mmu_gfn_allow_lpage() These two functions will be used by page tracking in the later patch Reviewed-by: Paolo Bonzini Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 38 +++++++++++++++++++++++++------------- arch/x86/kvm/mmu.h | 3 +++ 2 files changed, 28 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 06dd4abfeaa3..566639dd13b3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -776,21 +776,39 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, return &slot->arch.lpage_info[level - 2][idx]; } +static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot, + gfn_t gfn, int count) +{ + struct kvm_lpage_info *linfo; + int i; + + for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { + linfo = lpage_info_slot(gfn, slot, i); + linfo->disallow_lpage += count; + WARN_ON(linfo->disallow_lpage < 0); + } +} + +void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) +{ + update_gfn_disallow_lpage_count(slot, gfn, 1); +} + +void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) +{ + update_gfn_disallow_lpage_count(slot, gfn, -1); +} + static 
void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; - struct kvm_lpage_info *linfo; gfn_t gfn; - int i; gfn = sp->gfn; slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); - for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { - linfo = lpage_info_slot(gfn, slot, i); - linfo->disallow_lpage += 1; - } + kvm_mmu_gfn_disallow_lpage(slot, gfn); kvm->arch.indirect_shadow_pages++; } @@ -798,18 +816,12 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; - struct kvm_lpage_info *linfo; gfn_t gfn; - int i; gfn = sp->gfn; slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); - for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { - linfo = lpage_info_slot(gfn, slot, i); - linfo->disallow_lpage -= 1; - WARN_ON(linfo->disallow_lpage < 0); - } + kvm_mmu_gfn_allow_lpage(slot, gfn); kvm->arch.indirect_shadow_pages--; } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 55ffb7b0f95e..de92bed207f1 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -174,4 +174,7 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm); void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end); + +void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); +void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); #endif -- cgit v1.2.3-58-ga151 From aeecee2ea6e2b020de8bb562f4e79ab34eda3e22 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:08 +0800 Subject: KVM: MMU: introduce kvm_mmu_slot_gfn_write_protect Split rmap_write_protect() and introduce the function to abstract the write protection based on the slot This function will be used in the later patch Reviewed-by: Paolo Bonzini Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 16 +++++++++++----- arch/x86/kvm/mmu.h | 2 ++ 2 files changed, 13 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 566639dd13b3..7184218acf78 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1336,23 +1336,29 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); } -static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) +bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, + struct kvm_memory_slot *slot, u64 gfn) { - struct kvm_memory_slot *slot; struct kvm_rmap_head *rmap_head; int i; bool write_protected = false; - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { rmap_head = __gfn_to_rmap(gfn, i, slot); - write_protected |= __rmap_write_protect(vcpu->kvm, rmap_head, true); + write_protected |= __rmap_write_protect(kvm, rmap_head, true); } return write_protected; } +static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) +{ + struct kvm_memory_slot *slot; + + slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn); +} + static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) { u64 *sptep; diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index de92bed207f1..58fe98a0a526 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -177,4 +177,6 @@ void kvm_zap_gfn_range(struct 
kvm *kvm, gfn_t gfn_start, gfn_t gfn_end); void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); +bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, + struct kvm_memory_slot *slot, u64 gfn); #endif -- cgit v1.2.3-58-ga151 From 21ebbedaddf25a35a70fedc001ba7e5f5b9129bc Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:09 +0800 Subject: KVM: page track: add the framework of guest page tracking The array, gfn_track[mode][gfn], is introduced in memory slot for every guest page, this is the tracking count for the gust page on different modes. If the page is tracked then the count is increased, the page is not tracked after the count reaches zero We use 'unsigned short' as the tracking count which should be enough as shadow page table only can use 2^14 (2^3 for level, 2^1 for cr4_pae, 2^2 for quadrant, 2^3 for access, 2^1 for nxe, 2^1 for cr0_wp, 2^1 for smep_andnot_wp, 2^1 for smap_andnot_wp, and 2^1 for smm) at most, there is enough room for other trackers Two callbacks, kvm_page_track_create_memslot() and kvm_page_track_free_memslot() are implemented in this patch, they are internally used to initialize and reclaim the memory of the array Currently, only write track mode is supported Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 10 +++++++ arch/x86/include/asm/kvm_page_track.h | 13 +++++++++ arch/x86/kvm/Makefile | 3 +- arch/x86/kvm/page_track.c | 52 +++++++++++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 5 ++++ 5 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 arch/x86/include/asm/kvm_page_track.h create mode 100644 arch/x86/kvm/page_track.c (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1f7fed5f35fc..71e43fe04bbc 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -32,6 +32,7 @@ #include #include #include +#include #define KVM_MAX_VCPUS 255 #define KVM_SOFT_MAX_VCPUS 160 @@ -214,6 +215,14 @@ struct kvm_mmu_memory_cache { void *objects[KVM_NR_MEM_OBJS]; }; +/* + * the pages used as guest page table on soft mmu are tracked by + * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used + * by indirect shadow page can not be more than 15 bits. + * + * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access, + * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp. 
+ */ union kvm_mmu_page_role { unsigned word; struct { @@ -650,6 +659,7 @@ struct kvm_lpage_info { struct kvm_arch_memory_slot { struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES]; struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; + unsigned short *gfn_track[KVM_PAGE_TRACK_MAX]; }; /* diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h new file mode 100644 index 000000000000..5520040682d1 --- /dev/null +++ b/arch/x86/include/asm/kvm_page_track.h @@ -0,0 +1,13 @@ +#ifndef _ASM_X86_KVM_PAGE_TRACK_H +#define _ASM_X86_KVM_PAGE_TRACK_H + +enum kvm_page_track_mode { + KVM_PAGE_TRACK_WRITE, + KVM_PAGE_TRACK_MAX, +}; + +void kvm_page_track_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont); +int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, + unsigned long npages); +#endif diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index a1ff508bb423..464fa477afbf 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -13,9 +13,10 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ - hyperv.o + hyperv.o page_track.o kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o + kvm-intel-y += vmx.o pmu_intel.o kvm-amd-y += svm.o pmu_amd.o diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c new file mode 100644 index 000000000000..8c396d08c0a4 --- /dev/null +++ b/arch/x86/kvm/page_track.c @@ -0,0 +1,52 @@ +/* + * Support KVM gust page tracking + * + * This feature allows us to track page access in guest. Currently, only + * write access is tracked. + * + * Copyright(C) 2015 Intel Corporation. + * + * Author: + * Xiao Guangrong + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include +#include +#include + +#include "mmu.h" + +void kvm_page_track_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ + int i; + + for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) + if (!dont || free->arch.gfn_track[i] != + dont->arch.gfn_track[i]) { + kvfree(free->arch.gfn_track[i]); + free->arch.gfn_track[i] = NULL; + } +} + +int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, + unsigned long npages) +{ + int i; + + for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { + slot->arch.gfn_track[i] = kvm_kvzalloc(npages * + sizeof(*slot->arch.gfn_track[i])); + if (!slot->arch.gfn_track[i]) + goto track_free; + } + + return 0; + +track_free: + kvm_page_track_free_memslot(slot, NULL); + return -ENOMEM; +} diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 668625e47102..7b4cfea09deb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7871,6 +7871,8 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, free->arch.lpage_info[i - 1] = NULL; } } + + kvm_page_track_free_memslot(free, dont); } int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, @@ -7919,6 +7921,9 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, } } + if (kvm_page_track_create_memslot(slot, npages)) + goto out_free; + return 0; out_free: -- cgit v1.2.3-58-ga151 From f29d4d7810d7fd61442371cd68957e1d37ed79bb Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:10 +0800 Subject: KVM: page track: introduce kvm_slot_page_track_{add,remove}_page These two functions are the user APIs: - kvm_slot_page_track_add_page(): add the page to the tracking pool after that later specified access on that page will be tracked - kvm_slot_page_track_remove_page(): remove the page from the tracking pool, the specified access on the page is not tracked after the last user is gone Both of these are called under the protection both of mmu-lock and kvm->srcu or kvm->slots_lock Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 7 +++ arch/x86/kvm/page_track.c | 85 +++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 5520040682d1..e363e3040ba4 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -10,4 +10,11 @@ void kvm_page_track_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont); int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, unsigned long npages); + +void kvm_slot_page_track_add_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode); +void kvm_slot_page_track_remove_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode); #endif diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index 8c396d08c0a4..cd76bc318968 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c @@ -50,3 +50,88 @@ track_free: kvm_page_track_free_memslot(slot, NULL); return -ENOMEM; } + +static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode) +{ + if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX) + return false; + + return true; +} + +static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode, short count) +{ + int index, val; + + index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); + + val = 
slot->arch.gfn_track[mode][index]; + + if (WARN_ON(val + count < 0 || val + count > USHRT_MAX)) + return; + + slot->arch.gfn_track[mode][index] += count; +} + +/* + * add guest page to the tracking pool so that corresponding access on that + * page will be intercepted. + * + * It should be called under the protection both of mmu-lock and kvm->srcu + * or kvm->slots_lock. + * + * @kvm: the guest instance we are interested in. + * @slot: the @gfn belongs to. + * @gfn: the guest page. + * @mode: tracking mode, currently only write track is supported. + */ +void kvm_slot_page_track_add_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode) +{ + + if (WARN_ON(!page_track_mode_is_valid(mode))) + return; + + update_gfn_track(slot, gfn, mode, 1); + + /* + * new track stops large page mapping for the + * tracked page. + */ + kvm_mmu_gfn_disallow_lpage(slot, gfn); + + if (mode == KVM_PAGE_TRACK_WRITE) + if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn)) + kvm_flush_remote_tlbs(kvm); +} + +/* + * remove the guest page from the tracking pool which stops the interception + * of corresponding access on that page. It is the opposed operation of + * kvm_slot_page_track_add_page(). + * + * It should be called under the protection both of mmu-lock and kvm->srcu + * or kvm->slots_lock. + * + * @kvm: the guest instance we are interested in. + * @slot: the @gfn belongs to. + * @gfn: the guest page. + * @mode: tracking mode, currently only write track is supported. + */ +void kvm_slot_page_track_remove_page(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + enum kvm_page_track_mode mode) +{ + if (WARN_ON(!page_track_mode_is_valid(mode))) + return; + + update_gfn_track(slot, gfn, mode, -1); + + /* + * allow large page mapping for the tracked page + * after the tracker is gone. + */ + kvm_mmu_gfn_allow_lpage(slot, gfn); +} -- cgit v1.2.3-58-ga151 From 3d0c27ad6ee465f174b09ee99fcaf189c57d567a Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:11 +0800 Subject: KVM: MMU: let page fault handler be aware tracked page The page fault caused by write access on the write tracked page can not be fixed, it always need to be emulated. 
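Concretely, the new fast-path check is shaped like the following (a condensed form of the mmu.c hunk further below):

static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
					 u32 error_code, gfn_t gfn)
{
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	/* only present write faults can be caused by write tracking */
	if (!(error_code & PFERR_PRESENT_MASK) ||
	    !(error_code & PFERR_WRITE_MASK))
		return false;

	/* a write to a write-tracked gfn is never fixed up here,
	 * the access falls back to emulation */
	return kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE);
}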
page_fault_handle_page_track(), sketched above, is the fast path we introduce here to skip taking mmu-lock and walking the shadow page table. However, if the page table is not present, it is worth making the page table entry present and read-only so that read access keeps working. mmu_need_write_protect() needs to be adjusted so that the page does not become writable when the page table is made present or when shadow page table entries are synced/prefetched. Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 2 ++ arch/x86/kvm/mmu.c | 44 +++++++++++++++++++++++++++++------ arch/x86/kvm/page_track.c | 15 ++++++++++++ arch/x86/kvm/paging_tmpl.h | 3 +++ 4 files changed, 57 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index e363e3040ba4..5f16e2864e73 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -17,4 +17,6 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, void kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); +bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, + enum kvm_page_track_mode mode); #endif diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7184218acf78..dd8e3ca2d79b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -41,6 +41,7 @@ #include #include #include +#include /* * When setting this variable to true it enables Two-Dimensional-Paging @@ -2448,25 +2449,29 @@ static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) } } -static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, - bool can_unsync) +static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, + bool can_unsync) { struct kvm_mmu_page *s; bool need_unsync = false; + if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) + return true; + for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { if (!can_unsync) - return 1; + return true; if (s->role.level != PT_PAGE_TABLE_LEVEL) - return 1; + return true; if (!s->unsync) need_unsync = true; } if (need_unsync) kvm_unsync_pages(vcpu, gfn); - return 0; + + return false; } static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) @@ -3381,21 +3386,43 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) } EXPORT_SYMBOL_GPL(handle_mmio_page_fault); +static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, + u32 error_code, gfn_t gfn) +{ + if (unlikely(error_code & PFERR_RSVD_MASK)) + return false; + + if (!(error_code & PFERR_PRESENT_MASK) || + !(error_code & PFERR_WRITE_MASK)) + return false; + + /* + * guest is writing the page which is write tracked which can + * not be fixed by page fault handler. 
+ */ + if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) + return true; + + return false; +} + static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, bool prefault) { - gfn_t gfn; + gfn_t gfn = gva >> PAGE_SHIFT; int r; pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); + if (page_fault_handle_page_track(vcpu, error_code, gfn)) + return 1; + r = mmu_topup_memory_caches(vcpu); if (r) return r; MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); - gfn = gva >> PAGE_SHIFT; return nonpaging_map(vcpu, gva & PAGE_MASK, error_code, gfn, prefault); @@ -3472,6 +3499,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); + if (page_fault_handle_page_track(vcpu, error_code, gfn)) + return 1; + r = mmu_topup_memory_caches(vcpu); if (r) return r; diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index cd76bc318968..f127f6d04fa1 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c @@ -135,3 +135,18 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, */ kvm_mmu_gfn_allow_lpage(slot, gfn); } + +/* + * check if the corresponding access on the specified guest page is tracked. + */ +bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, + enum kvm_page_track_mode mode) +{ + struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + int index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); + + if (WARN_ON(!page_track_mode_is_valid(mode))) + return false; + + return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); +} diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 05827ff7bd2e..52ae2d94cc9e 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -728,6 +728,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, return 0; } + if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) + return 1; + vcpu->arch.write_fault_to_shadow_pgtable = false; is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, -- cgit v1.2.3-58-ga151 From e5691a81e830c12d396b3f219ab999be87a1208f Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:12 +0800 Subject: KVM: MMU: clear write-flooding on the fast path of tracked page If the page fault is caused by write access on write tracked page, the real shadow page walking is skipped, we lost the chance to clear write flooding for the page structure current vcpu is using Fix it by locklessly waking shadow page table to clear write flooding on the shadow page structure out of mmu-lock. So that we change the count to atomic_t Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/mmu.c | 22 ++++++++++++++++++++-- arch/x86/kvm/paging_tmpl.h | 4 +++- 3 files changed, 24 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 71e43fe04bbc..e2fc5c0ec86a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -285,7 +285,7 @@ struct kvm_mmu_page { #endif /* Number of writes since the last time traversal visited this page. 
*/ - int write_flooding_count; + atomic_t write_flooding_count; }; struct kvm_pio_request { diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index dd8e3ca2d79b..58c067da6efc 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2063,7 +2063,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) { - sp->write_flooding_count = 0; + atomic_set(&sp->write_flooding_count, 0); } static void clear_sp_write_flooding_count(u64 *spte) @@ -3406,6 +3406,23 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, return false; } +static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) +{ + struct kvm_shadow_walk_iterator iterator; + u64 spte; + + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + return; + + walk_shadow_page_lockless_begin(vcpu); + for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { + clear_sp_write_flooding_count(iterator.sptep); + if (!is_shadow_present_pte(spte)) + break; + } + walk_shadow_page_lockless_end(vcpu); +} + static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, bool prefault) { @@ -4221,7 +4238,8 @@ static bool detect_write_flooding(struct kvm_mmu_page *sp) if (sp->role.level == PT_PAGE_TABLE_LEVEL) return false; - return ++sp->write_flooding_count >= 3; + atomic_inc(&sp->write_flooding_count); + return atomic_read(&sp->write_flooding_count) >= 3; } /* diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 52ae2d94cc9e..4174cf290fa3 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -728,8 +728,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, return 0; } - if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) + if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) { + shadow_page_table_clear_flood(vcpu, addr); return 1; + } vcpu->arch.write_fault_to_shadow_pgtable = false; -- cgit v1.2.3-58-ga151 From 0eb05bf290cfe8610d9680b49abef37febd1c38a Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:13 +0800 Subject: KVM: page track: add notifier support Notifier list is introduced so that any node wants to receive the track event can register to the list Two APIs are introduced here: - kvm_page_track_register_notifier(): register the notifier to receive track event - kvm_page_track_unregister_notifier(): stop receiving track event by unregister the notifier The callback, node->track_write() is called when a write access on the write tracked page happens Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/include/asm/kvm_page_track.h | 39 +++++++++++++++++++ arch/x86/kvm/page_track.c | 70 +++++++++++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 4 ++ 4 files changed, 114 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e2fc5c0ec86a..eb68e6aca0cf 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -704,6 +704,7 @@ struct kvm_arch { */ struct list_head active_mmu_pages; struct list_head zapped_obsolete_pages; + struct kvm_page_track_notifier_head track_notifier_head; struct list_head assigned_dev_head; struct iommu_domain *iommu_domain; diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 5f16e2864e73..c2b8d24a235c 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -6,6 
+6,36 @@ enum kvm_page_track_mode { KVM_PAGE_TRACK_MAX, }; +/* + * The notifier represented by @kvm_page_track_notifier_node is linked into + * the head which will be notified when guest is triggering the track event. + * + * Write access on the head is protected by kvm->mmu_lock, read access + * is protected by track_srcu. + */ +struct kvm_page_track_notifier_head { + struct srcu_struct track_srcu; + struct hlist_head track_notifier_list; +}; + +struct kvm_page_track_notifier_node { + struct hlist_node node; + + /* + * It is called when guest is writing the write-tracked page + * and write emulation is finished at that time. + * + * @vcpu: the vcpu where the write access happened. + * @gpa: the physical address written by guest. + * @new: the data was written to the address. + * @bytes: the written length. + */ + void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, + int bytes); +}; + +void kvm_page_track_init(struct kvm *kvm); + void kvm_page_track_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont); int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, @@ -19,4 +49,13 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, enum kvm_page_track_mode mode); bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, enum kvm_page_track_mode mode); + +void +kvm_page_track_register_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n); +void +kvm_page_track_unregister_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n); +void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, + int bytes); #endif diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index f127f6d04fa1..11f76436f74f 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c @@ -150,3 +150,73 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); } + +void kvm_page_track_init(struct kvm *kvm) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + init_srcu_struct(&head->track_srcu); + INIT_HLIST_HEAD(&head->track_notifier_list); +} + +/* + * register the notifier so that event interception for the tracked guest + * pages can be received. + */ +void +kvm_page_track_register_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + + spin_lock(&kvm->mmu_lock); + hlist_add_head_rcu(&n->node, &head->track_notifier_list); + spin_unlock(&kvm->mmu_lock); +} + +/* + * stop receiving the event interception. It is the opposed operation of + * kvm_page_track_register_notifier(). + */ +void +kvm_page_track_unregister_notifier(struct kvm *kvm, + struct kvm_page_track_notifier_node *n) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + + spin_lock(&kvm->mmu_lock); + hlist_del_rcu(&n->node); + spin_unlock(&kvm->mmu_lock); + synchronize_srcu(&head->track_srcu); +} + +/* + * Notify the node that write access is intercepted and write emulation is + * finished at this time. + * + * The node should figure out if the written page is the one that node is + * interested in by itself. 
+ */ +void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, + int bytes) +{ + struct kvm_page_track_notifier_head *head; + struct kvm_page_track_notifier_node *n; + int idx; + + head = &vcpu->kvm->arch.track_notifier_head; + + if (hlist_empty(&head->track_notifier_list)) + return; + + idx = srcu_read_lock(&head->track_srcu); + hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) + if (n->track_write) + n->track_write(vcpu, gpa, new, bytes); + srcu_read_unlock(&head->track_srcu, idx); +} diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7b4cfea09deb..b81c14ef1e1d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4346,6 +4346,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, if (ret < 0) return 0; kvm_mmu_pte_write(vcpu, gpa, val, bytes); + kvm_page_track_write(vcpu, gpa, val, bytes); return 1; } @@ -4604,6 +4605,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); kvm_mmu_pte_write(vcpu, gpa, new, bytes); + kvm_page_track_write(vcpu, gpa, new, bytes); return X86EMUL_CONTINUE; @@ -7724,6 +7726,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); + kvm_page_track_init(kvm); + return 0; } -- cgit v1.2.3-58-ga151 From 56ca57f9fe553da122472a15e49c2d808123ff0a Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:14 +0800 Subject: KVM: MMU: use page track for non-leaf shadow pages non-leaf shadow pages are always write protected, it can be the user of page track Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 58c067da6efc..fe03d2a1d4d3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -806,11 +806,17 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) struct kvm_memory_slot *slot; gfn_t gfn; + kvm->arch.indirect_shadow_pages++; gfn = sp->gfn; slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); + + /* the non-leaf shadow pages are keeping readonly. */ + if (sp->role.level > PT_PAGE_TABLE_LEVEL) + return kvm_slot_page_track_add_page(kvm, slot, gfn, + KVM_PAGE_TRACK_WRITE); + kvm_mmu_gfn_disallow_lpage(slot, gfn); - kvm->arch.indirect_shadow_pages++; } static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) @@ -819,11 +825,15 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) struct kvm_memory_slot *slot; gfn_t gfn; + kvm->arch.indirect_shadow_pages--; gfn = sp->gfn; slots = kvm_memslots_for_spte_role(kvm, sp->role); slot = __gfn_to_memslot(slots, gfn); + if (sp->role.level > PT_PAGE_TABLE_LEVEL) + return kvm_slot_page_track_remove_page(kvm, slot, gfn, + KVM_PAGE_TRACK_WRITE); + kvm_mmu_gfn_allow_lpage(slot, gfn); - kvm->arch.indirect_shadow_pages--; } static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level, @@ -2132,12 +2142,18 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, hlist_add_head(&sp->hash_link, &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); if (!direct) { - if (rmap_write_protect(vcpu, gfn)) + /* + * we should do write protection before syncing pages + * otherwise the content of the synced shadow page may + * be inconsistent with guest page table. 
+ */ + account_shadowed(vcpu->kvm, sp); + if (level == PT_PAGE_TABLE_LEVEL && + rmap_write_protect(vcpu, gfn)) kvm_flush_remote_tlbs(vcpu->kvm); + if (level > PT_PAGE_TABLE_LEVEL && need_sync) kvm_sync_pages(vcpu, gfn); - - account_shadowed(vcpu->kvm, sp); } sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; clear_page(sp->spt); -- cgit v1.2.3-58-ga151 From 5c520e90af3ad546bf328d2c9306c72bf3da6afe Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:15 +0800 Subject: KVM: MMU: simplify mmu_need_write_protect Now, all non-leaf shadow page are page tracked, if gfn is not tracked there is no non-leaf shadow page of gfn is existed, we can directly make the shadow page of gfn to unsync Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 29 +++++++---------------------- 1 file changed, 7 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index fe03d2a1d4d3..d6b264b492c9 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2444,7 +2444,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) } EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); -static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { trace_kvm_mmu_unsync_page(sp); ++vcpu->kvm->stat.mmu_unsync; @@ -2453,39 +2453,24 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) kvm_mmu_mark_parents_unsync(sp); } -static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) -{ - struct kvm_mmu_page *s; - - for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { - if (s->unsync) - continue; - WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); - __kvm_unsync_page(vcpu, s); - } -} - static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync) { - struct kvm_mmu_page *s; - bool need_unsync = false; + struct kvm_mmu_page *sp; if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) return true; - for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { + for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { if (!can_unsync) return true; - if (s->role.level != PT_PAGE_TABLE_LEVEL) - return true; + if (sp->unsync) + continue; - if (!s->unsync) - need_unsync = true; + WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); + kvm_unsync_page(vcpu, sp); } - if (need_unsync) - kvm_unsync_pages(vcpu, gfn); return false; } -- cgit v1.2.3-58-ga151 From 13d268ca2c4c29d6da2ba79419f9a655e602afed Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 17:51:16 +0800 Subject: KVM: MMU: apply page track notifier Register the notifier to receive write track event so that we can update our shadow page table It makes kvm_mmu_pte_write() be the callback of the notifier, no function is changed Signed-off-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 5 +++-- arch/x86/kvm/mmu.c | 19 +++++++++++++++++-- arch/x86/kvm/x86.c | 4 ++-- 3 files changed, 22 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index eb68e6aca0cf..1c3e390993a2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -704,6 +704,7 @@ struct kvm_arch { */ struct list_head active_mmu_pages; struct list_head zapped_obsolete_pages; + struct kvm_page_track_notifier_node mmu_sp_tracker; struct kvm_page_track_notifier_head track_notifier_head; struct list_head assigned_dev_head; @@ -1001,6 +1002,8 @@ 
void kvm_mmu_module_exit(void); void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); void kvm_mmu_setup(struct kvm_vcpu *vcpu); +void kvm_mmu_init_vm(struct kvm *kvm); +void kvm_mmu_uninit_vm(struct kvm *kvm); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, u64 dirty_mask, u64 nx_mask, u64 x_mask); @@ -1140,8 +1143,6 @@ void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id); void kvm_inject_nmi(struct kvm_vcpu *vcpu); -void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, - const u8 *new, int bytes); int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d6b264b492c9..42ca0acc1c4e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4302,8 +4302,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) return spte; } -void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, - const u8 *new, int bytes) +static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, + const u8 *new, int bytes) { gfn_t gfn = gpa >> PAGE_SHIFT; struct kvm_mmu_page *sp; @@ -4517,6 +4517,21 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu) init_kvm_mmu(vcpu); } +void kvm_mmu_init_vm(struct kvm *kvm) +{ + struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; + + node->track_write = kvm_mmu_pte_write; + kvm_page_track_register_notifier(kvm, node); +} + +void kvm_mmu_uninit_vm(struct kvm *kvm) +{ + struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; + + kvm_page_track_unregister_notifier(kvm, node); +} + /* The return value indicates if tlb flush on all vcpus is needed. */ typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b81c14ef1e1d..f4654e4150b0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4345,7 +4345,6 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); if (ret < 0) return 0; - kvm_mmu_pte_write(vcpu, gpa, val, bytes); kvm_page_track_write(vcpu, gpa, val, bytes); return 1; } @@ -4604,7 +4603,6 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, return X86EMUL_CMPXCHG_FAILED; kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); - kvm_mmu_pte_write(vcpu, gpa, new, bytes); kvm_page_track_write(vcpu, gpa, new, bytes); return X86EMUL_CONTINUE; @@ -7727,6 +7725,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); kvm_page_track_init(kvm); + kvm_mmu_init_vm(kvm); return 0; } @@ -7854,6 +7853,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kfree(kvm->arch.vioapic); kvm_free_vcpus(kvm); kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); + kvm_mmu_uninit_vm(kvm); } void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, -- cgit v1.2.3-58-ga151 From 7dd0fdff145c5be7146d0ac06732ae3613412ac1 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:38 +0100 Subject: KVM: i8254: change PIT discard tick policy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Discard policy uses ack_notifiers to prevent injection of PIT interrupts before EOI from the last one. This patch changes the policy to always try to deliver the interrupt, which makes a difference when its vector is in ISR. 
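The policy change is easiest to see in pit_do_work(); condensed from the hunk below, the injection decision becomes:

	spin_lock(&ps->inject_lock);
	if (!ps->reinject)
		inject = 1;		/* discard policy: always deliver the tick */
	else if (ps->irq_ack) {
		ps->irq_ack = 0;	/* reinject policy: still gated on the last ack */
		inject = 1;
	}
	spin_unlock(&ps->inject_lock);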
Old implementation would drop the interrupt, but proposed one injects to IRR, like real hardware would. The old policy breaks legacy NMI watchdogs, where PIT is used through virtual wire (LVT0): PIT never sends an interrupt before receiving EOI, thus a guest deadlock with disabled interrupts will stop NMIs. Note that NMI doesn't do EOI, so PIT also had to send a normal interrupt through IOAPIC. (KVM's PIT is deeply rotten and luckily not used much in modern systems.) Even though there is a chance of regressions, I think we can fix the LVT0 NMI bug without introducing a new tick policy. Cc: Reported-by: Yuki Shibuya Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index b0ea42b78ccd..ab5318727579 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) * PIC is being reset. Handle it gracefully here */ atomic_inc(&ps->pending); - else if (value > 0) + else if (value > 0 && ps->reinject) /* in this case, we had multiple outstanding pit interrupts * that we needed to inject. Reinject */ @@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_work *work) * last one has been acked. */ spin_lock(&ps->inject_lock); - if (ps->irq_ack) { + if (!ps->reinject) + inject = 1; + else if (ps->irq_ack) { ps->irq_ack = 0; inject = 1; } @@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer); struct kvm_pit *pt = ps->kvm->arch.vpit; - if (ps->reinject || !atomic_read(&ps->pending)) { + if (ps->reinject) atomic_inc(&ps->pending); - queue_kthread_work(&pt->worker, &pt->expired); - } + + queue_kthread_work(&pt->worker, &pt->expired); if (ps->is_periodic) { hrtimer_add_expires_ns(&ps->timer, ps->period); -- cgit v1.2.3-58-ga151 From f6e0a0c113f78a0e13c69762b2ab342d6c69220d Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:39 +0100 Subject: KVM: i8254: simplify atomics in kvm_pit_ack_irq MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We already have a helper that does the same thing. Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index ab5318727579..7d694ac7f4a4 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -236,19 +236,9 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) { struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, irq_ack_notifier); - int value; spin_lock(&ps->inject_lock); - value = atomic_dec_return(&ps->pending); - if (value < 0) - /* spurious acks can be generated if, for example, the - * PIC is being reset. Handle it gracefully here - */ - atomic_inc(&ps->pending); - else if (value > 0 && ps->reinject) - /* in this case, we had multiple outstanding pit interrupts - * that we needed to inject. 
Reinject - */ + if (atomic_dec_if_positive(&ps->pending) > 0 && ps->reinject) queue_kthread_work(&ps->pit->worker, &ps->pit->expired); ps->irq_ack = 1; spin_unlock(&ps->inject_lock); -- cgit v1.2.3-58-ga151 From fd700a00dc2e821be92b0b56fd5d8ebf8c63f9ba Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:40 +0100 Subject: KVM: i8254: add kvm_pit_reset_reinject MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit pit_state.pending and pit_state.irq_ack are always reset at the same time. Create a function for them. Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 7d694ac7f4a4..bdbb3f076e72 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -321,6 +321,12 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) return HRTIMER_NORESTART; } +static inline void kvm_pit_reset_reinject(struct kvm_pit *pit) +{ + atomic_set(&pit->pit_state.pending, 0); + pit->pit_state.irq_ack = 1; +} + static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) { struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; @@ -343,8 +349,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) ps->timer.function = pit_timer_fn; ps->kvm = ps->pit->kvm; - atomic_set(&ps->pending, 0); - ps->irq_ack = 1; + kvm_pit_reset_reinject(ps->pit); /* * Do not allow the guest to program periodic timers with small @@ -644,18 +649,15 @@ void kvm_pit_reset(struct kvm_pit *pit) } mutex_unlock(&pit->pit_state.lock); - atomic_set(&pit->pit_state.pending, 0); - pit->pit_state.irq_ack = 1; + kvm_pit_reset_reinject(pit); } static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask) { struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier); - if (!mask) { - atomic_set(&pit->pit_state.pending, 0); - pit->pit_state.irq_ack = 1; - } + if (!mask) + kvm_pit_reset_reinject(pit); } static const struct kvm_io_device_ops pit_dev_ops = { -- cgit v1.2.3-58-ga151 From ddf54503e2bbed01958cf5fb16ad6378971d2468 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:41 +0100 Subject: KVM: i8254: use atomic_t instead of pit.inject_lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The lock was an overkill, the same can be done with atomics. A mb() was added in kvm_pit_ack_irq, to pair with implicit barrier between pit_timer_fn and pit_do_work. The mb() prevents a race that could happen if pending == 0 and irq_ack == 0: kvm_pit_ack_irq: | pit_timer_fn: p = atomic_read(&ps->pending); | | atomic_inc(&ps->pending); | queue_work(pit_do_work); | pit_do_work: | atomic_xchg(&ps->irq_ack, 0); | return; atomic_set(&ps->irq_ack, 1); | if (p == 0) return; | where the interrupt would not be delivered in this tick of pit_timer_fn. PIT would have eventually delivered the interrupt, but we sacrifice perofmance to make sure that interrupts are not needlessly delayed. sfence isn't enough: atomic_dec_if_positive does atomic_read first and x86 can reorder loads before stores. lfence isn't enough: store can pass lfence, turning it into a nop. A compiler barrier would be more than enough as CPU needs to stall for unbelievably long to use fences. 
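After the change the ack path reads roughly as follows (condensed from the kvm_pit_ack_irq() hunk in the diff below):

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian,
			struct kvm_kpit_state, irq_ack_notifier);

	atomic_set(&ps->irq_ack, 1);
	/* irq_ack must be visible before pending is read; this pairs
	 * with inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in
	 * pit_do_work. */
	smp_mb();
	if (atomic_dec_if_positive(&ps->pending) > 0 && ps->reinject)
		queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
}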
This patch doesn't do anything in kvm_pit_reset_reinject, because any order of resets can race, but the result differs by at most one interrupt, which is ok, because it's the same result as if the reset happened at a slightly different time. (Original code didn't protect the reset path with a proper lock, so users have to be robust.) Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 56 +++++++++++++++++++++------------------------------- arch/x86/kvm/i8254.h | 3 +-- 2 files changed, 24 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index bdbb3f076e72..0f5655c50e0c 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -237,11 +237,13 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, irq_ack_notifier); - spin_lock(&ps->inject_lock); + atomic_set(&ps->irq_ack, 1); + /* irq_ack should be set before pending is read. Order accesses with + * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work. + */ + smp_mb(); if (atomic_dec_if_positive(&ps->pending) > 0 && ps->reinject) queue_kthread_work(&ps->pit->worker, &ps->pit->expired); - ps->irq_ack = 1; - spin_unlock(&ps->inject_lock); } void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) @@ -272,36 +274,25 @@ static void pit_do_work(struct kthread_work *work) struct kvm_vcpu *vcpu; int i; struct kvm_kpit_state *ps = &pit->pit_state; - int inject = 0; - /* Try to inject pending interrupts when - * last one has been acked. + if (ps->reinject && !atomic_xchg(&ps->irq_ack, 0)) + return; + + kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false); + kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false); + + /* + * Provides NMI watchdog support via Virtual Wire mode. + * The route is: PIT -> LVT0 in NMI mode. + * + * Note: Our Virtual Wire implementation does not follow + * the MP specification. We propagate a PIT interrupt to all + * VCPUs and only when LVT0 is in NMI mode. The interrupt can + * also be simultaneously delivered through PIC and IOAPIC. */ - spin_lock(&ps->inject_lock); - if (!ps->reinject) - inject = 1; - else if (ps->irq_ack) { - ps->irq_ack = 0; - inject = 1; - } - spin_unlock(&ps->inject_lock); - if (inject) { - kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false); - kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false); - - /* - * Provides NMI watchdog support via Virtual Wire mode. - * The route is: PIT -> PIC -> LVT0 in NMI mode. - * - * Note: Our Virtual Wire implementation is simplified, only - * propagating PIT interrupts to all VCPUs when they have set - * LVT0 to NMI delivery. Other PIC interrupts are just sent to - * VCPU0, and only if its LVT0 is in EXTINT mode. 
- */ - if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0) - kvm_for_each_vcpu(i, vcpu, kvm) - kvm_apic_nmi_wd_deliver(vcpu); - } + if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0) + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_apic_nmi_wd_deliver(vcpu); } static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) @@ -324,7 +315,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) static inline void kvm_pit_reset_reinject(struct kvm_pit *pit) { atomic_set(&pit->pit_state.pending, 0); - pit->pit_state.irq_ack = 1; + atomic_set(&pit->pit_state.irq_ack, 1); } static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) @@ -691,7 +682,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) mutex_init(&pit->pit_state.lock); mutex_lock(&pit->pit_state.lock); - spin_lock_init(&pit->pit_state.inject_lock); pid = get_pid(task_tgid(current)); pid_nr = pid_vnr(pid); diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index c84990b42b5b..f8cf4b84f435 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -33,8 +33,7 @@ struct kvm_kpit_state { u32 speaker_data_on; struct mutex lock; struct kvm_pit *pit; - spinlock_t inject_lock; - unsigned long irq_ack; + atomic_t irq_ack; struct kvm_irq_ack_notifier irq_ack_notifier; }; -- cgit v1.2.3-58-ga151 From b69d920f68b119bdc0483f0c33d34fd0c57724f5 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:42 +0100 Subject: KVM: i8254: tone down WARN_ON pit.state_lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the guest could hit this, it would hang the host kernel, bacause of sheer number of those reports. Internal callers have to be sensible anyway, so we now only check for it in an API function. Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 0f5655c50e0c..e5a3e8015e30 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -76,8 +76,6 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val) struct kvm_kpit_channel_state *c = &kvm->arch.vpit->pit_state.channels[channel]; - WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); - switch (c->mode) { default: case 0: @@ -99,8 +97,6 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val) static int pit_get_gate(struct kvm *kvm, int channel) { - WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); - return kvm->arch.vpit->pit_state.channels[channel].gate; } @@ -144,8 +140,6 @@ static int pit_get_count(struct kvm *kvm, int channel) s64 d, t; int counter; - WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); - t = kpit_elapsed(kvm, c, channel); d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); @@ -174,8 +168,6 @@ static int pit_get_out(struct kvm *kvm, int channel) s64 d, t; int out; - WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); - t = kpit_elapsed(kvm, c, channel); d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); @@ -207,8 +199,6 @@ static void pit_latch_count(struct kvm *kvm, int channel) struct kvm_kpit_channel_state *c = &kvm->arch.vpit->pit_state.channels[channel]; - WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); - if (!c->count_latched) { c->latched_count = pit_get_count(kvm, channel); c->count_latched = c->rw_mode; @@ -220,8 +210,6 @@ static void pit_latch_status(struct kvm *kvm, int channel) struct kvm_kpit_channel_state *c = 
&kvm->arch.vpit->pit_state.channels[channel]; - WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); - if (!c->status_latched) { /* TODO: Return NULL COUNT (bit 6). */ c->status = ((pit_get_out(kvm, channel) << 7) | @@ -367,8 +355,6 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) { struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; - WARN_ON(!mutex_is_locked(&ps->lock)); - pr_debug("load_count val is %d, channel is %d\n", val, channel); /* @@ -406,6 +392,9 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start) { u8 saved_mode; + + WARN_ON_ONCE(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); + if (hpet_legacy_start) { /* save existing mode for later reenablement */ WARN_ON(channel != 0); -- cgit v1.2.3-58-ga151 From 09edea72b7f9fd8a8d26c1f7504d989b9773ee5e Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:43 +0100 Subject: KVM: i8254: pass struct kvm_pit instead of kvm in PIT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch passes struct kvm_pit into internal PIT functions. Those functions used to get PIT through kvm->arch.vpit, even though most of them never used *kvm for other purposes. Another benefit is that we don't need to set kvm->arch.vpit during initialization. Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 112 ++++++++++++++++++++++++--------------------------- arch/x86/kvm/i8254.h | 4 +- arch/x86/kvm/x86.c | 26 +++++++----- 3 files changed, 70 insertions(+), 72 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index e5a3e8015e30..2afe09b054e7 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -71,10 +71,9 @@ static u64 muldiv64(u64 a, u32 b, u32 c) return res.ll; } -static void pit_set_gate(struct kvm *kvm, int channel, u32 val) +static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val) { - struct kvm_kpit_channel_state *c = - &kvm->arch.vpit->pit_state.channels[channel]; + struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; switch (c->mode) { default: @@ -95,16 +94,16 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val) c->gate = val; } -static int pit_get_gate(struct kvm *kvm, int channel) +static int pit_get_gate(struct kvm_pit *pit, int channel) { - return kvm->arch.vpit->pit_state.channels[channel].gate; + return pit->pit_state.channels[channel].gate; } -static s64 __kpit_elapsed(struct kvm *kvm) +static s64 __kpit_elapsed(struct kvm_pit *pit) { s64 elapsed; ktime_t remaining; - struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; + struct kvm_kpit_state *ps = &pit->pit_state; if (!ps->period) return 0; @@ -124,23 +123,22 @@ static s64 __kpit_elapsed(struct kvm *kvm) return elapsed; } -static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c, +static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c, int channel) { if (channel == 0) - return __kpit_elapsed(kvm); + return __kpit_elapsed(pit); return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time)); } -static int pit_get_count(struct kvm *kvm, int channel) +static int pit_get_count(struct kvm_pit *pit, int channel) { - struct kvm_kpit_channel_state *c = - &kvm->arch.vpit->pit_state.channels[channel]; + struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; s64 d, t; int counter; - t = kpit_elapsed(kvm, c, channel); + t = 
kpit_elapsed(pit, c, channel); d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); switch (c->mode) { @@ -161,14 +159,13 @@ static int pit_get_count(struct kvm *kvm, int channel) return counter; } -static int pit_get_out(struct kvm *kvm, int channel) +static int pit_get_out(struct kvm_pit *pit, int channel) { - struct kvm_kpit_channel_state *c = - &kvm->arch.vpit->pit_state.channels[channel]; + struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; s64 d, t; int out; - t = kpit_elapsed(kvm, c, channel); + t = kpit_elapsed(pit, c, channel); d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); switch (c->mode) { @@ -194,25 +191,23 @@ static int pit_get_out(struct kvm *kvm, int channel) return out; } -static void pit_latch_count(struct kvm *kvm, int channel) +static void pit_latch_count(struct kvm_pit *pit, int channel) { - struct kvm_kpit_channel_state *c = - &kvm->arch.vpit->pit_state.channels[channel]; + struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; if (!c->count_latched) { - c->latched_count = pit_get_count(kvm, channel); + c->latched_count = pit_get_count(pit, channel); c->count_latched = c->rw_mode; } } -static void pit_latch_status(struct kvm *kvm, int channel) +static void pit_latch_status(struct kvm_pit *pit, int channel) { - struct kvm_kpit_channel_state *c = - &kvm->arch.vpit->pit_state.channels[channel]; + struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; if (!c->status_latched) { /* TODO: Return NULL COUNT (bit 6). */ - c->status = ((pit_get_out(kvm, channel) << 7) | + c->status = ((pit_get_out(pit, channel) << 7) | (c->rw_mode << 4) | (c->mode << 1) | c->bcd); @@ -306,9 +301,10 @@ static inline void kvm_pit_reset_reinject(struct kvm_pit *pit) atomic_set(&pit->pit_state.irq_ack, 1); } -static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) +static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period) { - struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; + struct kvm_kpit_state *ps = &pit->pit_state; + struct kvm *kvm = pit->kvm; s64 interval; if (!ioapic_in_kernel(kvm) || @@ -326,9 +322,9 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) ps->is_periodic = is_period; ps->timer.function = pit_timer_fn; - ps->kvm = ps->pit->kvm; + ps->kvm = pit->kvm; - kvm_pit_reset_reinject(ps->pit); + kvm_pit_reset_reinject(pit); /* * Do not allow the guest to program periodic timers with small @@ -351,9 +347,9 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) HRTIMER_MODE_ABS); } -static void pit_load_count(struct kvm *kvm, int channel, u32 val) +static void pit_load_count(struct kvm_pit *pit, int channel, u32 val) { - struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; + struct kvm_kpit_state *ps = &pit->pit_state; pr_debug("load_count val is %d, channel is %d\n", val, channel); @@ -378,32 +374,33 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) case 1: /* FIXME: enhance mode 4 precision */ case 4: - create_pit_timer(kvm, val, 0); + create_pit_timer(pit, val, 0); break; case 2: case 3: - create_pit_timer(kvm, val, 1); + create_pit_timer(pit, val, 1); break; default: - destroy_pit_timer(kvm->arch.vpit); + destroy_pit_timer(pit); } } -void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start) +void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val, + int hpet_legacy_start) { u8 saved_mode; - WARN_ON_ONCE(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock)); + WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock)); if 
(hpet_legacy_start) { /* save existing mode for later reenablement */ WARN_ON(channel != 0); - saved_mode = kvm->arch.vpit->pit_state.channels[0].mode; - kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */ - pit_load_count(kvm, channel, val); - kvm->arch.vpit->pit_state.channels[0].mode = saved_mode; + saved_mode = pit->pit_state.channels[0].mode; + pit->pit_state.channels[0].mode = 0xff; /* disable timer */ + pit_load_count(pit, channel, val); + pit->pit_state.channels[0].mode = saved_mode; } else { - pit_load_count(kvm, channel, val); + pit_load_count(pit, channel, val); } } @@ -429,7 +426,6 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu, { struct kvm_pit *pit = dev_to_pit(this); struct kvm_kpit_state *pit_state = &pit->pit_state; - struct kvm *kvm = pit->kvm; int channel, access; struct kvm_kpit_channel_state *s; u32 val = *(u32 *) data; @@ -453,9 +449,9 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu, s = &pit_state->channels[channel]; if (val & (2 << channel)) { if (!(val & 0x20)) - pit_latch_count(kvm, channel); + pit_latch_count(pit, channel); if (!(val & 0x10)) - pit_latch_status(kvm, channel); + pit_latch_status(pit, channel); } } } else { @@ -463,7 +459,7 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu, s = &pit_state->channels[channel]; access = (val >> 4) & KVM_PIT_CHANNEL_MASK; if (access == 0) { - pit_latch_count(kvm, channel); + pit_latch_count(pit, channel); } else { s->rw_mode = access; s->read_state = access; @@ -480,17 +476,17 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu, switch (s->write_state) { default: case RW_STATE_LSB: - pit_load_count(kvm, addr, val); + pit_load_count(pit, addr, val); break; case RW_STATE_MSB: - pit_load_count(kvm, addr, val << 8); + pit_load_count(pit, addr, val << 8); break; case RW_STATE_WORD0: s->write_latch = val; s->write_state = RW_STATE_WORD1; break; case RW_STATE_WORD1: - pit_load_count(kvm, addr, s->write_latch | (val << 8)); + pit_load_count(pit, addr, s->write_latch | (val << 8)); s->write_state = RW_STATE_WORD0; break; } @@ -506,7 +502,6 @@ static int pit_ioport_read(struct kvm_vcpu *vcpu, { struct kvm_pit *pit = dev_to_pit(this); struct kvm_kpit_state *pit_state = &pit->pit_state; - struct kvm *kvm = pit->kvm; int ret, count; struct kvm_kpit_channel_state *s; if (!pit_in_range(addr)) @@ -543,20 +538,20 @@ static int pit_ioport_read(struct kvm_vcpu *vcpu, switch (s->read_state) { default: case RW_STATE_LSB: - count = pit_get_count(kvm, addr); + count = pit_get_count(pit, addr); ret = count & 0xff; break; case RW_STATE_MSB: - count = pit_get_count(kvm, addr); + count = pit_get_count(pit, addr); ret = (count >> 8) & 0xff; break; case RW_STATE_WORD0: - count = pit_get_count(kvm, addr); + count = pit_get_count(pit, addr); ret = count & 0xff; s->read_state = RW_STATE_WORD1; break; case RW_STATE_WORD1: - count = pit_get_count(kvm, addr); + count = pit_get_count(pit, addr); ret = (count >> 8) & 0xff; s->read_state = RW_STATE_WORD0; break; @@ -577,14 +572,13 @@ static int speaker_ioport_write(struct kvm_vcpu *vcpu, { struct kvm_pit *pit = speaker_to_pit(this); struct kvm_kpit_state *pit_state = &pit->pit_state; - struct kvm *kvm = pit->kvm; u32 val = *(u32 *) data; if (addr != KVM_SPEAKER_BASE_ADDRESS) return -EOPNOTSUPP; mutex_lock(&pit_state->lock); pit_state->speaker_data_on = (val >> 1) & 1; - pit_set_gate(kvm, 2, val & 1); + pit_set_gate(pit, 2, val & 1); mutex_unlock(&pit_state->lock); return 0; } @@ -595,7 +589,6 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu, { struct kvm_pit *pit = 
speaker_to_pit(this); struct kvm_kpit_state *pit_state = &pit->pit_state; - struct kvm *kvm = pit->kvm; unsigned int refresh_clock; int ret; if (addr != KVM_SPEAKER_BASE_ADDRESS) @@ -605,8 +598,8 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu, refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1; mutex_lock(&pit_state->lock); - ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) | - (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4)); + ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) | + (pit_get_out(pit, 2) << 5) | (refresh_clock << 4)); if (len > sizeof(ret)) len = sizeof(ret); memcpy(data, (char *)&ret, len); @@ -625,7 +618,7 @@ void kvm_pit_reset(struct kvm_pit *pit) c = &pit->pit_state.channels[i]; c->mode = 0xff; c->gate = (i != 2); - pit_load_count(pit->kvm, i, 0); + pit_load_count(pit, i, 0); } mutex_unlock(&pit->pit_state.lock); @@ -687,7 +680,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) } init_kthread_work(&pit->expired, pit_do_work); - kvm->arch.vpit = pit; pit->kvm = kvm; pit_state = &pit->pit_state; diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index f8cf4b84f435..a6aceaf08df5 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -56,9 +56,11 @@ struct kvm_pit { #define KVM_MAX_PIT_INTR_INTERVAL HZ / 100 #define KVM_PIT_CHANNEL_MASK 0x3 -void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start); struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags); void kvm_free_pit(struct kvm *kvm); + void kvm_pit_reset(struct kvm_pit *pit); +void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val, + int hpet_legacy_start); #endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f4654e4150b0..a88e1a3eeb69 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3613,11 +3613,13 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int i; - mutex_lock(&kvm->arch.vpit->pit_state.lock); - memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); + struct kvm_pit *pit = kvm->arch.vpit; + + mutex_lock(&pit->pit_state.lock); + memcpy(&pit->pit_state, ps, sizeof(struct kvm_pit_state)); for (i = 0; i < 3; i++) - kvm_pit_load_count(kvm, i, ps->channels[i].count, 0); - mutex_unlock(&kvm->arch.vpit->pit_state.lock); + kvm_pit_load_count(pit, i, ps->channels[i].count, 0); + mutex_unlock(&pit->pit_state.lock); return 0; } @@ -3637,18 +3639,20 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) int start = 0; int i; u32 prev_legacy, cur_legacy; - mutex_lock(&kvm->arch.vpit->pit_state.lock); - prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; + struct kvm_pit *pit = kvm->arch.vpit; + + mutex_lock(&pit->pit_state.lock); + prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; if (!prev_legacy && cur_legacy) start = 1; - memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, - sizeof(kvm->arch.vpit->pit_state.channels)); - kvm->arch.vpit->pit_state.flags = ps->flags; + memcpy(&pit->pit_state.channels, &ps->channels, + sizeof(pit->pit_state.channels)); + pit->pit_state.flags = ps->flags; for (i = 0; i < 3; i++) - kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, + kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, start && i == 0); - mutex_unlock(&kvm->arch.vpit->pit_state.lock); + 
mutex_unlock(&pit->pit_state.lock); return 0; } -- cgit v1.2.3-58-ga151 From b39c90b6560c4458d46cb243abea0dcb7bc126d1 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:44 +0100 Subject: KVM: i8254: remove unnecessary uses of PIT state lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - kvm_create_pit had to lock only because it exposed kvm->arch.vpit very early, but initialization doesn't use kvm->arch.vpit since the last patch, so we can drop locking. - kvm_free_pit is only run after there are no users of KVM and therefore is the sole actor. - Locking in kvm_vm_ioctl_reinject doesn't do anything, because reinject is only protected at that place. - kvm_pit_reset isn't used anywhere and its locking can be dropped if we hide it. Removing useless locking allows to see what actually is being protected by PIT state lock (values accessible from the guest). Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 9 +-------- arch/x86/kvm/i8254.h | 9 +++++---- arch/x86/kvm/x86.c | 4 ++-- 3 files changed, 8 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 2afe09b054e7..b8582fbe4fcf 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -607,12 +607,11 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu, return 0; } -void kvm_pit_reset(struct kvm_pit *pit) +static void kvm_pit_reset(struct kvm_pit *pit) { int i; struct kvm_kpit_channel_state *c; - mutex_lock(&pit->pit_state.lock); pit->pit_state.flags = 0; for (i = 0; i < 3; i++) { c = &pit->pit_state.channels[i]; @@ -620,7 +619,6 @@ void kvm_pit_reset(struct kvm_pit *pit) c->gate = (i != 2); pit_load_count(pit, i, 0); } - mutex_unlock(&pit->pit_state.lock); kvm_pit_reset_reinject(pit); } @@ -663,7 +661,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) } mutex_init(&pit->pit_state.lock); - mutex_lock(&pit->pit_state.lock); pid = get_pid(task_tgid(current)); pid_nr = pid_vnr(pid); @@ -673,7 +670,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker, "kvm-pit/%d", pid_nr); if (IS_ERR(pit->worker_task)) { - mutex_unlock(&pit->pit_state.lock); kvm_free_irq_source_id(kvm, pit->irq_source_id); kfree(pit); return NULL; @@ -689,7 +685,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); pit_state->reinject = true; - mutex_unlock(&pit->pit_state.lock); kvm_pit_reset(pit); @@ -737,13 +732,11 @@ void kvm_free_pit(struct kvm *kvm) &kvm->arch.vpit->mask_notifier); kvm_unregister_irq_ack_notifier(kvm, &kvm->arch.vpit->pit_state.irq_ack_notifier); - mutex_lock(&kvm->arch.vpit->pit_state.lock); timer = &kvm->arch.vpit->pit_state.timer; hrtimer_cancel(timer); flush_kthread_work(&kvm->arch.vpit->expired); kthread_stop(kvm->arch.vpit->worker_task); kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id); - mutex_unlock(&kvm->arch.vpit->pit_state.lock); kfree(kvm->arch.vpit); } } diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index a6aceaf08df5..840fbb3cb626 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -22,17 +22,19 @@ struct kvm_kpit_channel_state { }; struct kvm_kpit_state { + /* All members before "struct mutex lock" are protected by the lock. 
*/ struct kvm_kpit_channel_state channels[3]; u32 flags; bool is_periodic; s64 period; /* unit: ns */ struct hrtimer timer; - atomic_t pending; /* accumulated triggered timers */ - bool reinject; - struct kvm *kvm; u32 speaker_data_on; + struct mutex lock; + struct kvm *kvm; struct kvm_pit *pit; + bool reinject; + atomic_t pending; /* accumulated triggered timers */ atomic_t irq_ack; struct kvm_irq_ack_notifier irq_ack_notifier; }; @@ -59,7 +61,6 @@ struct kvm_pit { struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags); void kvm_free_pit(struct kvm *kvm); -void kvm_pit_reset(struct kvm_pit *pit); void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val, int hpet_legacy_start); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a88e1a3eeb69..ce4e91db5bae 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3661,9 +3661,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm, { if (!kvm->arch.vpit) return -ENXIO; - mutex_lock(&kvm->arch.vpit->pit_state.lock); + kvm->arch.vpit->pit_state.reinject = control->pit_reinject; - mutex_unlock(&kvm->arch.vpit->pit_state.lock); + return 0; } -- cgit v1.2.3-58-ga151 From 71474e2f0f439b83b7b53ee6e9cf4f44c15b5806 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:45 +0100 Subject: KVM: i8254: remove notifiers from PIT discard policy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Discard policy doesn't rely on information from notifiers, so we don't need to register notifiers unconditionally. We kept correct counts in case userspace switched between policies during runtime, but that can be avoided by reseting the state. Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 38 +++++++++++++++++++++++++++----------- arch/x86/kvm/i8254.h | 1 + arch/x86/kvm/x86.c | 12 ++++++++++-- 3 files changed, 38 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index b8582fbe4fcf..7a2f14bdf4b5 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -225,7 +225,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work. */ smp_mb(); - if (atomic_dec_if_positive(&ps->pending) > 0 && ps->reinject) + if (atomic_dec_if_positive(&ps->pending) > 0) queue_kthread_work(&ps->pit->worker, &ps->pit->expired); } @@ -301,6 +301,27 @@ static inline void kvm_pit_reset_reinject(struct kvm_pit *pit) atomic_set(&pit->pit_state.irq_ack, 1); } +void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject) +{ + struct kvm_kpit_state *ps = &pit->pit_state; + struct kvm *kvm = pit->kvm; + + if (ps->reinject == reinject) + return; + + if (reinject) { + /* The initial state is preserved while ps->reinject == 0. 
*/ + kvm_pit_reset_reinject(pit); + kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier); + kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); + } else { + kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier); + kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); + } + + ps->reinject = reinject; +} + static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period) { struct kvm_kpit_state *ps = &pit->pit_state; @@ -681,15 +702,14 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) pit_state = &pit->pit_state; pit_state->pit = pit; hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + pit_state->irq_ack_notifier.gsi = 0; pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; - kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); - pit_state->reinject = true; + pit->mask_notifier.func = pit_mask_notifer; kvm_pit_reset(pit); - pit->mask_notifier.func = pit_mask_notifer; - kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); + kvm_pit_set_reinject(pit, true); kvm_iodevice_init(&pit->dev, &pit_dev_ops); ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS, @@ -712,8 +732,7 @@ fail_unregister: kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev); fail: - kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); - kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); + kvm_pit_set_reinject(pit, false); kvm_free_irq_source_id(kvm, pit->irq_source_id); kthread_stop(pit->worker_task); kfree(pit); @@ -728,10 +747,7 @@ void kvm_free_pit(struct kvm *kvm) kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->dev); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->speaker_dev); - kvm_unregister_irq_mask_notifier(kvm, 0, - &kvm->arch.vpit->mask_notifier); - kvm_unregister_irq_ack_notifier(kvm, - &kvm->arch.vpit->pit_state.irq_ack_notifier); + kvm_pit_set_reinject(kvm->arch.vpit, false); timer = &kvm->arch.vpit->pit_state.timer; hrtimer_cancel(timer); flush_kthread_work(&kvm->arch.vpit->expired); diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index 840fbb3cb626..1945635904a7 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -63,5 +63,6 @@ void kvm_free_pit(struct kvm *kvm); void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val, int hpet_legacy_start); +void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject); #endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ce4e91db5bae..76f9f48898a5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3659,10 +3659,18 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { - if (!kvm->arch.vpit) + struct kvm_pit *pit = kvm->arch.vpit; + + if (!pit) return -ENXIO; - kvm->arch.vpit->pit_state.reinject = control->pit_reinject; + /* pit->pit_state.lock was overloaded to prevent userspace from getting + * an inconsistent state after running multiple KVM_REINJECT_CONTROL + * ioctls in parallel. Use a separate lock if that ioctl isn't rare. 
+ */ + mutex_lock(&pit->pit_state.lock); + kvm_pit_set_reinject(pit, control->pit_reinject); + mutex_unlock(&pit->pit_state.lock); return 0; } -- cgit v1.2.3-58-ga151 From 10d2482126d02682d5d21aa5ecdf76d5f49c6740 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:46 +0100 Subject: KVM: i8254: refactor kvm_create_pit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Locks are gone, so we don't need to duplicate error paths. Use goto everywhere. Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 7a2f14bdf4b5..c24735ae1871 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -676,10 +676,8 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) return NULL; pit->irq_source_id = kvm_request_irq_source_id(kvm); - if (pit->irq_source_id < 0) { - kfree(pit); - return NULL; - } + if (pit->irq_source_id < 0) + goto fail_request; mutex_init(&pit->pit_state.lock); @@ -690,11 +688,9 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) init_kthread_worker(&pit->worker); pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker, "kvm-pit/%d", pid_nr); - if (IS_ERR(pit->worker_task)) { - kvm_free_irq_source_id(kvm, pit->irq_source_id); - kfree(pit); - return NULL; - } + if (IS_ERR(pit->worker_task)) + goto fail_kthread; + init_kthread_work(&pit->expired, pit_do_work); pit->kvm = kvm; @@ -715,7 +711,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS, KVM_PIT_MEM_LENGTH, &pit->dev); if (ret < 0) - goto fail; + goto fail_register_pit; if (flags & KVM_PIT_SPEAKER_DUMMY) { kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops); @@ -723,18 +719,19 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) KVM_SPEAKER_BASE_ADDRESS, 4, &pit->speaker_dev); if (ret < 0) - goto fail_unregister; + goto fail_register_speaker; } return pit; -fail_unregister: +fail_register_speaker: kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev); - -fail: +fail_register_pit: kvm_pit_set_reinject(pit, false); - kvm_free_irq_source_id(kvm, pit->irq_source_id); kthread_stop(pit->worker_task); +fail_kthread: + kvm_free_irq_source_id(kvm, pit->irq_source_id); +fail_request: kfree(pit); return NULL; } -- cgit v1.2.3-58-ga151 From 08e5ccf3ae8f2c5c8319fec521e312f7e775cb84 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:47 +0100 Subject: KVM: i8254: refactor kvm_free_pit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Could be easier to read, but git history will become deeper. 
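Both refactors follow the usual acquire-in-order, unwind-in-reverse shape. For readers new to the label-per-step idiom used in kvm_create_pit above, a self-contained toy version (the struct, file name and labels are placeholders, not KVM code):

    #include <stdio.h>
    #include <stdlib.h>

    struct thing {
        FILE *log;
        char *buf;
    };

    /* one label per setup step; each failure unwinds only what already succeeded */
    struct thing *thing_create(void)
    {
        struct thing *t = malloc(sizeof(*t));
        if (!t)
            goto fail_alloc;

        t->log = fopen("thing.log", "w");
        if (!t->log)
            goto fail_log;

        t->buf = malloc(4096);
        if (!t->buf)
            goto fail_buf;

        return t;

    fail_buf:
        fclose(t->log);
    fail_log:
        free(t);
    fail_alloc:
        return NULL;
    }

The matching destroy path releases the same resources in reverse order, which is what kvm_free_pit above is reduced to.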
Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index c24735ae1871..055018e833b6 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -738,18 +738,16 @@ fail_request: void kvm_free_pit(struct kvm *kvm) { - struct hrtimer *timer; - - if (kvm->arch.vpit) { - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->dev); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, - &kvm->arch.vpit->speaker_dev); - kvm_pit_set_reinject(kvm->arch.vpit, false); - timer = &kvm->arch.vpit->pit_state.timer; - hrtimer_cancel(timer); - flush_kthread_work(&kvm->arch.vpit->expired); - kthread_stop(kvm->arch.vpit->worker_task); - kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id); - kfree(kvm->arch.vpit); + struct kvm_pit *pit = kvm->arch.vpit; + + if (pit) { + kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev); + kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev); + kvm_pit_set_reinject(pit, false); + hrtimer_cancel(&pit->pit_state.timer); + flush_kthread_work(&pit->expired); + kthread_stop(pit->worker_task); + kvm_free_irq_source_id(kvm, pit->irq_source_id); + kfree(pit); } } -- cgit v1.2.3-58-ga151 From a3e1311549593692d98cba1ceb479f070dcd9873 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:48 +0100 Subject: KVM: i8254: remove pit and kvm from kvm_kpit_state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit kvm isn't ever used and pit can be accessed with container_of. If you *really* need kvm, pit_state_to_pit(ps)->kvm. Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 14 +++++++++----- arch/x86/kvm/i8254.h | 2 -- 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 055018e833b6..37e665c5f307 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -215,10 +215,16 @@ static void pit_latch_status(struct kvm_pit *pit, int channel) } } +static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps) +{ + return container_of(ps, struct kvm_pit, pit_state); +} + static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) { struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, irq_ack_notifier); + struct kvm_pit *pit = pit_state_to_pit(ps); atomic_set(&ps->irq_ack, 1); /* irq_ack should be set before pending is read. 
Order accesses with @@ -226,7 +232,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) */ smp_mb(); if (atomic_dec_if_positive(&ps->pending) > 0) - queue_kthread_work(&ps->pit->worker, &ps->pit->expired); + queue_kthread_work(&pit->worker, &pit->expired); } void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) @@ -281,7 +287,7 @@ static void pit_do_work(struct kthread_work *work) static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) { struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer); - struct kvm_pit *pt = ps->kvm->arch.vpit; + struct kvm_pit *pt = pit_state_to_pit(ps); if (ps->reinject) atomic_inc(&ps->pending); @@ -338,12 +344,11 @@ static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period) /* TODO The new value only affected after the retriggered */ hrtimer_cancel(&ps->timer); - flush_kthread_work(&ps->pit->expired); + flush_kthread_work(&pit->expired); ps->period = interval; ps->is_periodic = is_period; ps->timer.function = pit_timer_fn; - ps->kvm = pit->kvm; kvm_pit_reset_reinject(pit); @@ -696,7 +701,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) pit->kvm = kvm; pit_state = &pit->pit_state; - pit_state->pit = pit; hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); pit_state->irq_ack_notifier.gsi = 0; diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index 1945635904a7..f365dce4fb8d 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -31,8 +31,6 @@ struct kvm_kpit_state { u32 speaker_data_on; struct mutex lock; - struct kvm *kvm; - struct kvm_pit *pit; bool reinject; atomic_t pending; /* accumulated triggered timers */ atomic_t irq_ack; -- cgit v1.2.3-58-ga151 From 4a2095df8ab1671078c46d988e1b83baf42db4f6 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:49 +0100 Subject: KVM: i8254: remove pointless dereference of PIT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PIT is known at that point. Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 37e665c5f307..964902b33eed 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -267,8 +267,8 @@ static void pit_do_work(struct kthread_work *work) if (ps->reinject && !atomic_xchg(&ps->irq_ack, 0)) return; - kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false); - kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false); + kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false); + kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false); /* * Provides NMI watchdog support via Virtual Wire mode. -- cgit v1.2.3-58-ga151 From 34f3941c42e22217e39f442e15401ae53a23f8cb Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:50 +0100 Subject: KVM: i8254: don't assume layout of kvm_kpit_state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit channels has offset 0 and correct size now, but that can change. 
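The same guard in a self-contained form; the struct names here are made up, not the KVM ones. The size check plays the role of the BUILD_BUG_ON added below, and because the copy names the member explicitly, its offset inside the larger struct no longer matters:

    #include <assert.h>   /* static_assert, C11 */
    #include <string.h>

    struct uapi_state { unsigned int count[3]; };             /* ABI seen by userspace */
    struct internal_state { unsigned int count[3]; int extra; };

    void export_state(struct uapi_state *dst, const struct internal_state *src)
    {
        static_assert(sizeof(*dst) == sizeof(((struct internal_state *)0)->count),
                      "shared part of internal_state no longer matches the ABI");
        memcpy(dst, src->count, sizeof(*dst));
    }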
Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 76f9f48898a5..60d6c0036a98 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3604,9 +3604,13 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) { - mutex_lock(&kvm->arch.vpit->pit_state.lock); - memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); - mutex_unlock(&kvm->arch.vpit->pit_state.lock); + struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; + + BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); + + mutex_lock(&kps->lock); + memcpy(ps, &kps->channels, sizeof(*ps)); + mutex_unlock(&kps->lock); return 0; } @@ -3616,7 +3620,7 @@ static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) struct kvm_pit *pit = kvm->arch.vpit; mutex_lock(&pit->pit_state.lock); - memcpy(&pit->pit_state, ps, sizeof(struct kvm_pit_state)); + memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); for (i = 0; i < 3; i++) kvm_pit_load_count(pit, i, ps->channels[i].count, 0); mutex_unlock(&pit->pit_state.lock); -- cgit v1.2.3-58-ga151 From ab4c14763b434d48bc7732e475ff4d5b6b9d3e3b Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:51 +0100 Subject: KVM: i8254: move PIT timer function initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can do it just once. Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 964902b33eed..68af4445d51d 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -348,8 +348,6 @@ static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period) ps->period = interval; ps->is_periodic = is_period; - ps->timer.function = pit_timer_fn; - kvm_pit_reset_reinject(pit); /* @@ -702,6 +700,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) pit_state = &pit->pit_state; hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + pit_state->timer.function = pit_timer_fn; pit_state->irq_ack_notifier.gsi = 0; pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; -- cgit v1.2.3-58-ga151 From a0aace5ac0efdb2bcb71e10d9c9ca6a851fa59f9 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Wed, 2 Mar 2016 22:56:52 +0100 Subject: KVM: i8254: turn kvm_kpit_state.reinject into atomic_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Document possible races between readers and concurrent update to the ioctl. 
Suggested-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 8 ++++---- arch/x86/kvm/i8254.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 68af4445d51d..219ef855aae5 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -264,7 +264,7 @@ static void pit_do_work(struct kthread_work *work) int i; struct kvm_kpit_state *ps = &pit->pit_state; - if (ps->reinject && !atomic_xchg(&ps->irq_ack, 0)) + if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0)) return; kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false); @@ -289,7 +289,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer); struct kvm_pit *pt = pit_state_to_pit(ps); - if (ps->reinject) + if (atomic_read(&ps->reinject)) atomic_inc(&ps->pending); queue_kthread_work(&pt->worker, &pt->expired); @@ -312,7 +312,7 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject) struct kvm_kpit_state *ps = &pit->pit_state; struct kvm *kvm = pit->kvm; - if (ps->reinject == reinject) + if (atomic_read(&ps->reinject) == reinject) return; if (reinject) { @@ -325,7 +325,7 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject) kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); } - ps->reinject = reinject; + atomic_set(&ps->reinject, reinject); } static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period) diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index f365dce4fb8d..2f5af0798326 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -31,7 +31,7 @@ struct kvm_kpit_state { u32 speaker_data_on; struct mutex lock; - bool reinject; + atomic_t reinject; atomic_t pending; /* accumulated triggered timers */ atomic_t irq_ack; struct kvm_irq_ack_notifier irq_ack_notifier; -- cgit v1.2.3-58-ga151 From 8f22372f85476bfe612136133d5883b28d163c23 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 26 Feb 2016 12:09:49 +0100 Subject: KVM: VMX: use vmcs_clear/set_bits for debug register exits Reviewed-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index aa16d5874fe6..46154dac71e6 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5619,11 +5619,8 @@ static int handle_dr(struct kvm_vcpu *vcpu) } if (vcpu->guest_debug == 0) { - u32 cpu_based_vm_exec_control; - - cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); - cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING; - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); + vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_MOV_DR_EXITING); /* * No more DR vmexits; force a reload of the debug registers @@ -5660,8 +5657,6 @@ static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) { - u32 cpu_based_vm_exec_control; - get_debugreg(vcpu->arch.db[0], 0); get_debugreg(vcpu->arch.db[1], 1); get_debugreg(vcpu->arch.db[2], 2); @@ -5670,10 +5665,7 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; - - cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); - cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING; - 
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); } static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) -- cgit v1.2.3-58-ga151 From 798e88b31fbe9863163054feb8432e62e77f539c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 23 Feb 2016 15:28:51 +0100 Subject: KVM: MMU: cleanup handle_abnormal_pfn The goto and temporary variable are unnecessary, just use return statements. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 42ca0acc1c4e..bb223f8f3440 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2821,20 +2821,16 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, kvm_pfn_t pfn, unsigned access, int *ret_val) { - bool ret = true; - /* The pfn is invalid, report the error! */ if (unlikely(is_error_pfn(pfn))) { *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); - goto exit; + return true; } if (unlikely(is_noslot_pfn(pfn))) vcpu_cache_mmio_info(vcpu, gva, gfn, access); - ret = false; -exit: - return ret; + return false; } static bool page_fault_can_be_fast(u32 error_code) -- cgit v1.2.3-58-ga151 From 0a47cd85833e56574a926cad309726f4f7859544 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 23 Feb 2016 13:54:25 +0100 Subject: KVM: MMU: Fix ubsan warnings kvm_mmu_pages_init is doing some really yucky stuff. It is setting up a sentinel for mmu_page_clear_parents; however, because of a) the way levels are numbered starting from 1 and b) the way mmu_page_path sizes its arrays with PT64_ROOT_LEVEL-1 elements, the access can be out of bounds. This is harmless because the code overwrites up to the first two elements of parents->idx and these are initialized, and because the sentinel is not needed in this case---mmu_page_clear_parents exits anyway when it gets to the end of the array. However ubsan complains, and everyone else should too. This fix does three things. First it makes the mmu_page_path arrays PT64_ROOT_LEVEL elements in size, so that we can write to them without checking the level in advance. Second it disintegrates kvm_mmu_pages_init between mmu_unsync_walk (to reset the struct kvm_mmu_pages) and for_each_sp (to place the NULL sentinel at the end of the current path). This is okay because the mmu_page_path is only used in mmu_pages_clear_parents; mmu_pages_clear_parents itself is called within a for_each_sp iterator, and hence always after a call to mmu_pages_next. Third it changes mmu_pages_clear_parents to just use the sentinel to stop iteration, without checking the bounds on level. 
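A toy model of the sentinel-terminated walk may help; it is not the MMU code, just the shape of mmu_pages_clear_parents after this patch, with the unsync child bitmap reduced to a counter:

    #include <stdio.h>

    #define MAX_LEVEL 4                     /* stands in for PT64_ROOT_LEVEL */

    struct page { const char *name; int unsync_children; };

    void clear_parents(struct page *parent[MAX_LEVEL])
    {
        int level = 0;
        struct page *p;

        do {
            p = parent[level];
            if (!p)
                return;                     /* NULL sentinel above the topmost parent */
            p->unsync_children--;           /* "clear one child bit" */
            level++;
        } while (p->unsync_children == 0);  /* keep climbing only while parents empty out */
    }

    int main(void)
    {
        struct page mid  = { "mid",  1 };
        struct page root = { "root", 1 };
        /* MAX_LEVEL slots, so the sentinel slot exists without going out of bounds */
        struct page *parent[MAX_LEVEL] = { &mid, &root, NULL };

        clear_parents(parent);              /* clears mid, then root, then stops at NULL */
        printf("mid=%d root=%d\n", mid.unsync_children, root.unsync_children);
        return 0;
    }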
Reported-by: Sasha Levin Reported-by: Mike Krinkin Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 57 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index bb223f8f3440..609fa5322f6a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1873,6 +1873,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp, static int mmu_unsync_walk(struct kvm_mmu_page *sp, struct kvm_mmu_pages *pvec) { + pvec->nr = 0; if (!sp->unsync_children) return 0; @@ -1986,13 +1987,12 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) } struct mmu_page_path { - struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1]; - unsigned int idx[PT64_ROOT_LEVEL-1]; + struct kvm_mmu_page *parent[PT64_ROOT_LEVEL]; + unsigned int idx[PT64_ROOT_LEVEL]; }; #define for_each_sp(pvec, sp, parents, i) \ - for (i = mmu_pages_next(&pvec, &parents, -1), \ - sp = pvec.page[i].sp; \ + for (i = mmu_pages_first(&pvec, &parents); \ i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ i = mmu_pages_next(&pvec, &parents, i)) @@ -2004,19 +2004,41 @@ static int mmu_pages_next(struct kvm_mmu_pages *pvec, for (n = i+1; n < pvec->nr; n++) { struct kvm_mmu_page *sp = pvec->page[n].sp; + unsigned idx = pvec->page[n].idx; + int level = sp->role.level; - if (sp->role.level == PT_PAGE_TABLE_LEVEL) { - parents->idx[0] = pvec->page[n].idx; - return n; - } + parents->idx[level-1] = idx; + if (level == PT_PAGE_TABLE_LEVEL) + break; - parents->parent[sp->role.level-2] = sp; - parents->idx[sp->role.level-1] = pvec->page[n].idx; + parents->parent[level-2] = sp; } return n; } +static int mmu_pages_first(struct kvm_mmu_pages *pvec, + struct mmu_page_path *parents) +{ + struct kvm_mmu_page *sp; + int level; + + if (pvec->nr == 0) + return 0; + + sp = pvec->page[0].sp; + level = sp->role.level; + WARN_ON(level == PT_PAGE_TABLE_LEVEL); + + parents->parent[level-2] = sp; + + /* Also set up a sentinel. Further entries in pvec are all + * children of sp, so this element is never overwritten. 
+ */ + parents->parent[level-1] = NULL; + return mmu_pages_next(pvec, parents, 0); +} + static void mmu_pages_clear_parents(struct mmu_page_path *parents) { struct kvm_mmu_page *sp; @@ -2024,22 +2046,13 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents) do { unsigned int idx = parents->idx[level]; - sp = parents->parent[level]; if (!sp) return; clear_unsync_child_bit(sp, idx); level++; - } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); -} - -static void kvm_mmu_pages_init(struct kvm_mmu_page *parent, - struct mmu_page_path *parents, - struct kvm_mmu_pages *pvec) -{ - parents->parent[parent->role.level-1] = NULL; - pvec->nr = 0; + } while (!sp->unsync_children); } static void mmu_sync_children(struct kvm_vcpu *vcpu, @@ -2051,7 +2064,6 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_pages pages; LIST_HEAD(invalid_list); - kvm_mmu_pages_init(parent, &parents, &pages); while (mmu_unsync_walk(parent, &pages)) { bool protected = false; @@ -2067,7 +2079,6 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, } kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); cond_resched_lock(&vcpu->kvm->mmu_lock); - kvm_mmu_pages_init(parent, &parents, &pages); } } @@ -2305,7 +2316,6 @@ static int mmu_zap_unsync_children(struct kvm *kvm, if (parent->role.level == PT_PAGE_TABLE_LEVEL) return 0; - kvm_mmu_pages_init(parent, &parents, &pages); while (mmu_unsync_walk(parent, &pages)) { struct kvm_mmu_page *sp; @@ -2314,7 +2324,6 @@ static int mmu_zap_unsync_children(struct kvm *kvm, mmu_pages_clear_parents(&parents); zapped++; } - kvm_mmu_pages_init(parent, &parents, &pages); } return zapped; -- cgit v1.2.3-58-ga151 From e23d3fef83df8e303fded0ab55b379beec0dd604 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 24 Feb 2016 09:46:06 +0100 Subject: KVM: MMU: check kvm_mmu_pages and mmu_page_path indices Give a special invalid index to the root of the walk, so that we can check the consistency of kvm_mmu_pages and mmu_page_path. Signed-off-by: Xiao Guangrong [Extracted from a bigger patch proposed by Guangrong. 
- Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 609fa5322f6a..0a4dc9b54181 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1870,6 +1870,8 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp, return nr_unsync_leaf; } +#define INVALID_INDEX (-1) + static int mmu_unsync_walk(struct kvm_mmu_page *sp, struct kvm_mmu_pages *pvec) { @@ -1877,7 +1879,7 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp, if (!sp->unsync_children) return 0; - mmu_pages_add(pvec, sp, 0); + mmu_pages_add(pvec, sp, INVALID_INDEX); return __mmu_unsync_walk(sp, pvec); } @@ -2026,6 +2028,8 @@ static int mmu_pages_first(struct kvm_mmu_pages *pvec, if (pvec->nr == 0) return 0; + WARN_ON(pvec->page[0].idx != INVALID_INDEX); + sp = pvec->page[0].sp; level = sp->role.level; WARN_ON(level == PT_PAGE_TABLE_LEVEL); @@ -2050,6 +2054,7 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents) if (!sp) return; + WARN_ON(idx == INVALID_INDEX); clear_unsync_child_bit(sp, idx); level++; } while (!sp->unsync_children); -- cgit v1.2.3-58-ga151 From 0e4d44151af7c8fca3d15c27d9b97d4ac41c102b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 4 Mar 2016 09:28:41 +0100 Subject: KVM: i8254: drop local copy of mul_u64_u32_div MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A function that does the same as i8254.c's muldiv64 has been added (for KVM's own use, in fact!) in include/linux/math64.h. Use it instead of muldiv64. Reviewed-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/kvm/i8254.c | 26 +++----------------------- 1 file changed, 3 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 219ef855aae5..a4bf5b45d65a 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -51,26 +51,6 @@ #define RW_STATE_WORD0 3 #define RW_STATE_WORD1 4 -/* Compute with 96 bit intermediate result: (a*b)/c */ -static u64 muldiv64(u64 a, u32 b, u32 c) -{ - union { - u64 ll; - struct { - u32 low, high; - } l; - } u, res; - u64 rl, rh; - - u.ll = a; - rl = (u64)u.l.low * (u64)b; - rh = (u64)u.l.high * (u64)b; - rh += (rl >> 32); - res.l.high = div64_u64(rh, c); - res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c); - return res.ll; -} - static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val) { struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; @@ -139,7 +119,7 @@ static int pit_get_count(struct kvm_pit *pit, int channel) int counter; t = kpit_elapsed(pit, c, channel); - d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); + d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC); switch (c->mode) { case 0: @@ -166,7 +146,7 @@ static int pit_get_out(struct kvm_pit *pit, int channel) int out; t = kpit_elapsed(pit, c, channel); - d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC); + d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC); switch (c->mode) { default: @@ -338,7 +318,7 @@ static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period) ps->flags & KVM_PIT_FLAGS_HPET_LEGACY) return; - interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); + interval = mul_u64_u32_div(val, NSEC_PER_SEC, KVM_PIT_FREQ); pr_debug("create pit timer, interval is %llu nsec\n", interval); -- cgit v1.2.3-58-ga151 From b8c67b7a086c9b2f96a2cf95624a9000a6be2922 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 24 Feb 2016 11:21:55 +0100 
Subject: KVM: MMU: introduce kvm_mmu_flush_or_zap This is a generalization of mmu_pte_write_flush_tlb, that also takes care of calling kvm_mmu_commit_zap_page. The next patches will introduce more uses. Reviewed-by: Takuya Yoshikawa Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 0a4dc9b54181..6dae2356b9f5 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4188,11 +4188,14 @@ static bool need_remote_flush(u64 old, u64 new) return (old & ~new & PT64_PERM_MASK) != 0; } -static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page, - bool remote_flush, bool local_flush) +static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, + struct list_head *invalid_list, + bool remote_flush, bool local_flush) { - if (zap_page) + if (!list_empty(invalid_list)) { + kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list); return; + } if (remote_flush) kvm_flush_remote_tlbs(vcpu->kvm); @@ -4320,7 +4323,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, LIST_HEAD(invalid_list); u64 entry, gentry, *spte; int npte; - bool remote_flush, local_flush, zap_page; + bool remote_flush, local_flush; union kvm_mmu_page_role mask = { }; mask.cr0_wp = 1; @@ -4337,7 +4340,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) return; - zap_page = remote_flush = local_flush = false; + remote_flush = local_flush = false; pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); @@ -4357,8 +4360,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { if (detect_write_misaligned(sp, gpa, bytes) || detect_write_flooding(sp)) { - zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, - &invalid_list); + kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); ++vcpu->kvm->stat.mmu_flooded; continue; } @@ -4380,8 +4382,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, ++spte; } } - mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush); - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); + kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); spin_unlock(&vcpu->kvm->mmu_lock); } -- cgit v1.2.3-58-ga151 From 35a70510ee13b0ab130968ed399eeeb5106bc12e Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 24 Feb 2016 10:03:27 +0100 Subject: KVM: MMU: move TLB flush out of __kvm_sync_page By doing this, kvm_sync_pages can use __kvm_sync_page instead of reinventing it. Because of kvm_mmu_flush_or_zap, the code does not end up being more complex than before, and more cleanups to kvm_sync_pages will come in the next patches. 
Reviewed-by: Takuya Yoshikawa Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 53 ++++++++++++++++++++++++----------------------------- 1 file changed, 24 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 6dae2356b9f5..45a8a0605a09 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1932,10 +1932,24 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, return 1; } - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); return 0; } +static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, + struct list_head *invalid_list, + bool remote_flush, bool local_flush) +{ + if (!list_empty(invalid_list)) { + kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list); + return; + } + + if (remote_flush) + kvm_flush_remote_tlbs(vcpu->kvm); + else if (local_flush) + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); +} + static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { @@ -1943,8 +1957,7 @@ static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, int ret; ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); - if (ret) - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, !ret); return ret; } @@ -1975,17 +1988,11 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); kvm_unlink_unsync_page(vcpu->kvm, s); - if ((s->role.cr4_pae != !!is_pae(vcpu)) || - (vcpu->arch.mmu.sync_page(vcpu, s))) { - kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); - continue; - } - flush = true; + if (!__kvm_sync_page(vcpu, s, &invalid_list, false)) + flush = true; } - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); - if (flush) - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); } struct mmu_page_path { @@ -2071,6 +2078,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, while (mmu_unsync_walk(parent, &pages)) { bool protected = false; + bool flush = false; for_each_sp(pages, sp, parents, i) protected |= rmap_write_protect(vcpu, sp->gfn); @@ -2079,10 +2087,12 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, kvm_flush_remote_tlbs(vcpu->kvm); for_each_sp(pages, sp, parents, i) { - kvm_sync_page(vcpu, sp, &invalid_list); + if (!kvm_sync_page(vcpu, sp, &invalid_list)) + flush = true; + mmu_pages_clear_parents(&parents); } - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); cond_resched_lock(&vcpu->kvm->mmu_lock); } } @@ -4188,21 +4198,6 @@ static bool need_remote_flush(u64 old, u64 new) return (old & ~new & PT64_PERM_MASK) != 0; } -static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, - struct list_head *invalid_list, - bool remote_flush, bool local_flush) -{ - if (!list_empty(invalid_list)) { - kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list); - return; - } - - if (remote_flush) - kvm_flush_remote_tlbs(vcpu->kvm); - else if (local_flush) - kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); -} - static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, const u8 *new, int *bytes) { -- cgit v1.2.3-58-ga151 From df748f864a342375aaa52a7e043fae7142376d01 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 24 Feb 2016 10:19:30 +0100 Subject: KVM: MMU: use kvm_sync_page in kvm_sync_pages If the last argument is true, kvm_unlink_unsync_page is called anyway in __kvm_sync_page (either by kvm_mmu_prepare_zap_page or by __kvm_sync_page itself). 
Therefore, kvm_sync_pages can just call kvm_sync_page, instead of going through kvm_unlink_unsync_page+__kvm_sync_page. Reviewed-by: Takuya Yoshikawa Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 45a8a0605a09..56be33714036 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1987,8 +1987,7 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) continue; WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); - kvm_unlink_unsync_page(vcpu->kvm, s); - if (!__kvm_sync_page(vcpu, s, &invalid_list, false)) + if (!kvm_sync_page(vcpu, s, &invalid_list)) flush = true; } -- cgit v1.2.3-58-ga151 From 9a43c5d9c3f13a2fc3864570e33438347319b584 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 24 Feb 2016 10:28:01 +0100 Subject: KVM: MMU: cleanup __kvm_sync_page and its callers Calling kvm_unlink_unsync_page in the middle of __kvm_sync_page makes things unnecessarily tricky. If kvm_mmu_prepare_zap_page is called, it will call kvm_unlink_unsync_page too. So kvm_unlink_unsync_page can be called just as well at the beginning or the end of __kvm_sync_page... which means that we might do it in kvm_sync_page too and remove the parameter. kvm_sync_page ends up being the same code that kvm_sync_pages used to have before the previous patch. Reviewed-by: Takuya Yoshikawa Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 56be33714036..88a1a79c869e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1917,16 +1917,13 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm, /* @sp->gfn should be write-protected at the call site */ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - struct list_head *invalid_list, bool clear_unsync) + struct list_head *invalid_list) { if (sp->role.cr4_pae != !!is_pae(vcpu)) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); return 1; } - if (clear_unsync) - kvm_unlink_unsync_page(vcpu->kvm, sp); - if (vcpu->arch.mmu.sync_page(vcpu, sp)) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); return 1; @@ -1956,7 +1953,7 @@ static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, LIST_HEAD(invalid_list); int ret; - ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); + ret = __kvm_sync_page(vcpu, sp, &invalid_list); kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, !ret); return ret; @@ -1972,7 +1969,8 @@ static void mmu_audit_disable(void) { } static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { - return __kvm_sync_page(vcpu, sp, invalid_list, true); + kvm_unlink_unsync_page(vcpu->kvm, sp); + return __kvm_sync_page(vcpu, sp, invalid_list); } /* @gfn should be write-protected at the call site */ -- cgit v1.2.3-58-ga151 From 1f50f1b3a4630966fb3cd1f56892acb03580bd37 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 24 Feb 2016 11:07:14 +0100 Subject: KVM: MMU: invert return value of mmu.sync_page and *kvm_sync_page* Return true if the page was synced (and the TLB must be flushed) and false if the page was zapped. 
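In a simplified before/after sketch (taken from the callers touched below), code that used to read

	if (!kvm_sync_page(vcpu, sp, &invalid_list))
		flush = true;

can now accumulate the result directly:

	flush |= kvm_sync_page(vcpu, sp, &invalid_list);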
Reviewed-by: Takuya Yoshikawa Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 31 ++++++++++++++----------------- arch/x86/kvm/paging_tmpl.h | 4 ++-- 2 files changed, 16 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 88a1a79c869e..1c87102efb3d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1784,7 +1784,7 @@ static void mark_unsync(u64 *spte) static int nonpaging_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { - return 1; + return 0; } static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) @@ -1916,20 +1916,20 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm, if ((_sp)->role.direct || (_sp)->role.invalid) {} else /* @sp->gfn should be write-protected at the call site */ -static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - struct list_head *invalid_list) +static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, + struct list_head *invalid_list) { if (sp->role.cr4_pae != !!is_pae(vcpu)) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); - return 1; + return false; } - if (vcpu->arch.mmu.sync_page(vcpu, sp)) { + if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); - return 1; + return false; } - return 0; + return true; } static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, @@ -1947,14 +1947,14 @@ static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } -static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp) +static bool kvm_sync_page_transient(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp) { LIST_HEAD(invalid_list); int ret; ret = __kvm_sync_page(vcpu, sp, &invalid_list); - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, !ret); + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, ret); return ret; } @@ -1966,7 +1966,7 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } static void mmu_audit_disable(void) { } #endif -static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, +static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { kvm_unlink_unsync_page(vcpu->kvm, sp); @@ -1985,8 +1985,7 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) continue; WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); - if (!kvm_sync_page(vcpu, s, &invalid_list)) - flush = true; + flush |= kvm_sync_page(vcpu, s, &invalid_list); } kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); @@ -2084,9 +2083,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, kvm_flush_remote_tlbs(vcpu->kvm); for_each_sp(pages, sp, parents, i) { - if (!kvm_sync_page(vcpu, sp, &invalid_list)) - flush = true; - + flush |= kvm_sync_page(vcpu, sp, &invalid_list); mmu_pages_clear_parents(&parents); } kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); @@ -2145,7 +2142,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, if (sp->role.word != role.word) continue; - if (sp->unsync && kvm_sync_page_transient(vcpu, sp)) + if (sp->unsync && !kvm_sync_page_transient(vcpu, sp)) break; if (sp->unsync_children) diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 4174cf290fa3..a1f5459edcec 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -943,7 +943,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, sizeof(pt_element_t))) 
- return -EINVAL; + return 0; if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { vcpu->kvm->tlbs_dirty++; @@ -975,7 +975,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) host_writable); } - return !nr_present; + return nr_present; } #undef pt_element_t -- cgit v1.2.3-58-ga151 From 2a74003ae818cfaf129ed4df89499a1c2b796129 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 24 Feb 2016 11:26:10 +0100 Subject: KVM: MMU: move zap/flush to kvm_mmu_get_page kvm_mmu_get_page is the only caller of kvm_sync_page_transient and kvm_sync_pages. Moving the handling of the invalid_list there removes the need for the underdocumented kvm_sync_page_transient function. Reviewed-by: Takuya Yoshikawa Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 1c87102efb3d..fecc9c51d924 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1947,18 +1947,6 @@ static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } -static bool kvm_sync_page_transient(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp) -{ - LIST_HEAD(invalid_list); - int ret; - - ret = __kvm_sync_page(vcpu, sp, &invalid_list); - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, ret); - - return ret; -} - #ifdef CONFIG_KVM_MMU_AUDIT #include "mmu_audit.c" #else @@ -1974,21 +1962,21 @@ static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, } /* @gfn should be write-protected at the call site */ -static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) +static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, + struct list_head *invalid_list) { struct kvm_mmu_page *s; - LIST_HEAD(invalid_list); - bool flush = false; + bool ret = false; for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { if (!s->unsync) continue; WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); - flush |= kvm_sync_page(vcpu, s, &invalid_list); + ret |= kvm_sync_page(vcpu, s, invalid_list); } - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); + return ret; } struct mmu_page_path { @@ -2119,6 +2107,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, unsigned quadrant; struct kvm_mmu_page *sp; bool need_sync = false; + bool flush = false; + LIST_HEAD(invalid_list); role = vcpu->arch.mmu.base_role; role.level = level; @@ -2142,8 +2132,16 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, if (sp->role.word != role.word) continue; - if (sp->unsync && !kvm_sync_page_transient(vcpu, sp)) - break; + if (sp->unsync) { + /* The page is good, but __kvm_sync_page might still end + * up zapping it. If so, break in order to rebuild it. 
+ */ + if (!__kvm_sync_page(vcpu, sp, &invalid_list)) + break; + + WARN_ON(!list_empty(&invalid_list)); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } if (sp->unsync_children) kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); @@ -2173,11 +2171,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, kvm_flush_remote_tlbs(vcpu->kvm); if (level > PT_PAGE_TABLE_LEVEL && need_sync) - kvm_sync_pages(vcpu, gfn); + flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); } sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; clear_page(sp->spt); trace_kvm_mmu_get_page(sp, true); + + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); return sp; } -- cgit v1.2.3-58-ga151 From 50c9e6f3a69dfa458ecb671bcbd11e2eea6db0c1 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 25 Feb 2016 10:47:38 +0100 Subject: KVM: MMU: coalesce more page zapping in mmu_sync_children mmu_sync_children can only process up to 16 pages at a time. Check if we need to reschedule, and do not bother zapping the pages until that happens. Reviewed-by: Takuya Yoshikawa Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index fecc9c51d924..754d2c4f6f99 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2059,24 +2059,31 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, struct mmu_page_path parents; struct kvm_mmu_pages pages; LIST_HEAD(invalid_list); + bool flush = false; while (mmu_unsync_walk(parent, &pages)) { bool protected = false; - bool flush = false; for_each_sp(pages, sp, parents, i) protected |= rmap_write_protect(vcpu, sp->gfn); - if (protected) + if (protected) { kvm_flush_remote_tlbs(vcpu->kvm); + flush = false; + } for_each_sp(pages, sp, parents, i) { flush |= kvm_sync_page(vcpu, sp, &invalid_list); mmu_pages_clear_parents(&parents); } - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); - cond_resched_lock(&vcpu->kvm->mmu_lock); + if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); + cond_resched_lock(&vcpu->kvm->mmu_lock); + flush = false; + } } + + kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); } static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) -- cgit v1.2.3-58-ga151 From 6bb69c9b69c315200ddc2bc79aee14c0184cf5b2 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 23 Feb 2016 12:51:19 +0100 Subject: KVM: MMU: simplify last_pte_bitmap Branch-free code is fun and everybody knows how much Avi loves it, but last_pte_bitmap takes it a bit to the extreme. Since the code is simply doing a range check, like (level == 1 || ((gpte & PT_PAGE_SIZE_MASK) && level < N) we can make it branch-free without storing the entire truth table; it is enough to cache N. Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 8 ++----- arch/x86/kvm/mmu.c | 50 +++++++++++++++++++++-------------------- 2 files changed, 28 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1c3e390993a2..d110dc44d6c2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -347,12 +347,8 @@ struct kvm_mmu { struct rsvd_bits_validate guest_rsvd_check; - /* - * Bitmap: bit set = last pte in walk - * index[0:1]: level (zero-based) - * index[2]: pte.ps - */ - u8 last_pte_bitmap; + /* Can have large pages at levels 2..last_nonleaf_level-1. 
*/ + u8 last_nonleaf_level; bool nx; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 754d2c4f6f99..2463de0b935c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3632,13 +3632,24 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, return false; } -static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte) +static inline bool is_last_gpte(struct kvm_mmu *mmu, + unsigned level, unsigned gpte) { - unsigned index; + /* + * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set + * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means + * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then. + */ + gpte |= level - PT_PAGE_TABLE_LEVEL - 1; - index = level - 1; - index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2); - return mmu->last_pte_bitmap & (1 << index); + /* + * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. + * If it is clear, there are no large pages at this level, so clear + * PT_PAGE_SIZE_MASK in gpte if that is the case. + */ + gpte &= level - mmu->last_nonleaf_level; + + return gpte & PT_PAGE_SIZE_MASK; } #define PTTYPE_EPT 18 /* arbitrary */ @@ -3910,22 +3921,13 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, } } -static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) +static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) { - u8 map; - unsigned level, root_level = mmu->root_level; - const unsigned ps_set_index = 1 << 2; /* bit 2 of index: ps */ - - if (root_level == PT32E_ROOT_LEVEL) - --root_level; - /* PT_PAGE_TABLE_LEVEL always terminates */ - map = 1 | (1 << ps_set_index); - for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) { - if (level <= PT_PDPE_LEVEL - && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu))) - map |= 1 << (ps_set_index | (level - 1)); - } - mmu->last_pte_bitmap = map; + unsigned root_level = mmu->root_level; + + mmu->last_nonleaf_level = root_level; + if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu)) + mmu->last_nonleaf_level++; } static void paging64_init_context_common(struct kvm_vcpu *vcpu, @@ -3937,7 +3939,7 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu, reset_rsvds_bits_mask(vcpu, context); update_permission_bitmask(vcpu, context, false); - update_last_pte_bitmap(vcpu, context); + update_last_nonleaf_level(vcpu, context); MMU_WARN_ON(!is_pae(vcpu)); context->page_fault = paging64_page_fault; @@ -3964,7 +3966,7 @@ static void paging32_init_context(struct kvm_vcpu *vcpu, reset_rsvds_bits_mask(vcpu, context); update_permission_bitmask(vcpu, context, false); - update_last_pte_bitmap(vcpu, context); + update_last_nonleaf_level(vcpu, context); context->page_fault = paging32_page_fault; context->gva_to_gpa = paging32_gva_to_gpa; @@ -4022,7 +4024,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) } update_permission_bitmask(vcpu, context, false); - update_last_pte_bitmap(vcpu, context); + update_last_nonleaf_level(vcpu, context); reset_tdp_shadow_zero_bits_mask(vcpu, context); } @@ -4128,7 +4130,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) } update_permission_bitmask(vcpu, g_context, false); - update_last_pte_bitmap(vcpu, g_context); + update_last_nonleaf_level(vcpu, g_context); } static void init_kvm_mmu(struct kvm_vcpu *vcpu) -- cgit v1.2.3-58-ga151 From bb9eadf0c35f2e7eb5ca6468f46ebb7473b85537 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 23 Feb 2016 14:19:20 +0100 Subject: KVM: MMU: micro-optimize 
gpte_access Avoid AND-NOT, most x86 processor lack an instruction for it. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/paging_tmpl.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index a1f5459edcec..6013f3685ef4 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -189,8 +189,11 @@ static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte) ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) | ACC_USER_MASK; #else - access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK; - access &= ~(gpte >> PT64_NX_SHIFT); + BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK); + BUILD_BUG_ON(ACC_EXEC_MASK != 1); + access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK); + /* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */ + access ^= (gpte >> PT64_NX_SHIFT); #endif return access; -- cgit v1.2.3-58-ga151 From dce382b6c72717b610741340f05e959764b851b8 Mon Sep 17 00:00:00 2001 From: Alexander Yarygin Date: Fri, 19 Feb 2016 12:21:26 +0300 Subject: KVM: s390: Add diag "watchdog functions" to trace event decoding DIAG 0x288 may occur now. Let's add its code to the diag table in sie.h. Signed-off-by: Alexander Yarygin Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/sie.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h index ee69c0854c88..5dbaa72baa64 100644 --- a/arch/s390/include/uapi/asm/sie.h +++ b/arch/s390/include/uapi/asm/sie.h @@ -7,6 +7,7 @@ { 0x9c, "DIAG (0x9c) time slice end directed" }, \ { 0x204, "DIAG (0x204) logical-cpu utilization" }, \ { 0x258, "DIAG (0x258) page-reference services" }, \ + { 0x288, "DIAG (0x288) watchdog functions" }, \ { 0x308, "DIAG (0x308) ipl functions" }, \ { 0x500, "DIAG (0x500) KVM virtio functions" }, \ { 0x501, "DIAG (0x501) KVM breakpoint" } -- cgit v1.2.3-58-ga151 From 01a745ac8b6c5d323a37194c242f7c77f3402469 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 12 Feb 2016 20:41:56 +0100 Subject: KVM: s390: store cpu id in vcpu->cpu when scheduled in By storing the cpu id, we have a way to verify if the current cpu is owning a VCPU. Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 28bd5ea1b08f..bd5edb138479 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1449,10 +1449,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); + vcpu->cpu = cpu; } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { + vcpu->cpu = -1; atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); -- cgit v1.2.3-58-ga151 From 4287f247f6cfaea0ed73b5104e94cd737e1ac0ae Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 15 Feb 2016 09:40:12 +0100 Subject: KVM: s390: abstract access to the VCPU cpu timer We want to manually step the cpu timer in certain scenarios in the future. Let's abstract any access to the cpu timer, so we can hide the complexity internally. 
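For now the accessors are only thin wrappers around the SIE control block field; roughly (see the patch below for the exact prototypes and locking comments):

	void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
	{
		vcpu->arch.sie_block->cputm = cputm;
	}

	__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.sie_block->cputm;
	}

The point of the indirection is that later patches can add accounting logic here without touching the callers.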
Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 5 +++-- arch/s390/kvm/kvm-s390.c | 31 +++++++++++++++++++++++-------- arch/s390/kvm/kvm-s390.h | 2 ++ 3 files changed, 28 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 87e2d1a89d74..4604e9accc65 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -182,8 +182,9 @@ static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu) static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) { - return (vcpu->arch.sie_block->cputm >> 63) && - cpu_timer_interrupts_enabled(vcpu); + if (!cpu_timer_interrupts_enabled(vcpu)) + return 0; + return kvm_s390_get_cpu_timer(vcpu) >> 63; } static inline int is_ioirq(unsigned long irq_type) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index bd5edb138479..2118a2250ac7 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1429,6 +1429,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) return 0; } +/* set the cpu timer - may only be called from the VCPU thread itself */ +void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm) +{ + vcpu->arch.sie_block->cputm = cputm; +} + +/* get the cpu timer - can also be called from other VCPU threads */ +__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.sie_block->cputm; +} + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Save host register state */ @@ -1476,7 +1488,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->gpsw.mask = 0UL; vcpu->arch.sie_block->gpsw.addr = 0UL; kvm_s390_set_prefix(vcpu, 0); - vcpu->arch.sie_block->cputm = 0UL; + kvm_s390_set_cpu_timer(vcpu, 0); vcpu->arch.sie_block->ckc = 0UL; vcpu->arch.sie_block->todpr = 0; memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); @@ -1723,7 +1735,7 @@ static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, (u64 __user *)reg->addr); break; case KVM_REG_S390_CPU_TIMER: - r = put_user(vcpu->arch.sie_block->cputm, + r = put_user(kvm_s390_get_cpu_timer(vcpu), (u64 __user *)reg->addr); break; case KVM_REG_S390_CLOCK_COMP: @@ -1761,6 +1773,7 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) { int r = -EINVAL; + __u64 val; switch (reg->id) { case KVM_REG_S390_TODPR: @@ -1772,8 +1785,9 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, (u64 __user *)reg->addr); break; case KVM_REG_S390_CPU_TIMER: - r = get_user(vcpu->arch.sie_block->cputm, - (u64 __user *)reg->addr); + r = get_user(val, (u64 __user *)reg->addr); + if (!r) + kvm_s390_set_cpu_timer(vcpu, val); break; case KVM_REG_S390_CLOCK_COMP: r = get_user(vcpu->arch.sie_block->ckc, @@ -2290,7 +2304,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { - vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; + kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; @@ -2312,7 +2326,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); - kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; + 
kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; @@ -2383,7 +2397,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) unsigned char archmode = 1; freg_t fprs[NUM_FPRS]; unsigned int px; - u64 clkcomp; + u64 clkcomp, cputm; int rc; px = kvm_s390_get_prefix(vcpu); @@ -2417,8 +2431,9 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) &vcpu->run->s.regs.fpc, 4); rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA, &vcpu->arch.sie_block->todpr, 4); + cputm = kvm_s390_get_cpu_timer(vcpu); rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA, - &vcpu->arch.sie_block->cputm, 8); + &cputm, 8); clkcomp = vcpu->arch.sie_block->ckc >> 8; rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA, &clkcomp, 8); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 1c756c7dd0c2..9787299d9a29 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -263,6 +263,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); unsigned long kvm_s390_fac_list_mask_size(void); extern unsigned long kvm_s390_fac_list_mask[]; +void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm); +__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu); /* implemented in diag.c */ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); -- cgit v1.2.3-58-ga151 From db0758b29709815d93a963e31e2ec87ecf74f8bd Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 15 Feb 2016 09:42:25 +0100 Subject: KVM: s390: step VCPU cpu timer during kvm_run ioctl Architecturally we should only provide steal time if we are scheduled away, and not if the host interprets a guest exit. We have to step the guest CPU timer in these cases. In the first shot, we will step the VCPU timer only during the kvm_run ioctl. Therefore all time spent e.g. in interception handlers or on irq delivery will be accounted for that VCPU. We have to take care of a few special cases: - Other VCPUs can test for pending irqs. We can only report a consistent value for the VCPU thread itself when adding the delta. - We have to take care of STP sync, therefore we have to extend kvm_clock_sync() and disable preemption accordingly - During any call to disable/enable/start/stop we could get premeempted and therefore get start/stop calls. Therefore we have to make sure we don't get into an inconsistent state. Whenever a VCPU is scheduled out, sleeping, in user space or just about to enter the SIE, the guest cpu timer isn't stepped. Please note that all primitives are prepared to be called from both environments (cpu timer accounting enabled or not), although not completely used in this patch yet (e.g. kvm_s390_set_cpu_timer() will never be called while cpu timer accounting is enabled). 
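The core of the accounting is a start timestamp plus a delta applied at stop time; a simplified sketch of the helpers added below (preemption disabled by the caller, the cpu timer counts down):

	/* start: remember the current TOD clock value */
	vcpu->arch.cputm_start = get_tod_clock_fast();

	/* stop: charge the elapsed time to the guest cpu timer */
	vcpu->arch.sie_block->cputm -=
		get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;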
Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 2 ++ arch/s390/kvm/kvm-s390.c | 76 ++++++++++++++++++++++++++++++++++++++-- 2 files changed, 76 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 727e7f7b33fd..91796dd2a8ec 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -552,6 +552,8 @@ struct kvm_vcpu_arch { unsigned long pfault_token; unsigned long pfault_select; unsigned long pfault_compare; + bool cputm_enabled; + __u64 cputm_start; }; struct kvm_vm_stat { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2118a2250ac7..76b99149dc65 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -158,6 +158,8 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val, kvm->arch.epoch -= *delta; kvm_for_each_vcpu(i, vcpu, kvm) { vcpu->arch.sie_block->epoch -= *delta; + if (vcpu->arch.cputm_enabled) + vcpu->arch.cputm_start += *delta; } } return NOTIFY_OK; @@ -1429,16 +1431,78 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) return 0; } +/* needs disabled preemption to protect from TOD sync and vcpu_load/put */ +static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu) +{ + WARN_ON_ONCE(vcpu->arch.cputm_start != 0); + vcpu->arch.cputm_start = get_tod_clock_fast(); +} + +/* needs disabled preemption to protect from TOD sync and vcpu_load/put */ +static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu) +{ + WARN_ON_ONCE(vcpu->arch.cputm_start == 0); + vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; + vcpu->arch.cputm_start = 0; +} + +/* needs disabled preemption to protect from TOD sync and vcpu_load/put */ +static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu) +{ + WARN_ON_ONCE(vcpu->arch.cputm_enabled); + vcpu->arch.cputm_enabled = true; + __start_cpu_timer_accounting(vcpu); +} + +/* needs disabled preemption to protect from TOD sync and vcpu_load/put */ +static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu) +{ + WARN_ON_ONCE(!vcpu->arch.cputm_enabled); + __stop_cpu_timer_accounting(vcpu); + vcpu->arch.cputm_enabled = false; +} + +static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu) +{ + preempt_disable(); /* protect from TOD sync and vcpu_load/put */ + __enable_cpu_timer_accounting(vcpu); + preempt_enable(); +} + +static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu) +{ + preempt_disable(); /* protect from TOD sync and vcpu_load/put */ + __disable_cpu_timer_accounting(vcpu); + preempt_enable(); +} + /* set the cpu timer - may only be called from the VCPU thread itself */ void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm) { + preempt_disable(); /* protect from TOD sync and vcpu_load/put */ + if (vcpu->arch.cputm_enabled) + vcpu->arch.cputm_start = get_tod_clock_fast(); vcpu->arch.sie_block->cputm = cputm; + preempt_enable(); } -/* get the cpu timer - can also be called from other VCPU threads */ +/* update and get the cpu timer - can also be called from other VCPU threads */ __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu) { - return vcpu->arch.sie_block->cputm; + __u64 value; + int me; + + if (unlikely(!vcpu->arch.cputm_enabled)) + return vcpu->arch.sie_block->cputm; + + me = get_cpu(); /* also protects from TOD sync and vcpu_load/put */ + value = vcpu->arch.sie_block->cputm; + if (likely(me == vcpu->cpu)) { + /* the VCPU itself will always read consistent 
values */ + value -= get_tod_clock_fast() - vcpu->arch.cputm_start; + } + put_cpu(); + return value; } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -1461,12 +1525,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); + if (vcpu->arch.cputm_enabled) + __start_cpu_timer_accounting(vcpu); vcpu->cpu = cpu; } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { vcpu->cpu = -1; + if (vcpu->arch.cputm_enabled) + __stop_cpu_timer_accounting(vcpu); atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); @@ -2277,10 +2345,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) */ local_irq_disable(); __kvm_guest_enter(); + __disable_cpu_timer_accounting(vcpu); local_irq_enable(); exit_reason = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); local_irq_disable(); + __enable_cpu_timer_accounting(vcpu); __kvm_guest_exit(); local_irq_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); @@ -2358,6 +2428,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } sync_regs(vcpu, kvm_run); + enable_cpu_timer_accounting(vcpu); might_fault(); rc = __vcpu_run(vcpu); @@ -2377,6 +2448,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) rc = 0; } + disable_cpu_timer_accounting(vcpu); store_regs(vcpu, kvm_run); if (vcpu->sigset_active) -- cgit v1.2.3-58-ga151 From 9c23a1318eb12fcf76d9f663d2c3d88598e62a55 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 17 Feb 2016 21:53:33 +0100 Subject: KVM: s390: protect VCPU cpu timer with a seqcount For now, only the owning VCPU thread (that has loaded the VCPU) can get a consistent cpu timer value when calculating the delta. However, other threads might also be interested in a more recent, consistent value. Of special interest will be the timer callback of a VCPU that executes without having the VCPU loaded and could run in parallel with the VCPU thread. The cpu timer has a nice property: it is only updated by the owning VCPU thread. And speaking about accounting, a consistent value can only be calculated by looking at cputm_start and the cpu timer itself in one shot, otherwise the result might be wrong. As we only have one writing thread at a time (owning VCPU thread), we can use a seqcount instead of a seqlock and retry if the VCPU refreshed its cpu timer. This avoids any heavy locking and only introduces a counter update/check plus a handful of smp_wmb(). The owning VCPU thread should never have to retry on reads, and also for other threads this might be a very rare scenario. Please note that we have to use the raw_* variants for locking the seqcount as lockdep will produce false warnings otherwise. The rq->lock held during vcpu_load/put is also acquired from hardirq context. Lockdep cannot know that we avoid potential deadlocks by disabling preemption and thereby disable concurrent write locking attempts (via vcpu_put/load). 
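A simplified sketch of the reader side in kvm_s390_get_cpu_timer() below: readers retry until they observe a consistent pair of cputm_start and the cpu timer value, while the single writer brackets its updates with raw_write_seqcount_begin()/_end():

	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		value = vcpu->arch.sie_block->cputm;
		/* cputm_start == 0 means accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));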
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 8 ++++++++ arch/s390/kvm/kvm-s390.c | 30 ++++++++++++++++++++++-------- 2 files changed, 30 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 91796dd2a8ec..d61e64555938 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -553,6 +554,13 @@ struct kvm_vcpu_arch { unsigned long pfault_select; unsigned long pfault_compare; bool cputm_enabled; + /* + * The seqcount protects updates to cputm_start and sie_block.cputm, + * this way we can have non-blocking reads with consistent values. + * Only the owning VCPU thread (vcpu->cpu) is allowed to change these + * values and to start/stop/enable/disable cpu timer accounting. + */ + seqcount_t cputm_seqcount; __u64 cputm_start; }; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 76b99149dc65..38223c4603c7 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1435,15 +1435,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(vcpu->arch.cputm_start != 0); + raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); vcpu->arch.cputm_start = get_tod_clock_fast(); + raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); } /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(vcpu->arch.cputm_start == 0); + raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; vcpu->arch.cputm_start = 0; + raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); } /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ @@ -1480,28 +1484,37 @@ static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu) void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm) { preempt_disable(); /* protect from TOD sync and vcpu_load/put */ + raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); if (vcpu->arch.cputm_enabled) vcpu->arch.cputm_start = get_tod_clock_fast(); vcpu->arch.sie_block->cputm = cputm; + raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); preempt_enable(); } /* update and get the cpu timer - can also be called from other VCPU threads */ __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu) { + unsigned int seq; __u64 value; - int me; if (unlikely(!vcpu->arch.cputm_enabled)) return vcpu->arch.sie_block->cputm; - me = get_cpu(); /* also protects from TOD sync and vcpu_load/put */ - value = vcpu->arch.sie_block->cputm; - if (likely(me == vcpu->cpu)) { - /* the VCPU itself will always read consistent values */ - value -= get_tod_clock_fast() - vcpu->arch.cputm_start; - } - put_cpu(); + preempt_disable(); /* protect from TOD sync and vcpu_load/put */ + do { + seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); + /* + * If the writer would ever execute a read in the critical + * section, e.g. in irq context, we have a deadlock. 
+ */ + WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); + value = vcpu->arch.sie_block->cputm; + /* if cputm_start is 0, accounting is being started/stopped */ + if (likely(vcpu->arch.cputm_start)) + value -= get_tod_clock_fast() - vcpu->arch.cputm_start; + } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); + preempt_enable(); return value; } @@ -1704,6 +1717,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.local_int.float_int = &kvm->arch.float_int; vcpu->arch.local_int.wq = &vcpu->wq; vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; + seqcount_init(&vcpu->arch.cputm_seqcount); rc = kvm_vcpu_init(vcpu, kvm, id); if (rc) -- cgit v1.2.3-58-ga151 From 5ebda31686af6bb70affdcc5777ebc7ed81c0eac Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 22 Feb 2016 13:52:27 +0100 Subject: KVM: s390: step the VCPU timer while in enabled wait The cpu timer is a mean to measure task execution time. We want to account everything for a VCPU for which it is responsible. Therefore, if the VCPU wants to sleep, it shall be accounted for it. We can easily get this done by not disabling cpu timer accounting when scheduled out while sleeping because of enabled wait. Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 4 ++-- arch/s390/kvm/kvm-s390.h | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 38223c4603c7..b54daed49c2c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1538,7 +1538,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); - if (vcpu->arch.cputm_enabled) + if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) __start_cpu_timer_accounting(vcpu); vcpu->cpu = cpu; } @@ -1546,7 +1546,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { vcpu->cpu = -1; - if (vcpu->arch.cputm_enabled) + if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) __stop_cpu_timer_accounting(vcpu); atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 9787299d9a29..b1f7ee3bd72d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -54,6 +54,11 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; } +static inline int is_vcpu_idle(struct kvm_vcpu *vcpu) +{ + return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_WAIT; +} + static inline int kvm_is_ucontrol(struct kvm *kvm) { #ifdef CONFIG_KVM_S390_UCONTROL -- cgit v1.2.3-58-ga151 From b3c17f10fa2cfc29cf35e4821275e046e725213e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 22 Feb 2016 14:14:50 +0100 Subject: KVM: s390: wake up when the VCPU cpu timer expires When the VCPU cpu timer expires, we have to wake up just like when the ckc triggers. For now, setting up a cpu timer in the guest and going into enabled wait will never lead to a wakeup. This patch fixes this problem. Just as for the ckc, we have to take care of waking up too early. We have to recalculate the sleep time and go back to sleep. Please note that the timer callback calls kvm_s390_get_cpu_timer() from interrupt context. 
As the timer is canceled when leaving handle_wait(), and we don't do any VCPU cpu timer writes/updates in that function, we can be sure that we will never try to read the VCPU cpu timer from the same cpu that is currentyl updating the timer (deadlock). Reported-by: Sascha Silbe Tested-by: Sascha Silbe Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 48 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 35 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 4604e9accc65..ef84a803433e 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -909,9 +909,35 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu); } +static u64 __calculate_sltime(struct kvm_vcpu *vcpu) +{ + u64 now, cputm, sltime = 0; + + if (ckc_interrupts_enabled(vcpu)) { + now = kvm_s390_get_tod_clock_fast(vcpu->kvm); + sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); + /* already expired or overflow? */ + if (!sltime || vcpu->arch.sie_block->ckc <= now) + return 0; + if (cpu_timer_interrupts_enabled(vcpu)) { + cputm = kvm_s390_get_cpu_timer(vcpu); + /* already expired? */ + if (cputm >> 63) + return 0; + return min(sltime, tod_to_ns(cputm)); + } + } else if (cpu_timer_interrupts_enabled(vcpu)) { + sltime = kvm_s390_get_cpu_timer(vcpu); + /* already expired? */ + if (sltime >> 63) + return 0; + } + return sltime; +} + int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) { - u64 now, sltime; + u64 sltime; vcpu->stat.exit_wait_state++; @@ -924,22 +950,20 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; /* disabled wait */ } - if (!ckc_interrupts_enabled(vcpu)) { + if (!ckc_interrupts_enabled(vcpu) && + !cpu_timer_interrupts_enabled(vcpu)) { VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); __set_cpu_idle(vcpu); goto no_timer; } - now = kvm_s390_get_tod_clock_fast(vcpu->kvm); - sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); - - /* underflow */ - if (vcpu->arch.sie_block->ckc < now) + sltime = __calculate_sltime(vcpu); + if (!sltime) return 0; __set_cpu_idle(vcpu); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); - VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime); + VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime); no_timer: srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); @@ -966,18 +990,16 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) { struct kvm_vcpu *vcpu; - u64 now, sltime; + u64 sltime; vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); - now = kvm_s390_get_tod_clock_fast(vcpu->kvm); - sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); + sltime = __calculate_sltime(vcpu); /* * If the monotonic clock runs faster than the tod clock we might be * woken up too early and have to go back to sleep to avoid deadlocks. 
*/ - if (vcpu->arch.sie_block->ckc > now && - hrtimer_forward_now(timer, ns_to_ktime(sltime))) + if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime))) return HRTIMER_RESTART; kvm_s390_vcpu_wakeup(vcpu); return HRTIMER_NORESTART; -- cgit v1.2.3-58-ga151 From 80bc79dc0b18b17510ceb1e2d2d1999104af03c9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 2 Dec 2015 09:43:29 +0100 Subject: KVM: s390: enable STFLE interpretation only if enabled for the guest Not setting the facility list designation disables STFLE interpretation, this is what we want if the guest was told to not have it. Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b54daed49c2c..b6a065403bdc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1639,7 +1639,8 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) vcpu->arch.cpu_id = model->cpu_id; vcpu->arch.sie_block->ibc = model->ibc; - vcpu->arch.sie_block->fac = (int) (long) model->fac->list; + if (test_kvm_facility(vcpu->kvm, 7)) + vcpu->arch.sie_block->fac = (int) (long) model->fac->list; } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -- cgit v1.2.3-58-ga151 From c54f0d6ae057444453f5167e66ed999e8cf26936 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 2 Dec 2015 08:53:52 +0100 Subject: KVM: s390: allocate only one DMA page per VM We can fit the 2k for the STFLE interpretation and the crypto control block into one DMA page. As we now only have to allocate one DMA page, we can clean up the code a bit. As a nice side effect, this also fixes a problem with crycbd alignment in case special allocation debug options are enabled, debugged by Sascha Silbe. Acked-by: Christian Borntraeger Reviewed-by: Dominik Dingel Acked-by: Cornelia Huck Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 23 +++++++++------ arch/s390/kvm/kvm-s390.c | 60 +++++++++++++++------------------------- arch/s390/kvm/kvm-s390.h | 4 +-- arch/s390/kvm/priv.c | 2 +- 4 files changed, 41 insertions(+), 48 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index d61e64555938..3c254952d3a7 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -600,15 +600,11 @@ struct s390_io_adapter { #define S390_ARCH_FAC_MASK_SIZE_U64 \ (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) -struct kvm_s390_fac { - /* facility list requested by guest */ - __u64 list[S390_ARCH_FAC_LIST_SIZE_U64]; - /* facility mask supported by kvm & hosting machine */ - __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64]; -}; - struct kvm_s390_cpu_model { - struct kvm_s390_fac *fac; + /* facility mask supported by kvm & hosting machine */ + __u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64]; + /* facility list requested by guest (in dma page) */ + __u64 *fac_list; struct cpuid cpu_id; unsigned short ibc; }; @@ -627,6 +623,16 @@ struct kvm_s390_crypto_cb { __u8 reserved80[128]; /* 0x0080 */ }; +/* + * sie_page2 has to be allocated as DMA because fac_list and crycb need + * 31bit addresses in the sie control block. 
+ */ +struct sie_page2 { + __u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64]; /* 0x0000 */ + struct kvm_s390_crypto_cb crycb; /* 0x0800 */ + u8 reserved900[0x1000 - 0x900]; /* 0x0900 */ +} __packed; + struct kvm_arch{ void *sca; int use_esca; @@ -647,6 +653,7 @@ struct kvm_arch{ int ipte_lock_count; struct mutex ipte_mutex; spinlock_t start_stop_lock; + struct sie_page2 *sie_page2; struct kvm_s390_cpu_model model; struct kvm_s390_crypto crypto; u64 epoch; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b6a065403bdc..c186d55b87ac 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -355,8 +355,8 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) if (atomic_read(&kvm->online_vcpus)) { r = -EBUSY; } else if (MACHINE_HAS_VX) { - set_kvm_facility(kvm->arch.model.fac->mask, 129); - set_kvm_facility(kvm->arch.model.fac->list, 129); + set_kvm_facility(kvm->arch.model.fac_mask, 129); + set_kvm_facility(kvm->arch.model.fac_list, 129); r = 0; } else r = -EINVAL; @@ -370,8 +370,8 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) if (atomic_read(&kvm->online_vcpus)) { r = -EBUSY; } else if (test_facility(64)) { - set_kvm_facility(kvm->arch.model.fac->mask, 64); - set_kvm_facility(kvm->arch.model.fac->list, 64); + set_kvm_facility(kvm->arch.model.fac_mask, 64); + set_kvm_facility(kvm->arch.model.fac_list, 64); r = 0; } mutex_unlock(&kvm->lock); @@ -654,7 +654,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, sizeof(struct cpuid)); kvm->arch.model.ibc = proc->ibc; - memcpy(kvm->arch.model.fac->list, proc->fac_list, + memcpy(kvm->arch.model.fac_list, proc->fac_list, S390_ARCH_FAC_LIST_SIZE_BYTE); } else ret = -EFAULT; @@ -688,7 +688,8 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) } memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); proc->ibc = kvm->arch.model.ibc; - memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); + memcpy(&proc->fac_list, kvm->arch.model.fac_list, + S390_ARCH_FAC_LIST_SIZE_BYTE); if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) ret = -EFAULT; kfree(proc); @@ -708,7 +709,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) } get_cpu_id((struct cpuid *) &mach->cpuid); mach->ibc = sclp.ibc; - memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, + memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, S390_ARCH_FAC_LIST_SIZE_BYTE); memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, S390_ARCH_FAC_LIST_SIZE_BYTE); @@ -1085,16 +1086,12 @@ static void kvm_s390_get_cpu_id(struct cpuid *cpu_id) cpu_id->version = 0xff; } -static int kvm_s390_crypto_init(struct kvm *kvm) +static void kvm_s390_crypto_init(struct kvm *kvm) { if (!test_kvm_facility(kvm, 76)) - return 0; - - kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), - GFP_KERNEL | GFP_DMA); - if (!kvm->arch.crypto.crycb) - return -ENOMEM; + return; + kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; kvm_s390_set_crycb_format(kvm); /* Enable AES/DEA protected key functions by default */ @@ -1104,8 +1101,6 @@ static int kvm_s390_crypto_init(struct kvm *kvm) sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); - - return 0; } static void sca_dispose(struct kvm *kvm) @@ -1159,37 
+1154,30 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (!kvm->arch.dbf) goto out_err; - /* - * The architectural maximum amount of facilities is 16 kbit. To store - * this amount, 2 kbyte of memory is required. Thus we need a full - * page to hold the guest facility list (arch.model.fac->list) and the - * facility mask (arch.model.fac->mask). Its address size has to be - * 31 bits and word aligned. - */ - kvm->arch.model.fac = - (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!kvm->arch.model.fac) + kvm->arch.sie_page2 = + (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!kvm->arch.sie_page2) goto out_err; /* Populate the facility mask initially. */ - memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, + memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list, S390_ARCH_FAC_LIST_SIZE_BYTE); for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { if (i < kvm_s390_fac_list_mask_size()) - kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; + kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i]; else - kvm->arch.model.fac->mask[i] = 0UL; + kvm->arch.model.fac_mask[i] = 0UL; } /* Populate the facility list initially. */ - memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, + kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; + memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask, S390_ARCH_FAC_LIST_SIZE_BYTE); kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); kvm->arch.model.ibc = sclp.ibc & 0x0fff; - if (kvm_s390_crypto_init(kvm) < 0) - goto out_err; + kvm_s390_crypto_init(kvm); spin_lock_init(&kvm->arch.float_int.lock); for (i = 0; i < FIRQ_LIST_COUNT; i++) @@ -1225,8 +1213,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) return 0; out_err: - kfree(kvm->arch.crypto.crycb); - free_page((unsigned long)kvm->arch.model.fac); + free_page((unsigned long)kvm->arch.sie_page2); debug_unregister(kvm->arch.dbf); sca_dispose(kvm); KVM_EVENT(3, "creation of vm failed: %d", rc); @@ -1272,10 +1259,9 @@ static void kvm_free_vcpus(struct kvm *kvm) void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_free_vcpus(kvm); - free_page((unsigned long)kvm->arch.model.fac); sca_dispose(kvm); debug_unregister(kvm->arch.dbf); - kfree(kvm->arch.crypto.crycb); + free_page((unsigned long)kvm->arch.sie_page2); if (!kvm_is_ucontrol(kvm)) gmap_free(kvm->arch.gmap); kvm_s390_destroy_adapters(kvm); @@ -1640,7 +1626,7 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) vcpu->arch.cpu_id = model->cpu_id; vcpu->arch.sie_block->ibc = model->ibc; if (test_kvm_facility(vcpu->kvm, 7)) - vcpu->arch.sie_block->fac = (int) (long) model->fac->list; + vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list; } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index b1f7ee3bd72d..8621ab00ec8e 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -160,8 +160,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) /* test availability of facility in a kvm instance */ static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) { - return __test_facility(nr, kvm->arch.model.fac->mask) && - __test_facility(nr, kvm->arch.model.fac->list); + return __test_facility(nr, kvm->arch.model.fac_mask) && + __test_facility(nr, kvm->arch.model.fac_list); } static inline int set_kvm_facility(u64 *fac_list, unsigned long nr) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index add990945986..f218ccf016c8 100644 --- 
a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -354,7 +354,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) * We need to shift the lower 32 facility bits (bit 0-31) from a u64 * into a u32 memory representation. They will remain bits 0-31. */ - fac = *vcpu->kvm->arch.model.fac->list >> 32; + fac = *vcpu->kvm->arch.model.fac_list >> 32; rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list), &fac, sizeof(fac)); if (rc) -- cgit v1.2.3-58-ga151 From 1b8e83c04ee2c05c0cd0d304c4b389adf24ebe7f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 17 Feb 2016 10:25:05 +0000 Subject: arm64: KVM: vgic-v3: Avoid accessing ICH registers Just like on GICv2, we're a bit hammer-happy with GICv3, and access them more often than we should. Adopt a policy similar to what we do for GICv2, only save/restoring the minimal set of registers. As we don't access the registers linearly anymore (we may skip some), the convoluted accessors become slightly simpler, and we can drop the ugly indexing macro that tended to confuse the reviewers. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 293 ++++++++++++++++++++++++---------------- include/kvm/arm_vgic.h | 6 - virt/kvm/arm/vgic-v3.c | 4 +- 3 files changed, 182 insertions(+), 121 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 0035b2d3fb6d..e596945a88f7 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -37,12 +37,104 @@ asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\ } while (0) -/* vcpu is already in the HYP VA space */ +static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) +{ + switch (lr & 0xf) { + case 0: + return read_gicreg(ICH_LR0_EL2); + case 1: + return read_gicreg(ICH_LR1_EL2); + case 2: + return read_gicreg(ICH_LR2_EL2); + case 3: + return read_gicreg(ICH_LR3_EL2); + case 4: + return read_gicreg(ICH_LR4_EL2); + case 5: + return read_gicreg(ICH_LR5_EL2); + case 6: + return read_gicreg(ICH_LR6_EL2); + case 7: + return read_gicreg(ICH_LR7_EL2); + case 8: + return read_gicreg(ICH_LR8_EL2); + case 9: + return read_gicreg(ICH_LR9_EL2); + case 10: + return read_gicreg(ICH_LR10_EL2); + case 11: + return read_gicreg(ICH_LR11_EL2); + case 12: + return read_gicreg(ICH_LR12_EL2); + case 13: + return read_gicreg(ICH_LR13_EL2); + case 14: + return read_gicreg(ICH_LR14_EL2); + case 15: + return read_gicreg(ICH_LR15_EL2); + } + + unreachable(); +} + +static void __hyp_text __gic_v3_set_lr(u64 val, int lr) +{ + switch (lr & 0xf) { + case 0: + write_gicreg(val, ICH_LR0_EL2); + break; + case 1: + write_gicreg(val, ICH_LR1_EL2); + break; + case 2: + write_gicreg(val, ICH_LR2_EL2); + break; + case 3: + write_gicreg(val, ICH_LR3_EL2); + break; + case 4: + write_gicreg(val, ICH_LR4_EL2); + break; + case 5: + write_gicreg(val, ICH_LR5_EL2); + break; + case 6: + write_gicreg(val, ICH_LR6_EL2); + break; + case 7: + write_gicreg(val, ICH_LR7_EL2); + break; + case 8: + write_gicreg(val, ICH_LR8_EL2); + break; + case 9: + write_gicreg(val, ICH_LR9_EL2); + break; + case 10: + write_gicreg(val, ICH_LR10_EL2); + break; + case 11: + write_gicreg(val, ICH_LR11_EL2); + break; + case 12: + write_gicreg(val, ICH_LR12_EL2); + break; + case 13: + write_gicreg(val, ICH_LR13_EL2); + break; + case 14: + write_gicreg(val, ICH_LR14_EL2); + break; + case 15: + write_gicreg(val, ICH_LR15_EL2); + break; + } +} + void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = 
&vcpu->arch.vgic_cpu.vgic_v3; u64 val; - u32 max_lr_idx, nr_pri_bits; /* * Make sure stores to the GIC via the memory mapped interface @@ -51,68 +143,58 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) dsb(st); cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); - cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); - cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); - cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); - write_gicreg(0, ICH_HCR_EL2); - val = read_gicreg(ICH_VTR_EL2); - max_lr_idx = vtr_to_max_lr_idx(val); - nr_pri_bits = vtr_to_nr_pri_bits(val); + if (vcpu->arch.vgic_cpu.live_lrs) { + int i; + u32 max_lr_idx, nr_pri_bits; - switch (max_lr_idx) { - case 15: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2); - case 14: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2); - case 13: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2); - case 12: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2); - case 11: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2); - case 10: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2); - case 9: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2); - case 8: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2); - case 7: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2); - case 6: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2); - case 5: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2); - case 4: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2); - case 3: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2); - case 2: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2); - case 1: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2); - case 0: - cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2); - } + cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); + cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); + cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); - switch (nr_pri_bits) { - case 7: - cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); - cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); - case 6: - cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2); - default: - cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); - } + write_gicreg(0, ICH_HCR_EL2); + val = read_gicreg(ICH_VTR_EL2); + max_lr_idx = vtr_to_max_lr_idx(val); + nr_pri_bits = vtr_to_nr_pri_bits(val); - switch (nr_pri_bits) { - case 7: - cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2); - cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); - case 6: - cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2); - default: - cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2); + for (i = 0; i <= max_lr_idx; i++) { + if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i)) + cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); + } + + switch (nr_pri_bits) { + case 7: + cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); + cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); + case 6: + cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2); + default: + cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); + } + + switch (nr_pri_bits) { + case 7: + cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2); + cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); + case 6: + cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2); + default: + cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2); + } + + vcpu->arch.vgic_cpu.live_lrs = 0; + } else { + cpu_if->vgic_misr = 0; + cpu_if->vgic_eisr = 0; + 
cpu_if->vgic_elrsr = 0xffff; + cpu_if->vgic_ap0r[0] = 0; + cpu_if->vgic_ap0r[1] = 0; + cpu_if->vgic_ap0r[2] = 0; + cpu_if->vgic_ap0r[3] = 0; + cpu_if->vgic_ap1r[0] = 0; + cpu_if->vgic_ap1r[1] = 0; + cpu_if->vgic_ap1r[2] = 0; + cpu_if->vgic_ap1r[3] = 0; } val = read_gicreg(ICC_SRE_EL2); @@ -126,6 +208,8 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; u64 val; u32 max_lr_idx, nr_pri_bits; + u16 live_lrs = 0; + int i; /* * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a @@ -138,66 +222,48 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); isb(); - write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); - write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2); - val = read_gicreg(ICH_VTR_EL2); max_lr_idx = vtr_to_max_lr_idx(val); nr_pri_bits = vtr_to_nr_pri_bits(val); - switch (nr_pri_bits) { - case 7: - write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); - write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); - case 6: - write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2); - default: - write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); + for (i = 0; i <= max_lr_idx; i++) { + if (cpu_if->vgic_lr[i] & ICH_LR_STATE) + live_lrs |= (1 << i); } - switch (nr_pri_bits) { - case 7: - write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); - write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); - case 6: - write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2); - default: - write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2); - } + write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2); - switch (max_lr_idx) { - case 15: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2); - case 14: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2); - case 13: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2); - case 12: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2); - case 11: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2); - case 10: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2); - case 9: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2); - case 8: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2); - case 7: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2); - case 6: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2); - case 5: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2); - case 4: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2); - case 3: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2); - case 2: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2); - case 1: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2); - case 0: - write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2); + if (live_lrs) { + write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); + + switch (nr_pri_bits) { + case 7: + write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); + write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); + case 6: + write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2); + default: + write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); + } + + switch (nr_pri_bits) { + case 7: + write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); + write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); + case 6: + write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2); + default: + write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2); + } + + for (i = 0; i <= max_lr_idx; i++) { + val = 0; + + if (live_lrs & (1 
<< i)) + val = cpu_if->vgic_lr[i]; + + __gic_v3_set_lr(val, i); + } } /* @@ -207,6 +273,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) */ isb(); dsb(sy); + vcpu->arch.vgic_cpu.live_lrs = live_lrs; /* * Prevent the guest from touching the GIC system registers if diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index f473fd65fab5..281caf847fad 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -279,12 +279,6 @@ struct vgic_v2_cpu_if { u32 vgic_lr[VGIC_V2_MAX_LRS]; }; -/* - * LRs are stored in reverse order in memory. make sure we index them - * correctly. - */ -#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) - struct vgic_v3_cpu_if { #ifdef CONFIG_KVM_ARM_VGIC_V3 u32 vgic_hcr; diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 453eafd4dd6e..11b5ff6ce81c 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -42,7 +42,7 @@ static u32 ich_vtr_el2; static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) { struct vgic_lr lr_desc; - u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)]; + u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr]; if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) lr_desc.irq = val & ICH_LR_VIRTUALID_MASK; @@ -106,7 +106,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr, lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT; } - vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)] = lr_val; + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = lr_val; if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); -- cgit v1.2.3-58-ga151 From b4344545cf85d2a6ad546ec21dab5f76487e020e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Feb 2016 18:53:04 +0000 Subject: arm64: KVM: vgic-v3: Save maintenance interrupt state only if required Next on our list of useless accesses is the maintenance interrupt status registers (ICH_MISR_EL2, ICH_EISR_EL2). It is pointless to save them if we haven't asked for a maintenance interrupt in the first place, which can only happen for two reasons: - Underflow: ICH_HCR_UIE will be set, - EOI: ICH_LR_EOI will be set. These conditions can be checked on the in-memory copies of the regs. Should either of these two conditions be true, we must read ICH_MISR_EL2. We can then check for ICH_MISR_EOI, and only when set read ICH_EISR_EL2. This means that in most cases, we don't have to save them at all.
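As a condensed illustration only (not the patch itself; the real save_maint_int_state() is in the hunk below), the check can be sketched as follows. The helper name is invented, and live_lrs/nr_lr are passed as parameters purely for brevity; cpu_if and the ICH_* masks are the ones used elsewhere in this series:

	/*
	 * Sketch: is it worth reading ICH_MISR_EL2/ICH_EISR_EL2 at all?
	 * A maintenance interrupt can only have fired if we asked for one.
	 */
	static bool maint_int_possible(struct vgic_v3_cpu_if *cpu_if,
				       u16 live_lrs, int nr_lr)
	{
		int i;

		/* Underflow interrupt requested? */
		if (cpu_if->vgic_hcr & ICH_HCR_UIE)
			return true;

		/* Any live, purely virtual LR asking for an EOI interrupt? */
		for (i = 0; i < nr_lr; i++) {
			if (!(live_lrs & (1 << i)))
				continue;
			if (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
			    (cpu_if->vgic_lr[i] & ICH_LR_EOI))
				return true;
		}

		return false;
	}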
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index e596945a88f7..61a5e46b4335 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -131,6 +131,35 @@ static void __hyp_text __gic_v3_set_lr(u64 val, int lr) } } +static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr) +{ + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + int i; + bool expect_mi; + + expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE); + + for (i = 0; i < nr_lr; i++) { + if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) + continue; + + expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) && + (cpu_if->vgic_lr[i] & ICH_LR_EOI)); + } + + if (expect_mi) { + cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); + + if (cpu_if->vgic_misr & ICH_MISR_EOI) + cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); + else + cpu_if->vgic_eisr = 0; + } else { + cpu_if->vgic_misr = 0; + cpu_if->vgic_eisr = 0; + } +} + void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; @@ -148,8 +177,6 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) int i; u32 max_lr_idx, nr_pri_bits; - cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); - cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); write_gicreg(0, ICH_HCR_EL2); @@ -157,6 +184,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) max_lr_idx = vtr_to_max_lr_idx(val); nr_pri_bits = vtr_to_nr_pri_bits(val); + save_maint_int_state(vcpu, max_lr_idx + 1); + for (i = 0; i <= max_lr_idx; i++) { if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i)) cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); -- cgit v1.2.3-58-ga151 From 84e8b9c88d5fe9c9a59ed24ae44d7ac0983df92b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Feb 2016 17:09:49 +0000 Subject: arm64: KVM: vgic-v3: Do not save an LR known to be empty On exit, any empty LR will be signaled in ICH_ELRSR_EL2. Which means that we do not have to save it, and we can just clear its state in the in-memory copy. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 61a5e46b4335..0db426e6c13e 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -187,8 +187,15 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) save_maint_int_state(vcpu, max_lr_idx + 1); for (i = 0; i <= max_lr_idx; i++) { - if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i)) - cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); + if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) + continue; + + if (cpu_if->vgic_elrsr & (1 << i)) { + cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; + continue; + } + + cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); } switch (nr_pri_bits) { -- cgit v1.2.3-58-ga151 From 0d98d00b8d80bfdee95cf7e85f20f107377e2662 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 3 Mar 2016 15:43:58 +0000 Subject: arm64: KVM: vgic-v3: Reset LRs at boot time In order to let the GICv3 code be more lazy in the way it accesses the LRs, it is necessary to start with a clean slate. 
Let's reset the LRs on each CPU when the vgic is probed (which includes a round trip to EL2...). Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_asm.h | 1 + arch/arm64/kvm/hyp/vgic-v3-sr.c | 9 +++++++++ virt/kvm/arm/vgic-v3.c | 7 +++++++ 3 files changed, 17 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 1037392ae134..2d02ba67478c 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -42,6 +42,7 @@ extern void __kvm_tlb_flush_vmid(struct kvm *kvm); extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); extern u64 __vgic_v3_get_ich_vtr_el2(void); +extern void __vgic_v3_init_lrs(void); extern u32 __kvm_get_mdcr_el2(void); diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 0db426e6c13e..81349479e17c 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -321,6 +321,15 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) } } +void __hyp_text __vgic_v3_init_lrs(void) +{ + int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2)); + int i; + + for (i = 0; i <= max_lr_idx; i++) + __gic_v3_set_lr(0, i); +} + static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void) { return read_gicreg(ICH_VTR_EL2); diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 11b5ff6ce81c..999bdc6d9d9f 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -216,6 +216,11 @@ static const struct vgic_ops vgic_v3_ops = { static struct vgic_params vgic_v3_params; +static void vgic_cpu_init_lrs(void *params) +{ + kvm_call_hyp(__vgic_v3_init_lrs); +} + /** * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT * @node: pointer to the DT node @@ -284,6 +289,8 @@ int vgic_v3_probe(struct device_node *vgic_node, kvm_info("%s@%llx IRQ%d\n", vgic_node->name, vcpu_res.start, vgic->maint_irq); + on_each_cpu(vgic_cpu_init_lrs, vgic, 1); + *ops = &vgic_v3_ops; *params = vgic; -- cgit v1.2.3-58-ga151 From b40c4892d175874d118860c8282a85ee7b64bcbb Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Feb 2016 17:36:09 +0000 Subject: arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit So far, we're always writing all possible LRs, setting the empty ones with a zero value. This is obviously doing a lot of work for nothing, and we're better off clearing those we've actually dirtied on the exit path (it is very rare to inject more than one interrupt at a time anyway).
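In other words, the shape of the change looks roughly like the sketch below (slightly simplified: the ELRSR shortcut added earlier in the series is omitted here; the exact hunks follow). Loop variables and bounds are assumed from the surrounding save/restore functions:

	/* Exit path (sketch): save each dirtied LR and immediately wipe it. */
	for (i = 0; i <= max_lr_idx; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;
		cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
		__gic_v3_set_lr(0, i);		/* leave an empty LR behind us */
	}

	/* Entry path (sketch): only rewrite the LRs that actually carry state. */
	for (i = 0; i <= max_lr_idx; i++) {
		if (live_lrs & (1 << i))
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}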
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 81349479e17c..fff7cd42b3a3 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -196,6 +196,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) } cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); + __gic_v3_set_lr(0, i); } switch (nr_pri_bits) { @@ -293,12 +294,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) } for (i = 0; i <= max_lr_idx; i++) { - val = 0; - - if (live_lrs & (1 << i)) - val = cpu_if->vgic_lr[i]; + if (!(live_lrs & (1 << i))) + continue; - __gic_v3_set_lr(val, i); + __gic_v3_set_lr(cpu_if->vgic_lr[i], i); } } -- cgit v1.2.3-58-ga151 From a87036add09283e6c4f4103a15c596c67b86ab86 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 8 Mar 2016 09:52:13 +0100 Subject: KVM: x86: disable MPX if host did not enable MPX XSAVE features When eager FPU is disabled, KVM will still see the MPX bit in CPUID and presumably the MPX vmentry and vmexit controls. However, it will not be able to expose the MPX XSAVE features to the guest, because the guest's accessible XSAVE features are always a subset of host_xcr0. In this case, we should disable the MPX CPUID bit, the BNDCFGS MSR, and the MPX vmentry and vmexit controls for nested virtualization. It is then unnecessary to enable guest eager FPU if the guest has the MPX CPUID bit set. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 13 ++++++++++--- arch/x86/kvm/cpuid.h | 9 +-------- arch/x86/kvm/vmx.c | 13 ++++++------- 3 files changed, 17 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 6525e926f566..fa241d4fda98 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted) return ret; } +bool kvm_mpx_supported(void) +{ + return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)) + && kvm_x86_ops->mpx_supported()); +} +EXPORT_SYMBOL_GPL(kvm_mpx_supported); + u64 kvm_supported_xcr0(void) { u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0; - if (!kvm_x86_ops->mpx_supported()) + if (!kvm_mpx_supported()) xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR); return xcr0; @@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu) if (best && (best->eax & (F(XSAVES) | F(XSAVEC)))) best->ebx = xstate_required_size(vcpu->arch.xcr0, true); - vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu); + vcpu->arch.eager_fpu = use_eager_fpu(); if (vcpu->arch.eager_fpu) kvm_x86_ops->fpu_activate(vcpu); @@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, #endif unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0; unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0; - unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0; + unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0; unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? 
F(XSAVES) : 0; /* cpuid 1.edx */ diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index c8eda1498121..66a6581724ad 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -5,6 +5,7 @@ #include int kvm_update_cpuid(struct kvm_vcpu *vcpu); +bool kvm_mpx_supported(void); struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function, u32 index); int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, @@ -135,14 +136,6 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) return best && (best->ebx & bit(X86_FEATURE_RTM)); } -static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) -{ - struct kvm_cpuid_entry2 *best; - - best = kvm_find_cpuid_entry(vcpu, 7, 0); - return best && (best->ebx & bit(X86_FEATURE_MPX)); -} - static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 46154dac71e6..e512aa7ed874 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -861,7 +861,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); static u64 construct_eptp(unsigned long root_hpa); static void kvm_cpu_vmxon(u64 addr); static void kvm_cpu_vmxoff(void); -static bool vmx_mpx_supported(void); static bool vmx_xsaves_supported(void); static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); static void vmx_set_segment(struct kvm_vcpu *vcpu, @@ -2595,7 +2594,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; - if (vmx_mpx_supported()) + if (kvm_mpx_supported()) vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; /* We support free control of debug control saving. */ @@ -2616,7 +2615,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) VM_ENTRY_LOAD_IA32_PAT; vmx->nested.nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); - if (vmx_mpx_supported()) + if (kvm_mpx_supported()) vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; /* We support free control of debug control loading. 
*/ @@ -2860,7 +2859,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); break; case MSR_IA32_BNDCFGS: - if (!vmx_mpx_supported()) + if (!kvm_mpx_supported()) return 1; msr_info->data = vmcs_read64(GUEST_BNDCFGS); break; @@ -2937,7 +2936,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_BNDCFGS: - if (!vmx_mpx_supported()) + if (!kvm_mpx_supported()) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; @@ -3410,7 +3409,7 @@ static void init_vmcs_shadow_fields(void) for (i = j = 0; i < max_shadow_read_write_fields; i++) { switch (shadow_read_write_fields[i]) { case GUEST_BNDCFGS: - if (!vmx_mpx_supported()) + if (!kvm_mpx_supported()) continue; break; default: @@ -10265,7 +10264,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); - if (vmx_mpx_supported()) + if (kvm_mpx_supported()) vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); if (nested_cpu_has_xsaves(vmcs12)) vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP); -- cgit v1.2.3-58-ga151 From 5a5fbdc0e3f1159a734f1890da60fce70e98271d Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 8 Mar 2016 10:00:11 +0100 Subject: KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch It is now equal to use_eager_fpu(), which simply tests a cpufeature bit. Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/cpuid.c | 3 +-- arch/x86/kvm/x86.c | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d110dc44d6c2..01c8b501cb6d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -503,7 +503,6 @@ struct kvm_vcpu_arch { struct kvm_mmu_memory_cache mmu_page_header_cache; struct fpu guest_fpu; - bool eager_fpu; u64 xcr0; u64 guest_supported_xcr0; u32 guest_xstate_size; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index fa241d4fda98..0029644bf09c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -104,8 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu) if (best && (best->eax & (F(XSAVES) | F(XSAVEC)))) best->ebx = xstate_required_size(vcpu->arch.xcr0, true); - vcpu->arch.eager_fpu = use_eager_fpu(); - if (vcpu->arch.eager_fpu) + if (use_eager_fpu()) kvm_x86_ops->fpu_activate(vcpu); /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 60d6c0036a98..bcbce0fa0bc2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7329,7 +7329,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) * Every 255 times fpu_counter rolls over to 0; a guest that uses * the FPU in bursts will revert to loading it on demand. */ - if (!vcpu->arch.eager_fpu) { + if (!use_eager_fpu()) { if (++vcpu->fpu_counter < 5) kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); } -- cgit v1.2.3-58-ga151
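For clarity, a minimal sketch of what the remaining lazy-FPU heuristic boils down to after this last patch. The wrapper name below is invented for illustration; use_eager_fpu(), fpu_counter, kvm_make_request() and KVM_REQ_DEACTIVATE_FPU are the names visible in the hunk above, and the rolled-over-counter behaviour is taken from the existing comment in kvm_put_guest_fpu():

	/* Sketch, not the kernel implementation. */
	static void fpu_put_heuristic(struct kvm_vcpu *vcpu)
	{
		if (use_eager_fpu())
			return;	/* eager mode: guest FPU state simply stays active */

		/*
		 * Lazy mode: only the first few reloads after fpu_counter
		 * wraps ask for deactivation, so a guest that uses the FPU
		 * in bursts reverts to loading it on demand.
		 */
		if (++vcpu->fpu_counter < 5)
			kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
	}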