From 437f9963bc4fd75889c1fe9289a92dea9124a439 Mon Sep 17 00:00:00 2001
From: Pavel Fedin
Date: Fri, 25 Sep 2015 17:00:29 +0300
Subject: KVM: arm/arm64: Do not inject spurious interrupts
When lowering a level-triggered line from userspace, we forgot to lower
the pending bit on the emulated CPU interface and we also did not
re-compute the pending_on_cpu bitmap for the CPU affected by the change.
Update vgic_update_irq_pending() to fix the two issues above and also
raise a warning in vgic_queue_irq_to_lr() if we encounter an interrupt
being queued that is neither marked active nor pending.
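For reference, the lowering path in vgic_update_irq_pending() after this
patch behaves roughly as follows (a condensed sketch of the hunk below,
not a compilable excerpt):

    /* guest/userspace lowered a level-triggered line */
    if (level_triggered) {
            vgic_dist_irq_clear_level(vcpu, irq_num);
            if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
                    vgic_dist_irq_clear_pending(vcpu, irq_num);
                    /* also lower the emulated CPU interface ... */
                    vgic_cpu_irq_clear(vcpu, irq_num);
                    /* ... and re-evaluate pending_on_cpu for this CPU */
                    if (!compute_pending_for_cpu(vcpu))
                            clear_bit(cpuid, dist->irq_pending_on_cpu);
            }
    }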
[ Commit text reworked completely - Christoffer ]
Signed-off-by: Pavel Fedin
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/vgic.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 6bd1c9bf7ae7..596455a394af 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1132,7 +1132,8 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
vgic_irq_clear_active(vcpu, irq);
vgic_update_state(vcpu->kvm);
- } else if (vgic_dist_irq_is_pending(vcpu, irq)) {
+ } else {
+ WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
vlr.state |= LR_STATE_PENDING;
kvm_debug("Set pending: 0x%x\n", vlr.state);
}
@@ -1607,8 +1608,12 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
} else {
if (level_triggered) {
vgic_dist_irq_clear_level(vcpu, irq_num);
- if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
+ if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
vgic_dist_irq_clear_pending(vcpu, irq_num);
+ vgic_cpu_irq_clear(vcpu, irq_num);
+ if (!compute_pending_for_cpu(vcpu))
+ clear_bit(cpuid, dist->irq_pending_on_cpu);
+ }
}
ret = false;
--
From cff9211eb1a1f58ce7f5a2d596b617928fd4be0e Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Fri, 16 Oct 2015 12:41:21 +0200
Subject: arm/arm64: KVM: Fix arch timer behavior for disabled interrupts
We have an interesting issue when the guest disables the timer interrupt
on the VGIC, which happens when turning VCPUs off using PSCI, for
example.
The problem is that because the guest disables the virtual interrupt at
the VGIC level, we never inject interrupts to the guest and therefore
never mark the interrupt as active on the physical distributor. The
host also never takes the timer interrupt (we only use the timer device
to trigger a guest exit and everything else is done in software), so the
interrupt does not become active through normal means.
The result is that we keep entering the guest with a programmed timer
that will always fire as soon as we context switch the hardware timer
state and run the guest, preventing forward progress for the VCPU.
Since the active state on the physical distributor is really part of the
timer logic, it is the job of our virtual arch timer driver to manage
this state.
The timer->map->active boolean field indicates whether we have signalled
this interrupt to the vgic and if that interrupt is still pending or
active. As long as that is the case, the hardware doesn't have to
generate physical interrupts and therefore we mark the interrupt as
active on the physical distributor.
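In code terms, the flush path mirrors that state into the physical
distributor; condensed from the arch_timer.c hunk below:

    /* kvm_timer_flush_hwstate(): while the virtual interrupt is still
     * pending or active in the vgic, keep the HW timer interrupt from
     * firing by marking it active on the physical distributor */
    phys_active = kvm_vgic_get_phys_irq_active(timer->map);
    ret = irq_set_irqchip_state(timer->map->irq,
                                IRQCHIP_STATE_ACTIVE,
                                phys_active);
    WARN_ON(ret);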
We also have to restore the pending state of an interrupt that was
queued to an LR but then retired from the LR for some reason, while its
state in the LR was still pending.
Cc: Marc Zyngier
Reported-by: Lorenzo Pieralisi
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/arch_timer.c | 19 +++++++++++++++++++
virt/kvm/arm/vgic.c | 43 +++++++++++--------------------------------
2 files changed, 30 insertions(+), 32 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 48c6e1ac6827..b9d3a32cbc04 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -137,6 +137,8 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ bool phys_active;
+ int ret;
/*
* We're about to run this vcpu again, so there is no need to
@@ -151,6 +153,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
*/
if (kvm_timer_should_fire(vcpu))
kvm_timer_inject_irq(vcpu);
+
+ /*
+ * We keep track of whether the edge-triggered interrupt has been
+ * signalled to the vgic/guest, and if so, we mark the interrupt as
+ * active on the physical distributor to prevent the timer from raising a
+ * physical interrupt whenever we run a guest, preventing forward
+ * VCPU progress.
+ */
+ if (kvm_vgic_get_phys_irq_active(timer->map))
+ phys_active = true;
+ else
+ phys_active = false;
+
+ ret = irq_set_irqchip_state(timer->map->irq,
+ IRQCHIP_STATE_ACTIVE,
+ phys_active);
+ WARN_ON(ret);
}
/**
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 596455a394af..ea21bc273542 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1092,6 +1092,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
+ /*
+ * We must transfer the pending state back to the distributor before
+ * retiring the LR, otherwise we may lose edge-triggered interrupts.
+ */
+ if (vlr.state & LR_STATE_PENDING) {
+ vgic_dist_irq_set_pending(vcpu, irq);
+ vlr.hwirq = 0;
+ }
+
vlr.state = 0;
vgic_set_lr(vcpu, lr_nr, vlr);
clear_bit(lr_nr, vgic_cpu->lr_used);
@@ -1241,7 +1250,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
unsigned long *pa_percpu, *pa_shared;
- int i, vcpu_id, lr, ret;
+ int i, vcpu_id;
int overflow = 0;
int nr_shared = vgic_nr_shared_irqs(dist);
@@ -1296,31 +1305,6 @@ epilog:
*/
clear_bit(vcpu_id, dist->irq_pending_on_cpu);
}
-
- for (lr = 0; lr < vgic->nr_lr; lr++) {
- struct vgic_lr vlr;
-
- if (!test_bit(lr, vgic_cpu->lr_used))
- continue;
-
- vlr = vgic_get_lr(vcpu, lr);
-
- /*
- * If we have a mapping, and the virtual interrupt is
- * presented to the guest (as pending or active), then we must
- * set the state to active in the physical world. See
- * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
- */
- if (vlr.state & LR_HW) {
- struct irq_phys_map *map;
- map = vgic_irq_map_search(vcpu, vlr.irq);
-
- ret = irq_set_irqchip_state(map->irq,
- IRQCHIP_STATE_ACTIVE,
- true);
- WARN_ON(ret);
- }
- }
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
@@ -1430,13 +1414,8 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
WARN_ON(ret);
- if (map->active) {
- ret = irq_set_irqchip_state(map->irq,
- IRQCHIP_STATE_ACTIVE,
- false);
- WARN_ON(ret);
+ if (map->active)
return 0;
- }
return 1;
}
--
From 544c572e03174438b6656ed24a4516b9a9d5f14a Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Sat, 17 Oct 2015 17:55:12 +0200
Subject: arm/arm64: KVM: Clear map->active on pend/active clear
When a guest reboots or offlines/onlines CPUs, it is not uncommon for it
to clear the pending and active states of an interrupt through the
emulated VGIC distributor. However, since the architected timers are
defined by the architecture to be level-triggered, and the guest
rightfully expects them to behave that way even though we emulate them
as edge-triggered, we have to mimic level-triggered behavior in our
edge-triggered virtual implementation.
We currently do not signal the VGIC when the map->active field is true,
because it indicates that the guest has already been signalled of the
interrupt as required. Normally this field is set to false when the
guest deactivates the virtual interrupt through the sync path.
We also need to catch the case where the guest deactivates the interrupt
through the emulated distributor, again allowing guests to boot even if
the original virtual timer signal hit before the guest's GIC
initialization sequence is run.
Reviewed-by: Eric Auger
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/vgic.c | 32 +++++++++++++++++++++++++++++++-
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index ea21bc273542..58b125676785 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -531,6 +531,34 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm,
return false;
}
+/*
+ * If a mapped interrupt's state has been modified by the guest such that it
+ * is no longer active or pending, without it having gone through the sync path,
+ * then the map->active field must be cleared so the interrupt can be taken
+ * again.
+ */
+static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct list_head *root;
+ struct irq_phys_map_entry *entry;
+ struct irq_phys_map *map;
+
+ rcu_read_lock();
+
+ /* Check for PPIs */
+ root = &vgic_cpu->irq_phys_map_list;
+ list_for_each_entry_rcu(entry, root, entry) {
+ map = &entry->map;
+
+ if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
+ !vgic_irq_is_active(vcpu, map->virt_irq))
+ map->active = false;
+ }
+
+ rcu_read_unlock();
+}
+
bool vgic_handle_clear_pending_reg(struct kvm *kvm,
struct kvm_exit_mmio *mmio,
phys_addr_t offset, int vcpu_id)
@@ -561,6 +589,7 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
vcpu_id, offset);
vgic_reg_access(mmio, reg, offset, mode);
+ vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
vgic_update_state(kvm);
return true;
}
@@ -598,6 +627,7 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm,
ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
if (mmio->is_write) {
+ vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
vgic_update_state(kvm);
return true;
}
@@ -1406,7 +1436,7 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
return 0;
map = vgic_irq_map_search(vcpu, vlr.irq);
- BUG_ON(!map || !map->active);
+ BUG_ON(!map);
ret = irq_get_irqchip_state(map->irq,
IRQCHIP_STATE_ACTIVE,
--
From 0d997491f814c87310a6ad7be30a9049c7150489 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Sat, 17 Oct 2015 19:05:27 +0200
Subject: arm/arm64: KVM: Fix disabled distributor operation
We currently do a single update of the vgic state when the distributor
enable/disable control register is accessed and then bypass updating the
state for as long as the distributor remains disabled.
This is incorrect, because updating the state does not consider the
distributor enable bit, and thus you can end up in a situation where an
interrupt is marked as pending on the CPU interface, but not pending on
the distributor, which is an impossible state to be in, and triggers a
warning. Consider for example the following sequence of events:
1. An interrupt is marked as pending on the distributor
- the interrupt is also forwarded to the CPU interface
2. The guest turns off the distributor (it's about to do a reboot)
- we stop updating the CPU interface state from now on
3. The guest disables the pending interrupt
- we remove the pending state from the distributor, but don't touch
the CPU interface, see point 2.
Since the distributor disable bit really means that no interrupts should
be forwarded to the CPU interface, we modify the code to keep updating
the internal VGIC state, but always set the CPU interface pending bits
to zero when the distributor is disabled.
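Condensed, the resulting prologue of compute_pending_for_cpu() from the
hunk below:

    /* a disabled distributor forwards nothing to the CPU interface,
     * so report no pending interrupts for this VCPU */
    if (!dist->enabled) {
            bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
            bitmap_zero(pend_shared, nr_shared);
            return 0;
    }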
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/vgic.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 58b125676785..66c66165e712 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1012,6 +1012,12 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
pend_shared = vcpu->arch.vgic_cpu.pending_shared;
+ if (!dist->enabled) {
+ bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
+ bitmap_zero(pend_shared, nr_shared);
+ return 0;
+ }
+
pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
@@ -1039,11 +1045,6 @@ void vgic_update_state(struct kvm *kvm)
struct kvm_vcpu *vcpu;
int c;
- if (!dist->enabled) {
- set_bit(0, dist->irq_pending_on_cpu);
- return;
- }
-
kvm_for_each_vcpu(c, vcpu, kvm) {
if (compute_pending_for_cpu(vcpu))
set_bit(c, dist->irq_pending_on_cpu);
--
From 3217f7c25bca66eed9b07f0b8bfd1937169b0736 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Thu, 27 Aug 2015 16:41:15 +0200
Subject: KVM: Add kvm_arch_vcpu_{un}blocking callbacks
Sometimes it is useful for architecture implementations of KVM to know
when the VCPU thread is about to block or when it comes back from
blocking (arm/arm64 needs to know this to properly implement timers, for
example).
Therefore provide a generic architecture callback function in line with
what we do elsewhere for KVM generic-arch interactions.
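The generic side simply brackets the wait loop in kvm_vcpu_block();
schematically (matching the kvm_main.c hunk below):

    kvm_arch_vcpu_blocking(vcpu);          /* arch hook: about to block */

    for (;;) {
            prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
            /* ... wait until there is something to do ... */
    }
    finish_wait(&vcpu->wq, &wait);

    kvm_arch_vcpu_unblocking(vcpu);        /* arch hook: back from blocking */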
Reviewed-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
arch/arm/include/asm/kvm_host.h | 3 +++
arch/arm64/include/asm/kvm_host.h | 3 +++
arch/mips/include/asm/kvm_host.h | 2 ++
arch/powerpc/include/asm/kvm_host.h | 2 ++
arch/s390/include/asm/kvm_host.h | 2 ++
arch/x86/include/asm/kvm_host.h | 3 +++
include/linux/kvm_host.h | 2 ++
virt/kvm/kvm_main.c | 3 +++
8 files changed, 20 insertions(+)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c4072d9f32c7..84da97901f1f 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -234,4 +234,7 @@ static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ed039688c221..e4f4d65f7d2b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -255,4 +255,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 5a1a882e0a75..6ded8d347af9 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -847,5 +847,7 @@ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#endif /* __MIPS_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 827a38d7a9db..c9f122d00920 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -718,5 +718,7 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslot
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8ced426091e1..72a614c68ed8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -644,5 +644,7 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslot
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2beee0382088..b28f0f142ecb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1233,4 +1233,7 @@ int x86_set_memory_region(struct kvm *kvm,
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1bef9e21e725..4a86f5f072c0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -625,6 +625,8 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8db1d9361993..7873d6daccb1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2018,6 +2018,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
} while (single_task_running() && ktime_before(cur, stop));
}
+ kvm_arch_vcpu_blocking(vcpu);
+
for (;;) {
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
@@ -2031,6 +2033,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
finish_wait(&vcpu->wq, &wait);
cur = ktime_get();
+ kvm_arch_vcpu_unblocking(vcpu);
out:
block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
--
From d35268da66870d733ae763fd7f9b06a1f63f395e Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Tue, 25 Aug 2015 19:48:21 +0200
Subject: arm/arm64: KVM: arch_timer: Only schedule soft timer on vcpu_block
We currently schedule a soft timer every time we exit the guest if the
timer did not expire while running the guest. This is really not
necessary, because the only work we do in the timer work function is to
kick the vcpu.
Kicking the vcpu does two things:
(1) If the vcpu thread is on a waitqueue, make it runnable and remove it
from the waitqueue.
(2) If the vcpu is running on a different physical CPU from the one
doing the kick, it sends a reschedule IPI.
The second case cannot happen, because the soft timer is only ever
scheduled when the vcpu is not running. The first case is only relevant
when the vcpu thread is on a waitqueue, which is only the case when the
vcpu thread has called kvm_vcpu_block().
Therefore, we only need to make sure a timer is scheduled for
kvm_vcpu_block(), which we do by encapsulating all calls to
kvm_vcpu_block() with kvm_timer_{un}schedule calls.
Additionally, we only schedule a soft timer if the timer is enabled and
unmasked, since it is useless otherwise.
Note that theoretically userspace can use the SET_ONE_REG interface to
change registers that should cause the timer to fire, even if the vcpu
is blocked without a scheduled timer, but this case was not supported
before this patch and we leave it for future work for now.
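On arm/arm64 the new blocking hooks reduce to the timer calls (see the
arm.c hunk below), so the soft timer only ever exists while the VCPU is
actually blocked:

    void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
    {
            kvm_timer_schedule(vcpu);      /* arm a soft timer, if it can fire */
    }

    void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
    {
            kvm_timer_unschedule(vcpu);    /* disarm it again on wakeup */
    }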
Signed-off-by: Christoffer Dall
---
arch/arm/include/asm/kvm_host.h | 3 --
arch/arm/kvm/arm.c | 10 +++++
arch/arm64/include/asm/kvm_host.h | 3 --
include/kvm/arm_arch_timer.h | 2 +
virt/kvm/arm/arch_timer.c | 94 +++++++++++++++++++++++++--------------
5 files changed, 72 insertions(+), 40 deletions(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 84da97901f1f..c4072d9f32c7 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -234,7 +234,4 @@ static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 78b286994577..7ed4d475d83a 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -271,6 +271,16 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvm_timer_should_fire(vcpu);
}
+void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+ kvm_timer_schedule(vcpu);
+}
+
+void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+ kvm_timer_unschedule(vcpu);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
/* Force users to call KVM_ARM_VCPU_INIT */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e4f4d65f7d2b..ed039688c221 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -255,7 +255,4 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
-static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index e1e4d7c38dda..ef14cc1f1f26 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -71,5 +71,7 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu);
+void kvm_timer_schedule(struct kvm_vcpu *vcpu);
+void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
#endif
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index b9d3a32cbc04..32095fbb5d7c 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -111,14 +111,21 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
return HRTIMER_NORESTART;
}
+static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+ (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) &&
+ !kvm_vgic_get_phys_irq_active(timer->map);
+}
+
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
cycle_t cval, now;
- if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
- !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) ||
- kvm_vgic_get_phys_irq_active(timer->map))
+ if (!kvm_timer_irq_can_fire(vcpu))
return false;
cval = timer->cntv_cval;
@@ -127,12 +134,57 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
return cval <= now;
}
+/*
+ * Schedule the background timer before calling kvm_vcpu_block, so that this
+ * thread is removed from its waitqueue and made runnable when there's a timer
+ * interrupt to handle.
+ */
+void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ u64 ns;
+ cycle_t cval, now;
+
+ BUG_ON(timer_is_armed(timer));
+
+ /*
+ * No need to schedule a background timer if the guest timer has
+ * already expired, because kvm_vcpu_block will return before putting
+ * the thread to sleep.
+ */
+ if (kvm_timer_should_fire(vcpu))
+ return;
+
+ /*
+ * If the timer is not capable of raising interrupts (disabled or
+ * masked), then there's no more work for us to do.
+ */
+ if (!kvm_timer_irq_can_fire(vcpu))
+ return;
+
+ /* The timer has not yet expired, schedule a background timer */
+ cval = timer->cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+ ns = cyclecounter_cyc2ns(timecounter->cc,
+ cval - now,
+ timecounter->mask,
+ &timecounter->frac);
+ timer_arm(timer, ns);
+}
+
+void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ timer_disarm(timer);
+}
+
/**
* kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
* @vcpu: The vcpu pointer
*
- * Disarm any pending soft timers, since the world-switch code will write the
- * virtual timer state back to the physical CPU.
+ * Check if the virtual timer has expired while we were running in the host,
+ * and inject an interrupt if that was the case.
*/
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
@@ -140,17 +192,6 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
bool phys_active;
int ret;
- /*
- * We're about to run this vcpu again, so there is no need to
- * keep the background timer running, as we're about to
- * populate the CPU timer again.
- */
- timer_disarm(timer);
-
- /*
- * If the timer expired while we were not scheduled, now is the time
- * to inject it.
- */
if (kvm_timer_should_fire(vcpu))
kvm_timer_inject_irq(vcpu);
@@ -176,32 +217,17 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
* kvm_timer_sync_hwstate - sync timer state from cpu
* @vcpu: The vcpu pointer
*
- * Check if the virtual timer was armed and either schedule a corresponding
- * soft timer or inject directly if already expired.
+ * Check if the virtual timer has expired while we were running in the guest,
+ * and inject an interrupt if that was the case.
*/
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- cycle_t cval, now;
- u64 ns;
BUG_ON(timer_is_armed(timer));
- if (kvm_timer_should_fire(vcpu)) {
- /*
- * Timer has already expired while we were not
- * looking. Inject the interrupt and carry on.
- */
+ if (kvm_timer_should_fire(vcpu))
kvm_timer_inject_irq(vcpu);
- return;
- }
-
- cval = timer->cntv_cval;
- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
- ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
- &timecounter->frac);
- timer_arm(timer, ns);
}
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
--
From 9103617df202d74e5c65f8af84a9aa727f812a06 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Tue, 25 Aug 2015 22:50:57 +0200
Subject: arm/arm64: KVM: vgic: Factor out level irq processing on guest exit
Currently vgic_process_maintenance() handles a completed
level-triggered interrupt directly, but we are soon going to reuse this
logic for level-triggered mapped interrupts with the HW bit set, so
move this logic into a separate static function.
Probably the most scary part of this commit is convincing yourself that
the new flow is safe compared to the old one. In the following I
try to list the changes and why they are harmless:
Move vgic_irq_clear_queued after kvm_notify_acked_irq:
Harmless because the only potential effect of clearing the queued
flag wrt. kvm_set_irq is that vgic_update_irq_pending does not set
the pending bit on the emulated CPU interface or in the
pending_on_cpu bitmask if the function is called with level=1.
However, the point of kvm_notify_acked_irq is to call kvm_set_irq
with level=0, and we set the queued flag again in
__kvm_vgic_sync_hwstate later on if the level is still high.
Move vgic_set_lr before kvm_notify_acked_irq:
Also harmless, because the LR accesses are cpu-local operations and
kvm_notify_acked_irq only affects the distributor state
Move vgic_dist_irq_clear_soft_pend after kvm_notify_acked_irq:
Also harmless, because now we check the level state in the
clear_soft_pend function and lower the pending bits if the level is
low.
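Schematically, each EOIed LR is now handled as follows (condensed from
the vgic_process_maintenance() hunk below):

    vlr = vgic_get_lr(vcpu, lr);
    /* kvm_notify_acked_irq() resets the IRQ level via kvm_set_irq(),
     * which grabs dist->lock, so call it before taking the lock */
    kvm_notify_acked_irq(kvm, 0, vlr.irq - VGIC_NR_PRIVATE_IRQS);

    spin_lock(&dist->lock);
    level_pending |= process_level_irq(vcpu, lr, vlr);
    spin_unlock(&dist->lock);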
Reviewed-by: Eric Auger
Reviewed-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/vgic.c | 94 +++++++++++++++++++++++++++++++----------------------
1 file changed, 56 insertions(+), 38 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 66c66165e712..367a180fb5ac 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -107,6 +107,7 @@ static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
int virt_irq);
+static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
@@ -357,6 +358,11 @@ static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
+ if (!vgic_dist_irq_get_level(vcpu, irq)) {
+ vgic_dist_irq_clear_pending(vcpu, irq);
+ if (!compute_pending_for_cpu(vcpu))
+ clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
+ }
}
static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
@@ -1338,12 +1344,56 @@ epilog:
}
}
+static int process_level_irq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
+{
+ int level_pending = 0;
+
+ vlr.state = 0;
+ vlr.hwirq = 0;
+ vgic_set_lr(vcpu, lr, vlr);
+
+ /*
+ * If the IRQ was EOIed (called from vgic_process_maintenance) or it
+ * went from active to non-active (called from vgic_sync_hwirq) it was
+ * also ACKed and we therefore assume we can clear the soft pending
+ * state (should it have been set) for this interrupt.
+ *
+ * Note: if the IRQ soft pending state was set after the IRQ was
+ * acked, it actually shouldn't be cleared, but we have no way of
+ * knowing that unless we start trapping ACKs when the soft-pending
+ * state is set.
+ */
+ vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
+
+ /*
+ * Tell the gic to start sampling the line of this interrupt again.
+ */
+ vgic_irq_clear_queued(vcpu, vlr.irq);
+
+ /* Any additional pending interrupt? */
+ if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
+ vgic_cpu_irq_set(vcpu, vlr.irq);
+ level_pending = 1;
+ } else {
+ vgic_dist_irq_clear_pending(vcpu, vlr.irq);
+ vgic_cpu_irq_clear(vcpu, vlr.irq);
+ }
+
+ /*
+ * Despite being EOIed, the LR may not have
+ * been marked as empty.
+ */
+ vgic_sync_lr_elrsr(vcpu, lr, vlr);
+
+ return level_pending;
+}
+
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
u32 status = vgic_get_interrupt_status(vcpu);
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- bool level_pending = false;
struct kvm *kvm = vcpu->kvm;
+ int level_pending = 0;
kvm_debug("STATUS = %08x\n", status);
@@ -1358,54 +1408,22 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
- spin_lock(&dist->lock);
- vgic_irq_clear_queued(vcpu, vlr.irq);
+ WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
WARN_ON(vlr.state & LR_STATE_MASK);
- vlr.state = 0;
- vgic_set_lr(vcpu, lr, vlr);
- /*
- * If the IRQ was EOIed it was also ACKed and we we
- * therefore assume we can clear the soft pending
- * state (should it had been set) for this interrupt.
- *
- * Note: if the IRQ soft pending state was set after
- * the IRQ was acked, it actually shouldn't be
- * cleared, but we have no way of knowing that unless
- * we start trapping ACKs when the soft-pending state
- * is set.
- */
- vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
/*
* kvm_notify_acked_irq calls kvm_set_irq()
- * to reset the IRQ level. Need to release the
- * lock for kvm_set_irq to grab it.
+ * to reset the IRQ level, which grabs the dist->lock
+ * so we call this before taking the dist->lock.
*/
- spin_unlock(&dist->lock);
-
kvm_notify_acked_irq(kvm, 0,
vlr.irq - VGIC_NR_PRIVATE_IRQS);
- spin_lock(&dist->lock);
-
- /* Any additional pending interrupt? */
- if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
- vgic_cpu_irq_set(vcpu, vlr.irq);
- level_pending = true;
- } else {
- vgic_dist_irq_clear_pending(vcpu, vlr.irq);
- vgic_cpu_irq_clear(vcpu, vlr.irq);
- }
+ spin_lock(&dist->lock);
+ level_pending |= process_level_irq(vcpu, lr, vlr);
spin_unlock(&dist->lock);
-
- /*
- * Despite being EOIed, the LR may not have
- * been marked as empty.
- */
- vgic_sync_lr_elrsr(vcpu, lr, vlr);
}
}
--
From 8bf9a701e103fd17dbdf0355e43ff5200b4823aa Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Sun, 30 Aug 2015 14:42:16 +0200
Subject: arm/arm64: KVM: Implement GICD_ICFGR as RO for PPIs
The GICD_ICFGR defines the bits for the SGIs and PPIs as read-only.
We currently simulate this behavior by writing a hardcoded value to the
register for the SGIs and PPIs on every write of these bits to the
register (ignoring what the guest actually wrote), and by writing the
same value as the reset value to the register.
This is a bit counter-intuitive, as the register is RO for these bits,
and we can just implement it that way, allowing us to control the value
of the bits purely in the reset code.
Reviewed-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/vgic.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 367a180fb5ac..f8ca2e9d2f0b 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -690,10 +690,9 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
vgic_reg_access(mmio, &val, offset,
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
if (mmio->is_write) {
- if (offset < 8) {
- *reg = ~0U; /* Force PPIs/SGIs to 1 */
+ /* Ignore writes to read-only SGI and PPI bits */
+ if (offset < 8)
return false;
- }
val = vgic_cfg_compress(val);
if (offset & 4) {
--
From 54723bb37feac347a169359536f3dff122cabca3 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Sun, 30 Aug 2015 14:45:20 +0200
Subject: arm/arm64: KVM: Use appropriate define in VGIC reset code
We currently initialize the SGIs to be enabled in the VGIC code, but we
use the VGIC_NR_PPIS define for this purpose, instead of the the more
natural VGIC_NR_SGIS. Change this slightly confusing use of the
defines.
Note: This should have no functional change, as both names are defined
to the number 16.
Acked-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/vgic.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index f8ca2e9d2f0b..a44ecf9eca4e 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -2128,8 +2128,12 @@ int vgic_init(struct kvm *kvm)
break;
}
- for (i = 0; i < dist->nr_irqs; i++) {
- if (i < VGIC_NR_PPIS)
+ /*
+ * Enable all SGIs and configure all private IRQs as
+ * edge-triggered.
+ */
+ for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+ if (i < VGIC_NR_SGIS)
vgic_bitmap_set_irq_val(&dist->irq_enabled,
vcpu->vcpu_id, i, 1);
if (i < VGIC_NR_PRIVATE_IRQS)
--
From 4b4b4512da2a844b8da2585609b67fae1ce4f4db Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Sun, 30 Aug 2015 15:01:27 +0200
Subject: arm/arm64: KVM: Rework the arch timer to use level-triggered
semantics
The arch timer currently uses edge-triggered semantics in the sense that
the line is never sampled by the vgic and lowering the line from the
timer to the vgic doesn't have any effect on the pending state of
virtual interrupts in the vgic. This means that we do not support a
guest with the otherwise valid behavior of (1) disable interrupts (2)
enable the timer (3) disable the timer (4) enable interrupts. Such a
guest would validly not expect to see any interrupts on real hardware,
but will see interrupts on KVM.
This patch fixes this shortcoming through the following series of
changes.
First, we change the flow of the timer/vgic sync/flush operations. Now
the timer is always flushed/synced before the vgic, because the vgic
samples the state of the timer output. This has the implication that we
move the timer operations in to non-preempible sections, but that is
fine after the previous commit getting rid of hrtimer schedules on every
entry/exit.
Second, we change the internal behavior of the timer, letting the timer
keep track of its previous output state, and only lower/raise the line
to the vgic when the state changes. Note that in theory this could have
been accomplished more simply by signalling the vgic every time the
state *potentially* changed, but we don't want to be hitting the vgic
more often than necessary.
Third, we get rid of the use of the map->active field in the vgic and
instead simply set the interrupt as active on the physical distributor
whenever the input to the GIC is asserted and conversely clear the
physical active state when the input to the GIC is deasserted.
Fourth, and finally, we now initialize the timer PPIs (and all the other
unused PPIs for now), to be level-triggered, and modify the sync code to
sample the line state on HW sync and re-inject a new interrupt if it is
still pending at that time.
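The heart of the second change is the new state tracker; condensed from
the arch_timer.c hunks below:

    static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
    {
            struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

            /* wait for the vgic before raising the line (see the
             * comment about SET_ONE_REG in the hunk below) */
            if (!vgic_initialized(vcpu->kvm))
                    return;

            /* only signal the vgic on an actual edge of the output line */
            if (kvm_timer_should_fire(vcpu) != timer->irq.level)
                    kvm_timer_update_irq(vcpu, !timer->irq.level);
    }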
Signed-off-by: Christoffer Dall
---
arch/arm/kvm/arm.c | 11 +++--
include/kvm/arm_arch_timer.h | 2 +-
include/kvm/arm_vgic.h | 3 --
virt/kvm/arm/arch_timer.c | 81 +++++++++++++++++++++++-----------
virt/kvm/arm/vgic.c | 102 +++++++++++--------------------------------
5 files changed, 91 insertions(+), 108 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 7ed4d475d83a..59125f48c707 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -561,9 +561,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
local_irq_enable();
+ kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
preempt_enable();
- kvm_timer_sync_hwstate(vcpu);
continue;
}
@@ -608,12 +608,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_guest_exit();
trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+ /*
+ * We must sync the timer state before the vgic state so that
+ * the vgic can properly sample the updated state of the
+ * interrupt line.
+ */
+ kvm_timer_sync_hwstate(vcpu);
+
kvm_vgic_sync_hwstate(vcpu);
preempt_enable();
- kvm_timer_sync_hwstate(vcpu);
-
ret = handle_exit(vcpu, run, ret);
}
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index ef14cc1f1f26..1800227af9d6 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -51,7 +51,7 @@ struct arch_timer_cpu {
bool armed;
/* Timer IRQ */
- const struct kvm_irq_level *irq;
+ struct kvm_irq_level irq;
/* VGIC mapping */
struct irq_phys_map *map;
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4e14dac282bb..7bc5d0224ab0 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -159,7 +159,6 @@ struct irq_phys_map {
u32 virt_irq;
u32 phys_irq;
u32 irq;
- bool active;
};
struct irq_phys_map_entry {
@@ -354,8 +353,6 @@ int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
int virt_irq, int irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
-bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map);
-void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 32095fbb5d7c..523816d8c402 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -59,18 +59,6 @@ static void timer_disarm(struct arch_timer_cpu *timer)
}
}
-static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
-{
- int ret;
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
- kvm_vgic_set_phys_irq_active(timer->map, true);
- ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
- timer->map,
- timer->irq->level);
- WARN_ON(ret);
-}
-
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -116,8 +104,7 @@ static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
- (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) &&
- !kvm_vgic_get_phys_irq_active(timer->map);
+ (timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
}
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
@@ -134,6 +121,41 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
return cval <= now;
}
+static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
+{
+ int ret;
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ BUG_ON(!vgic_initialized(vcpu->kvm));
+
+ timer->irq.level = new_level;
+ ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
+ timer->map,
+ timer->irq.level);
+ WARN_ON(ret);
+}
+
+/*
+ * Check if there was a change in the timer state (should we raise or lower
+ * the line level to the GIC).
+ */
+static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ /*
+ * If userspace modified the timer registers via SET_ONE_REG before
+ * the vgic was initialized, we mustn't set the timer->irq.level value
+ * because the guest would never see the interrupt. Instead wait
+ * until we call this function from kvm_timer_flush_hwstate.
+ */
+ if (!vgic_initialized(vcpu->kvm))
+ return;
+
+ if (kvm_timer_should_fire(vcpu) != timer->irq.level)
+ kvm_timer_update_irq(vcpu, !timer->irq.level);
+}
+
/*
* Schedule the background timer before calling kvm_vcpu_block, so that this
* thread is removed from its waitqueue and made runnable when there's a timer
@@ -192,17 +214,20 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
bool phys_active;
int ret;
- if (kvm_timer_should_fire(vcpu))
- kvm_timer_inject_irq(vcpu);
+ kvm_timer_update_state(vcpu);
/*
- * We keep track of whether the edge-triggered interrupt has been
- * signalled to the vgic/guest, and if so, we mark the interrupt as
- * active on the physical distributor to prevent the timer from raising a
- * physical interrupt whenever we run a guest, preventing forward
- * VCPU progress.
+ * If we enter the guest with the virtual input level to the VGIC
+ * asserted, then we have already told the VGIC what we need to, and
+ * we don't need to exit from the guest until the guest deactivates
+ * the already injected interrupt, so therefore we should set the
+ * hardware active state to prevent unnecessary exits from the guest.
+ *
+ * Conversely, if the virtual input level is deasserted, then always
+ * clear the hardware active state to ensure that hardware interrupts
+ * from the timer trigger a guest exit.
*/
- if (kvm_vgic_get_phys_irq_active(timer->map))
+ if (timer->irq.level)
phys_active = true;
else
phys_active = false;
@@ -226,8 +251,11 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
BUG_ON(timer_is_armed(timer));
- if (kvm_timer_should_fire(vcpu))
- kvm_timer_inject_irq(vcpu);
+ /*
+ * The guest could have modified the timer registers or the timer
+ * could have expired, update the timer state.
+ */
+ kvm_timer_update_state(vcpu);
}
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
@@ -242,7 +270,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
* kvm_vcpu_set_target(). To handle this, we determine
* vcpu timer irq number when the vcpu is reset.
*/
- timer->irq = irq;
+ timer->irq.irq = irq->irq;
/*
* The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
@@ -251,6 +279,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
* the ARMv7 architecture.
*/
timer->cntv_ctl = 0;
+ kvm_timer_update_state(vcpu);
/*
* Tell the VGIC that the virtual interrupt is tied to a
@@ -295,6 +324,8 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
default:
return -1;
}
+
+ kvm_timer_update_state(vcpu);
return 0;
}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index a44ecf9eca4e..3c2909c1bda3 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -537,34 +537,6 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm,
return false;
}
-/*
- * If a mapped interrupt's state has been modified by the guest such that it
- * is no longer active or pending, without it having gone through the sync path,
- * then the map->active field must be cleared so the interrupt can be taken
- * again.
- */
-static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
-{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- struct list_head *root;
- struct irq_phys_map_entry *entry;
- struct irq_phys_map *map;
-
- rcu_read_lock();
-
- /* Check for PPIs */
- root = &vgic_cpu->irq_phys_map_list;
- list_for_each_entry_rcu(entry, root, entry) {
- map = &entry->map;
-
- if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
- !vgic_irq_is_active(vcpu, map->virt_irq))
- map->active = false;
- }
-
- rcu_read_unlock();
-}
-
bool vgic_handle_clear_pending_reg(struct kvm *kvm,
struct kvm_exit_mmio *mmio,
phys_addr_t offset, int vcpu_id)
@@ -595,7 +567,6 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
vcpu_id, offset);
vgic_reg_access(mmio, reg, offset, mode);
- vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
vgic_update_state(kvm);
return true;
}
@@ -633,7 +604,6 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm,
ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
if (mmio->is_write) {
- vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
vgic_update_state(kvm);
return true;
}
@@ -1443,29 +1413,37 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
/*
* Save the physical active state, and reset it to inactive.
*
- * Return 1 if HW interrupt went from active to inactive, and 0 otherwise.
+ * Return true if there's a pending level triggered interrupt line to queue.
*/
-static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
+static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct irq_phys_map *map;
+ bool phys_active;
+ bool level_pending;
int ret;
if (!(vlr.state & LR_HW))
- return 0;
+ return false;
map = vgic_irq_map_search(vcpu, vlr.irq);
BUG_ON(!map);
ret = irq_get_irqchip_state(map->irq,
IRQCHIP_STATE_ACTIVE,
- &map->active);
+ &phys_active);
WARN_ON(ret);
- if (map->active)
+ if (phys_active)
return 0;
- return 1;
+ /* Mapped edge-triggered interrupts not yet supported. */
+ WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
+ spin_lock(&dist->lock);
+ level_pending = process_level_irq(vcpu, lr, vlr);
+ spin_unlock(&dist->lock);
+ return level_pending;
}
/* Sync back the VGIC state after a guest run */
@@ -1490,18 +1468,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
continue;
vlr = vgic_get_lr(vcpu, lr);
- if (vgic_sync_hwirq(vcpu, vlr)) {
- /*
- * So this is a HW interrupt that the guest
- * EOI-ed. Clean the LR state and allow the
- * interrupt to be sampled again.
- */
- vlr.state = 0;
- vlr.hwirq = 0;
- vgic_set_lr(vcpu, lr, vlr);
- vgic_irq_clear_queued(vcpu, vlr.irq);
- set_bit(lr, elrsr_ptr);
- }
+ if (vgic_sync_hwirq(vcpu, lr, vlr))
+ level_pending = true;
if (!test_bit(lr, elrsr_ptr))
continue;
@@ -1880,30 +1848,6 @@ static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
kfree(entry);
}
-/**
- * kvm_vgic_get_phys_irq_active - Return the active state of a mapped IRQ
- *
- * Return the logical active state of a mapped interrupt. This doesn't
- * necessarily reflects the current HW state.
- */
-bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map)
-{
- BUG_ON(!map);
- return map->active;
-}
-
-/**
- * kvm_vgic_set_phys_irq_active - Set the active state of a mapped IRQ
- *
- * Set the logical active state of a mapped interrupt. This doesn't
- * immediately affects the HW state.
- */
-void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active)
-{
- BUG_ON(!map);
- map->active = active;
-}
-
/**
* kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
* @vcpu: The VCPU pointer
@@ -2129,17 +2073,23 @@ int vgic_init(struct kvm *kvm)
}
/*
- * Enable all SGIs and configure all private IRQs as
- * edge-triggered.
+ * Enable and configure all SGIs to be edge-triggered and
+ * configure all PPIs as level-triggered.
*/
for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
- if (i < VGIC_NR_SGIS)
+ if (i < VGIC_NR_SGIS) {
+ /* SGIs */
vgic_bitmap_set_irq_val(&dist->irq_enabled,
vcpu->vcpu_id, i, 1);
- if (i < VGIC_NR_PRIVATE_IRQS)
vgic_bitmap_set_irq_val(&dist->irq_cfg,
vcpu->vcpu_id, i,
VGIC_CFG_EDGE);
+ } else if (i < VGIC_NR_PRIVATE_IRQS) {
+ /* PPIs */
+ vgic_bitmap_set_irq_val(&dist->irq_cfg,
+ vcpu->vcpu_id, i,
+ VGIC_CFG_LEVEL);
+ }
}
vgic_enable(vcpu);
--
From 8fe2f19e6e6015911bdd4cfcdb23a32e146ba570 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Fri, 4 Sep 2015 21:25:12 +0200
Subject: arm/arm64: KVM: Support edge-triggered forwarded interrupts
We mark edge-triggered interrupts with the HW bit set as queued to
prevent the VGIC code from injecting LRs with both the Active and
Pending bits set at the same time while also setting the HW bit,
because the hardware does not support this.
However, this means that we must also clear the queued flag when we sync
back a LR where the state on the physical distributor went from active
to inactive because the guest deactivated the interrupt. At this point
we must also check if the interrupt is pending on the distributor, and
tell the VGIC to queue it again if it is.
Since these actions on the sync path are extremely close to those for
level-triggered interrupts, rename process_level_irq to
process_queued_irq, allowing it to cater for both cases.
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/vgic.c | 40 ++++++++++++++++++++++------------------
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 3c2909c1bda3..84abc6f38c1d 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1313,13 +1313,10 @@ epilog:
}
}
-static int process_level_irq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
+static int process_queued_irq(struct kvm_vcpu *vcpu,
+ int lr, struct vgic_lr vlr)
{
- int level_pending = 0;
-
- vlr.state = 0;
- vlr.hwirq = 0;
- vgic_set_lr(vcpu, lr, vlr);
+ int pending = 0;
/*
* If the IRQ was EOIed (called from vgic_process_maintenance) or it
@@ -1335,26 +1332,35 @@ static int process_level_irq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
/*
- * Tell the gic to start sampling the line of this interrupt again.
+ * Tell the gic to start sampling this interrupt again.
*/
vgic_irq_clear_queued(vcpu, vlr.irq);
/* Any additional pending interrupt? */
- if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
- vgic_cpu_irq_set(vcpu, vlr.irq);
- level_pending = 1;
+ if (vgic_irq_is_edge(vcpu, vlr.irq)) {
+ BUG_ON(!(vlr.state & LR_HW));
+ pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
} else {
- vgic_dist_irq_clear_pending(vcpu, vlr.irq);
- vgic_cpu_irq_clear(vcpu, vlr.irq);
+ if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
+ vgic_cpu_irq_set(vcpu, vlr.irq);
+ pending = 1;
+ } else {
+ vgic_dist_irq_clear_pending(vcpu, vlr.irq);
+ vgic_cpu_irq_clear(vcpu, vlr.irq);
+ }
}
/*
* Despite being EOIed, the LR may not have
* been marked as empty.
*/
+ vlr.state = 0;
+ vlr.hwirq = 0;
+ vgic_set_lr(vcpu, lr, vlr);
+
vgic_sync_lr_elrsr(vcpu, lr, vlr);
- return level_pending;
+ return pending;
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
@@ -1391,7 +1397,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
vlr.irq - VGIC_NR_PRIVATE_IRQS);
spin_lock(&dist->lock);
- level_pending |= process_level_irq(vcpu, lr, vlr);
+ level_pending |= process_queued_irq(vcpu, lr, vlr);
spin_unlock(&dist->lock);
}
}
@@ -1413,7 +1419,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
/*
* Save the physical active state, and reset it to inactive.
*
- * Return true if there's a pending level triggered interrupt line to queue.
+ * Return true if there's a pending forwarded interrupt to queue.
*/
static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
{
@@ -1438,10 +1444,8 @@ static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
if (phys_active)
return 0;
- /* Mapped edge-triggered interrupts not yet supported. */
- WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
spin_lock(&dist->lock);
- level_pending = process_level_irq(vcpu, lr, vlr);
+ level_pending = process_queued_irq(vcpu, lr, vlr);
spin_unlock(&dist->lock);
return level_pending;
}
--
From e21f09108754dfdfbb30e547f4edbd3b6884eedb Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Sun, 30 Aug 2015 13:57:20 +0200
Subject: arm/arm64: KVM: Add tracepoints for vgic and timer
The VGIC and timer code for KVM arm/arm64 doesn't have any tracepoints
or tracepoint infrastructure defined. Rewriting some of the timer
handling code showed me how much we need this, so let's add these simple
tracepoints once and for all; we can easily expand with additional
tracepoints in these files as we go along.
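Usage follows the standard kernel tracepoint pattern, e.g. on the timer
side (from the arch_timer.c hunk below):

    /* in kvm_timer_update_irq(), right after the level changes: */
    trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
                               timer->irq.level);

Since TRACE_SYSTEM is kvm, the events can then typically be enabled at
runtime through tracefs, e.g.
/sys/kernel/debug/tracing/events/kvm/kvm_timer_update_irq/enable.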
Cc: Wei Huang
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/arch_timer.c | 4 +++
virt/kvm/arm/trace.h | 63 +++++++++++++++++++++++++++++++++++++++++++++++
virt/kvm/arm/vgic.c | 5 ++++
3 files changed, 72 insertions(+)
create mode 100644 virt/kvm/arm/trace.h
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 523816d8c402..21a0ab2d8919 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -28,6 +28,8 @@
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
+#include "trace.h"
+
static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;
@@ -129,6 +131,8 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
BUG_ON(!vgic_initialized(vcpu->kvm));
timer->irq.level = new_level;
+ trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
+ timer->irq.level);
ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
timer->map,
timer->irq.level);
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
new file mode 100644
index 000000000000..37d8b98867d5
--- /dev/null
+++ b/virt/kvm/arm/trace.h
@@ -0,0 +1,63 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+/*
+ * Tracepoints for vgic
+ */
+TRACE_EVENT(vgic_update_irq_pending,
+ TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
+ TP_ARGS(vcpu_id, irq, level),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_id )
+ __field( __u32, irq )
+ __field( bool, level )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->irq = irq;
+ __entry->level = level;
+ ),
+
+ TP_printk("VCPU: %ld, IRQ %d, level: %d",
+ __entry->vcpu_id, __entry->irq, __entry->level)
+);
+
+/*
+ * Tracepoints for arch_timer
+ */
+TRACE_EVENT(kvm_timer_update_irq,
+ TP_PROTO(unsigned long vcpu_id, __u32 irq, int level),
+ TP_ARGS(vcpu_id, irq, level),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_id )
+ __field( __u32, irq )
+ __field( int, level )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->irq = irq;
+ __entry->level = level;
+ ),
+
+ TP_printk("VCPU: %ld, IRQ %d, level %d",
+ __entry->vcpu_id, __entry->irq, __entry->level)
+);
+
+#endif /* _TRACE_KVM_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 84abc6f38c1d..d4669eb29b77 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -34,6 +34,9 @@
#include <asm/kvm.h>
#include <kvm/iodev.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
/*
* How the whole thing works (courtesy of Christoffer Dall):
*
@@ -1574,6 +1577,8 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
int enabled;
bool ret = true, can_inject = true;
+ trace_vgic_update_irq_pending(cpuid, irq_num, level);
+
if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
return -EINVAL;
--
From c4cd4c168b81dad53e659d18cdae653bc0ec2384 Mon Sep 17 00:00:00 2001
From: Pavel Fedin
Date: Tue, 27 Oct 2015 11:37:29 +0300
Subject: KVM: arm/arm64: Optimize away redundant LR tracking
Currently we use vgic_irq_lr_map to track which LRs hold which IRQs,
and the lr_used bitmap to track which LRs are used or free.
vgic_irq_lr_map is actually used only for piggy-back optimization, and
can be easily replaced by iterating over lr_used. This is good because
in the future, when LPI support is introduced, the number of IRQs will
grow to at least 16384, while numbers from 1024 to 8192 are never going
to be used, which would make the map a huge waste of memory.
In turn, lr_used is also completely redundant since commit
ae705930fca6322600690df9dc1c7d0516145a93 ("arm/arm64: KVM: Keep elrsr/aisr
in sync with software model"), because together with lr_used we also update
elrsr. This allows to easily replace lr_used with elrsr, inverting all
conditions (because in elrsr '1' means 'free').
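The inverted search is easy to see in a small standalone sketch (all names hypothetical; a plain uint64_t stands in for the elrsr register, where a set bit means the LR is free):

	#include <stdio.h>
	#include <stdint.h>

	#define NR_LR 4

	static uint64_t elrsr = ~0ULL;	/* '1' = LR free, '0' = LR in use */
	static int lr_irq[NR_LR];	/* IRQ held by each in-use LR */

	/* Replaces the per-IRQ map: scan the in-use (clear) bits. */
	static int find_lr_for_irq(int irq)
	{
		for (int lr = 0; lr < NR_LR; lr++)
			if (!(elrsr & (1ULL << lr)) && lr_irq[lr] == irq)
				return lr;
		return -1;
	}

	/* Allocating an LR is finding the first set (free) bit. */
	static int alloc_lr(int irq)
	{
		for (int lr = 0; lr < NR_LR; lr++)
			if (elrsr & (1ULL << lr)) {
				elrsr &= ~(1ULL << lr);
				lr_irq[lr] = irq;
				return lr;
			}
		return -1;
	}

	int main(void)
	{
		int lr = alloc_lr(27);

		printf("IRQ 27 -> LR%d, found again at LR%d\n",
		       lr, find_lr_for_irq(27));
		return 0;
	}

The lookup becomes linear in the number of LRs (at most 64) instead of constant, but it removes a per-IRQ allocation that would otherwise have to cover every possible IRQ number.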
Signed-off-by: Pavel Fedin
Signed-off-by: Christoffer Dall
---
include/kvm/arm_vgic.h | 6 ------
virt/kvm/arm/vgic-v2.c | 1 +
virt/kvm/arm/vgic-v3.c | 1 +
virt/kvm/arm/vgic.c | 53 ++++++++++++++------------------------------------
4 files changed, 17 insertions(+), 44 deletions(-)
(limited to 'virt/kvm')
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 8065801a1847..3936bf802e1d 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -295,9 +295,6 @@ struct vgic_v3_cpu_if {
};
struct vgic_cpu {
- /* per IRQ to LR mapping */
- u8 *vgic_irq_lr_map;
-
/* Pending/active/both interrupts on this VCPU */
DECLARE_BITMAP(pending_percpu, VGIC_NR_PRIVATE_IRQS);
DECLARE_BITMAP(active_percpu, VGIC_NR_PRIVATE_IRQS);
@@ -308,9 +305,6 @@ struct vgic_cpu {
unsigned long *active_shared;
unsigned long *pend_act_shared;
- /* Bitmap of used/free list registers */
- DECLARE_BITMAP(lr_used, VGIC_V2_MAX_LRS);
-
/* Number of list registers on this CPU */
int nr_lr;
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 8d7b04db8471..c0f5d7fad9ea 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -158,6 +158,7 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
* anyway.
*/
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;
/* Get the show on the road... */
vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 7dd5d62f10a1..92003cb61a0a 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -193,6 +193,7 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
* anyway.
*/
vgic_v3->vgic_vmcr = 0;
+ vgic_v3->vgic_elrsr = ~0;
/*
* If we are emulating a GICv3, we do it in an non-GICv2-compatible
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index d4669eb29b77..265a41035728 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -108,6 +108,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
int virt_irq);
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
@@ -691,9 +692,11 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
int i;
- for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
+ for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
struct vgic_lr lr = vgic_get_lr(vcpu, i);
/*
@@ -1098,7 +1101,6 @@ static inline void vgic_enable(struct kvm_vcpu *vcpu)
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
/*
@@ -1112,8 +1114,6 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
vlr.state = 0;
vgic_set_lr(vcpu, lr_nr, vlr);
- clear_bit(lr_nr, vgic_cpu->lr_used);
- vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
@@ -1128,10 +1128,11 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
*/
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
int lr;
- for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
+ for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
@@ -1188,8 +1189,9 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
*/
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
struct vgic_lr vlr;
int lr;
@@ -1200,28 +1202,22 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
kvm_debug("Queue IRQ%d\n", irq);
- lr = vgic_cpu->vgic_irq_lr_map[irq];
-
/* Do we have an active interrupt for the same CPUID? */
- if (lr != LR_EMPTY) {
+ for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
vlr = vgic_get_lr(vcpu, lr);
- if (vlr.source == sgi_source_id) {
+ if (vlr.irq == irq && vlr.source == sgi_source_id) {
kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
- BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
return true;
}
}
/* Try to use another LR for this interrupt */
- lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
- vgic->nr_lr);
+ lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
if (lr >= vgic->nr_lr)
return false;
kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
- vgic_cpu->vgic_irq_lr_map[irq] = lr;
- set_bit(lr, vgic_cpu->lr_used);
vlr.irq = irq;
vlr.source = sgi_source_id;
@@ -1456,7 +1452,6 @@ static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
/* Sync back the VGIC state after a guest run */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
u64 elrsr;
unsigned long *elrsr_ptr;
@@ -1469,22 +1464,10 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
/* Deal with HW interrupts, and clear mappings for empty LRs */
for (lr = 0; lr < vgic->nr_lr; lr++) {
- struct vgic_lr vlr;
-
- if (!test_bit(lr, vgic_cpu->lr_used))
- continue;
-
- vlr = vgic_get_lr(vcpu, lr);
- if (vgic_sync_hwirq(vcpu, lr, vlr))
- level_pending = true;
-
- if (!test_bit(lr, elrsr_ptr))
- continue;
-
- clear_bit(lr, vgic_cpu->lr_used);
+ struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
+ level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
BUG_ON(vlr.irq >= dist->nr_irqs);
- vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
}
/* Check if we still have something up our sleeve... */
@@ -1912,12 +1895,10 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
kfree(vgic_cpu->pending_shared);
kfree(vgic_cpu->active_shared);
kfree(vgic_cpu->pend_act_shared);
- kfree(vgic_cpu->vgic_irq_lr_map);
vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
vgic_cpu->pending_shared = NULL;
vgic_cpu->active_shared = NULL;
vgic_cpu->pend_act_shared = NULL;
- vgic_cpu->vgic_irq_lr_map = NULL;
}
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
@@ -1928,18 +1909,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
- vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
if (!vgic_cpu->pending_shared
|| !vgic_cpu->active_shared
- || !vgic_cpu->pend_act_shared
- || !vgic_cpu->vgic_irq_lr_map) {
+ || !vgic_cpu->pend_act_shared) {
kvm_vgic_vcpu_destroy(vcpu);
return -ENOMEM;
}
- memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
-
/*
* Store the number of LRs per vcpu, so we don't have to go
* all the way to the distributor structure to find out. Only
--
cgit v1.2.3-58-ga151
From 212c76545dde8370ebde2a170e4f8e1ed8441dc0 Mon Sep 17 00:00:00 2001
From: Pavel Fedin
Date: Tue, 27 Oct 2015 11:37:30 +0300
Subject: KVM: arm/arm64: Clean up vgic_retire_lr() and surroundings
1. Remove the unnecessary 'irq' argument, because the IRQ number can be
retrieved from the LR itself (see the sketch after this list).
2. Since cff9211eb1a1f58ce7f5a2d596b617928fd4be0e
("arm/arm64: KVM: Fix arch timer behavior for disabled interrupts"),
vgic_retire_lr() itself queues LR_STATE_PENDING back to the distributor
and clears vlr.state. Therefore, remove the now-duplicated check, along
with all its accompanying bit manipulation, from vgic_unqueue_irqs().
3. vgic_retire_lr() is always accompanied by vgic_irq_clear_queued(). Since
it already does more than just clear the LR, move
vgic_irq_clear_queued() inside it.
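Point 1 amounts to dropping a parameter whose value the LR already records, e.g. (standalone sketch; struct and names hypothetical):

	#include <stdio.h>

	struct lr_entry {	/* stand-in for struct vgic_lr */
		int irq;
		int state;
	};

	static struct lr_entry lrs[4] = { { .irq = 27, .state = 1 } };

	/*
	 * Old: retire_lr(int lr_nr, int irq) -- the irq argument merely
	 * duplicated what the LR records. New: read it back instead.
	 */
	static void retire_lr(int lr_nr)
	{
		struct lr_entry vlr = lrs[lr_nr];

		/* clear-queued bookkeeping is keyed on vlr.irq here */
		printf("retiring LR%d holding IRQ %d\n", lr_nr, vlr.irq);
	}

	int main(void)
	{
		retire_lr(0);
		return 0;
	}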
Signed-off-by: Pavel Fedin
Signed-off-by: Christoffer Dall
---
virt/kvm/arm/vgic.c | 37 ++++++++++---------------------------
1 file changed, 10 insertions(+), 27 deletions(-)
(limited to 'virt/kvm')
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 265a41035728..96e45f3da534 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -105,7 +105,7 @@
#include "vgic.h"
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
@@ -717,30 +717,14 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
* interrupt then move the active state to the
* distributor tracking bit.
*/
- if (lr.state & LR_STATE_ACTIVE) {
+ if (lr.state & LR_STATE_ACTIVE)
vgic_irq_set_active(vcpu, lr.irq);
- lr.state &= ~LR_STATE_ACTIVE;
- }
/*
* Reestablish the pending state on the distributor and the
- * CPU interface. It may have already been pending, but that
- * is fine, then we are only setting a few bits that were
- * already set.
+ * CPU interface and mark the LR as free for other use.
*/
- if (lr.state & LR_STATE_PENDING) {
- vgic_dist_irq_set_pending(vcpu, lr.irq);
- lr.state &= ~LR_STATE_PENDING;
- }
-
- vgic_set_lr(vcpu, i, lr);
-
- /*
- * Mark the LR as free for other use.
- */
- BUG_ON(lr.state & LR_STATE_MASK);
- vgic_retire_lr(i, lr.irq, vcpu);
- vgic_irq_clear_queued(vcpu, lr.irq);
+ vgic_retire_lr(i, vcpu);
/* Finally update the VGIC state. */
vgic_update_state(vcpu->kvm);
@@ -1099,16 +1083,18 @@ static inline void vgic_enable(struct kvm_vcpu *vcpu)
vgic_ops->enable(vcpu);
}
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
{
struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
+ vgic_irq_clear_queued(vcpu, vlr.irq);
+
/*
* We must transfer the pending state back to the distributor before
* retiring the LR, otherwise we may lose edge-triggered interrupts.
*/
if (vlr.state & LR_STATE_PENDING) {
- vgic_dist_irq_set_pending(vcpu, irq);
+ vgic_dist_irq_set_pending(vcpu, vlr.irq);
vlr.hwirq = 0;
}
@@ -1135,11 +1121,8 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
- vgic_retire_lr(lr, vlr.irq, vcpu);
- if (vgic_irq_is_queued(vcpu, vlr.irq))
- vgic_irq_clear_queued(vcpu, vlr.irq);
- }
+ if (!vgic_irq_is_enabled(vcpu, vlr.irq))
+ vgic_retire_lr(lr, vcpu);
}
}
--
cgit v1.2.3-58-ga151
From 26caea7693cb99833fe4ecc544c842289d6b3f69 Mon Sep 17 00:00:00 2001
From: Pavel Fedin
Date: Tue, 27 Oct 2015 11:37:31 +0300
Subject: KVM: arm/arm64: Merge vgic_set_lr() and vgic_sync_lr_elrsr()
Now we see that vgic_set_lr() and vgic_sync_lr_elrsr() are always used
together. Merge them into one function, saving a second vgic_ops
dereference on every call.
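The saving is concrete: every call site used to fetch two function pointers from the vgic_ops table and make two indirect calls back to back; after the merge it makes one. A standalone sketch of the merged shape (all names hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	struct lr_entry {	/* stand-in for struct vgic_lr */
		int irq;
		int state;
	};

	static uint64_t elrsr;
	static struct lr_entry lrs[4];

	/*
	 * Merged callback: writes the LR and refreshes elrsr in one
	 * call, where the old code called set_lr then sync_lr_elrsr.
	 */
	static void set_lr_merged(int lr_nr, struct lr_entry vlr)
	{
		lrs[lr_nr] = vlr;		/* "write the hardware LR" */
		if (!vlr.state)
			elrsr |= 1ULL << lr_nr;	/* empty: mark free */
		else
			elrsr &= ~(1ULL << lr_nr);
	}

	int main(void)
	{
		set_lr_merged(0, (struct lr_entry){ .irq = 27, .state = 1 });
		printf("elrsr = %#llx\n", (unsigned long long)elrsr);
		return 0;
	}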
Signed-off-by: Pavel Fedin
Signed-off-by: Christoffer Dall
---
include/kvm/arm_vgic.h | 1 -
virt/kvm/arm/vgic-v2.c | 5 -----
virt/kvm/arm/vgic-v3.c | 5 -----
virt/kvm/arm/vgic.c | 14 ++------------
4 files changed, 2 insertions(+), 23 deletions(-)
(limited to 'virt/kvm')
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 3936bf802e1d..f62addc17dcf 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -112,7 +112,6 @@ struct vgic_vmcr {
struct vgic_ops {
struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
- void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
void (*clear_eisr)(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index c0f5d7fad9ea..ff02f08df74d 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -79,11 +79,7 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);
vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
-}
-static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
else
@@ -167,7 +163,6 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v2_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
- .sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
.get_elrsr = vgic_v2_get_elrsr,
.get_eisr = vgic_v2_get_eisr,
.clear_eisr = vgic_v2_clear_eisr,
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 92003cb61a0a..487d6357b7e7 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -112,11 +112,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
}
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
-}
-static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
else
@@ -212,7 +208,6 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v3_ops = {
.get_lr = vgic_v3_get_lr,
.set_lr = vgic_v3_set_lr,
- .sync_lr_elrsr = vgic_v3_sync_lr_elrsr,
.get_elrsr = vgic_v3_get_elrsr,
.get_eisr = vgic_v3_get_eisr,
.clear_eisr = vgic_v3_clear_eisr,
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 96e45f3da534..fe451d4885ae 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1032,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
vgic_ops->set_lr(vcpu, lr, vlr);
}
-static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr vlr)
-{
- vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
-}
-
static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
return vgic_ops->get_elrsr(vcpu);
@@ -1100,7 +1094,6 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
vlr.state = 0;
vgic_set_lr(vcpu, lr_nr, vlr);
- vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
@@ -1162,7 +1155,6 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
}
vgic_set_lr(vcpu, lr_nr, vlr);
- vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
@@ -1340,8 +1332,6 @@ static int process_queued_irq(struct kvm_vcpu *vcpu,
vlr.hwirq = 0;
vgic_set_lr(vcpu, lr, vlr);
- vgic_sync_lr_elrsr(vcpu, lr, vlr);
-
return pending;
}
@@ -1442,8 +1432,6 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
bool level_pending;
level_pending = vgic_process_maintenance(vcpu);
- elrsr = vgic_get_elrsr(vcpu);
- elrsr_ptr = u64_to_bitmask(&elrsr);
/* Deal with HW interrupts, and clear mappings for empty LRs */
for (lr = 0; lr < vgic->nr_lr; lr++) {
@@ -1454,6 +1442,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
}
/* Check if we still have something up our sleeve... */
+ elrsr = vgic_get_elrsr(vcpu);
+ elrsr_ptr = u64_to_bitmask(&elrsr);
pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
if (level_pending || pending < vgic->nr_lr)
set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
--
cgit v1.2.3-58-ga151