author | Nicholas Piggin <npiggin@gmail.com> | 2023-03-30 20:32:24 +1000
---|---|---
committer | Michael Ellerman <mpe@ellerman.id.au> | 2023-04-03 14:54:44 +1000
commit | 6cd5c1db9983600f1848822e86e4906377b4a899 | (patch)
tree | 788eb8a1a62ae65a1ccacd84bc622e12e1923f4e | /arch
parent | 460ba21d83fef766a5d34260e464c9ab8f10aa05 | (diff)
KVM: PPC: Book3S HV: Set SRR1[PREFIX] bit on injected interrupts
Pass the hypervisor (H)SRR1[PREFIX] indication through to synchronous
interrupts injected into the guest.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230330103224.3589928-3-npiggin@gmail.com
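Every call site touched by the diff below follows the same pattern: the literal 0 (or the bare status flags) previously passed to the kvmppc_core_queue_*() helpers is replaced with the SRR1[PREFIX] bit read from the saved guest MSR image (kvmppc_get_msr(vcpu) & SRR1_PREFIXED), so the SRR1 value the guest sees on the injected interrupt reports whether the faulting instruction was prefixed. A minimal sketch of that pattern follows; it is not part of the patch, and the srr1_prefixed() and reflect_dsi() helpers are hypothetical names introduced only for illustration, while the other symbols are those used in the diff:

```c
/*
 * Illustrative sketch only -- not from the patch. It restates the pattern
 * applied at each injection site below. srr1_prefixed() and reflect_dsi()
 * are hypothetical helpers; kvmppc_get_msr(), SRR1_PREFIXED and
 * kvmppc_core_queue_data_storage() are the symbols used in the diff.
 */
static inline unsigned long srr1_prefixed(struct kvm_vcpu *vcpu)
{
	/*
	 * The (H)SRR1[PREFIX] indication is visible in the saved guest MSR
	 * image; mask it out so it can be OR'd into the SRR1 flags of the
	 * interrupt queued for the guest.
	 */
	return kvmppc_get_msr(vcpu) & SRR1_PREFIXED;
}

static void reflect_dsi(struct kvm_vcpu *vcpu, unsigned long ea,
			unsigned long dsisr)
{
	/*
	 * Before this patch the second argument was a literal 0, which
	 * dropped the PREFIX indication on injection.
	 */
	kvmppc_core_queue_data_storage(vcpu, srr1_prefixed(vcpu), ea, dsisr);
}
```

For interrupts whose flags live entirely in SRR1 (program checks, FP/VSX/VMX unavailable), the diff instead ORs the bit into the existing flags, e.g. SRR1_PROGILL | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED).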
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_radix.c | 13
-rw-r--r-- | arch/powerpc/kvm/book3s_hv.c | 27
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_nested.c | 9
-rw-r--r-- | arch/powerpc/kvm/emulate_loadstore.c | 6
-rw-r--r-- | arch/powerpc/kvm/powerpc.c | 3 |
5 files changed, 38 insertions, 20 deletions
```diff
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 215a6b5ba104..461307b89c3a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -954,7 +954,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	if (dsisr & DSISR_BADACCESS) {
 		/* Reflect to the guest as DSI */
 		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
-		kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
+		kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+				ea, dsisr);
 		return RESUME_GUEST;
 	}
 
@@ -979,7 +981,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 			 * Bad address in guest page table tree, or other
 			 * unusual error - reflect it to the guest as DSI.
 			 */
-			kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, dsisr);
 			return RESUME_GUEST;
 		}
 		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
@@ -988,8 +992,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	if (memslot->flags & KVM_MEM_READONLY) {
 		if (writing) {
 			/* give the guest a DSI */
-			kvmppc_core_queue_data_storage(vcpu, 0, ea,
-					DSISR_ISSTORE | DSISR_PROTFAULT);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
 		kvm_ro = true;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 38c6b33d759e..b2be366b5309 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1428,7 +1428,8 @@ static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
 		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
 		return RESUME_HOST;
 	} else {
-		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+		kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 		return RESUME_GUEST;
 	}
 }
@@ -1630,7 +1631,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * so that it knows that the machine check occurred.
 		 */
 		if (!vcpu->kvm->arch.fwnmi_enabled) {
-			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
+			ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 			kvmppc_core_queue_machine_check(vcpu, flags);
 			r = RESUME_GUEST;
 			break;
@@ -1659,7 +1661,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * as a result of a hypervisor emulation interrupt
 		 * (e40) getting turned into a 700 by BML RTAS.
 		 */
-		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
+		flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+			(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		kvmppc_core_queue_program(vcpu, flags);
 		r = RESUME_GUEST;
 		break;
@@ -1739,7 +1742,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		}
 		if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
-			kvmppc_core_queue_data_storage(vcpu, 0,
+			kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
 			r = RESUME_GUEST;
 			break;
@@ -1757,7 +1761,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		} else if (err == -1 || err == -2) {
 			r = RESUME_PAGE_FAULT;
 		} else {
-			kvmppc_core_queue_data_storage(vcpu, 0,
+			kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 				vcpu->arch.fault_dar, err);
 			r = RESUME_GUEST;
 		}
@@ -1785,7 +1790,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
 			kvmppc_core_queue_inst_storage(vcpu,
-				vcpu->arch.fault_dsisr);
+				vcpu->arch.fault_dsisr |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 			break;
 		}
@@ -1802,7 +1808,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		} else if (err == -1) {
 			r = RESUME_PAGE_FAULT;
 		} else {
-			kvmppc_core_queue_inst_storage(vcpu, err);
+			kvmppc_core_queue_inst_storage(vcpu,
+				err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1823,7 +1830,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
 			r = kvmppc_emulate_debug_inst(vcpu);
 		} else {
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1864,7 +1872,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = kvmppc_tm_unavailable(vcpu);
 		}
 		if (r == EMULATE_FAIL) {
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 2c9db6119d89..377d0b4a05ee 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -1560,7 +1560,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
 		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
 			/* unusual error -> reflect to the guest as a DSI */
-			kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, dsisr);
 			return RESUME_GUEST;
 		}
 
@@ -1570,8 +1572,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 	if (memslot->flags & KVM_MEM_READONLY) {
 		if (writing) {
 			/* Give the guest a DSI */
-			kvmppc_core_queue_data_storage(vcpu, 0, ea,
-					DSISR_ISSTORE | DSISR_PROTFAULT);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
 		kvm_ro = true;
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index e324a174b585..1abe25ac3855 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -28,7 +28,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
-		kvmppc_core_queue_fpunavail(vcpu, 0);
+		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
-		kvmppc_core_queue_vsx_unavail(vcpu, 0);
+		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
-		kvmppc_core_queue_vec_unavail(vcpu, 0);
+		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index f9d9e0d1ab23..ec531ddc3ee5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -321,7 +321,8 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 		if (vcpu->mmio_is_write)
 			dsisr |= DSISR_ISSTORE;
 
-		kvmppc_core_queue_data_storage(vcpu, 0,
+		kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 				vcpu->arch.vaddr_accessed, dsisr);
 	} else {
 		/*
```