Diffstat (limited to 'arch/riscv/kvm')
-rw-r--r--	arch/riscv/kvm/Makefile		|   1
-rw-r--r--	arch/riscv/kvm/mmu.c		|  35
-rw-r--r--	arch/riscv/kvm/vcpu.c		| 203
-rw-r--r--	arch/riscv/kvm/vcpu_exit.c	| 496
-rw-r--r--	arch/riscv/kvm/vcpu_fp.c	|  27
-rw-r--r--	arch/riscv/kvm/vcpu_insn.c	| 752
-rw-r--r--	arch/riscv/kvm/vcpu_timer.c	|   4
-rw-r--r--	arch/riscv/kvm/vm.c		|   4
8 files changed, 936 insertions(+), 586 deletions(-)
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index e5c56182f48f..019df9208bdd 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -17,6 +17,7 @@ kvm-y += mmu.o
 kvm-y += vcpu.o
 kvm-y += vcpu_exit.o
 kvm-y += vcpu_fp.o
+kvm-y += vcpu_insn.o
 kvm-y += vcpu_switch.o
 kvm-y += vcpu_sbi.o
 kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 9826073fbc67..3a35b2d95697 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -343,23 +343,24 @@ static void gstage_wp_memory_region(struct kvm *kvm, int slot)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
-			  unsigned long size, bool writable)
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+			     phys_addr_t hpa, unsigned long size,
+			     bool writable, bool in_atomic)
 {
 	pte_t pte;
 	int ret = 0;
 	unsigned long pfn;
 	phys_addr_t addr, end;
-	struct kvm_mmu_memory_cache pcache;
-
-	memset(&pcache, 0, sizeof(pcache));
-	pcache.gfp_zero = __GFP_ZERO;
+	struct kvm_mmu_memory_cache pcache = {
+		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
+		.gfp_zero = __GFP_ZERO,
+	};
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
 
 	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
-		pte = pfn_pte(pfn, PAGE_KERNEL);
+		pte = pfn_pte(pfn, PAGE_KERNEL_IO);
 
 		if (!writable)
 			pte = pte_wrprotect(pte);
@@ -382,6 +383,13 @@ out:
 	return ret;
 }
 
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
+{
+	spin_lock(&kvm->mmu_lock);
+	gstage_unmap_range(kvm, gpa, size, false);
+	spin_unlock(&kvm->mmu_lock);
+}
+
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					     struct kvm_memory_slot *slot,
 					     gfn_t gfn_offset,
@@ -517,8 +525,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			goto out;
 		}
 
-		ret = gstage_ioremap(kvm, gpa, pa,
-				     vm_end - vm_start, writable);
+		ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
+					       vm_end - vm_start,
+					       writable, false);
 		if (ret)
 			break;
 	}
@@ -611,7 +620,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 {
 	int ret;
 	kvm_pfn_t hfn;
-	bool writeable;
+	bool writable;
 	short vma_pageshift;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct vm_area_struct *vma;
@@ -659,7 +668,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 
 	mmu_seq = kvm->mmu_notifier_seq;
 
-	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
+	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
 	if (hfn == KVM_PFN_ERR_HWPOISON) {
 		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
 				vma_pageshift, current);
@@ -673,14 +682,14 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	 * for write faults.
 	 */
 	if (logging && !is_write)
-		writeable = false;
+		writable = false;
 
 	spin_lock(&kvm->mmu_lock);
 
 	if (mmu_notifier_retry(kvm, mmu_seq))
 		goto out_unlock;
 
-	if (writeable) {
+	if (writable) {
 		kvm_set_pfn_dirty(hfn);
 		mark_page_dirty(kvm, gfn);
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
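
The mmu.c change above exports the mapping helper so other KVM code can create and tear down G-stage I/O mappings, and the new in_atomic flag makes the page-table cache fall back to GFP_ATOMIC allocations for callers that cannot sleep. A minimal sketch of how a hypothetical in-kernel device emulation might use the pair (the GPA window and call sites below are assumptions for illustration, not part of this patch):

	/* Hypothetical caller of the ioremap/iounmap pair added above. */
	#define MY_DEV_GPA	0x10001000UL	/* assumed guest-physical window */
	#define MY_DEV_SIZE	0x1000UL

	static int my_dev_map(struct kvm *kvm, phys_addr_t hpa)
	{
		/* Sleeping context here, so in_atomic = false and the
		 * memory cache may use regular GFP allocations. */
		return kvm_riscv_gstage_ioremap(kvm, MY_DEV_GPA, hpa,
						MY_DEV_SIZE, true, false);
	}

	static void my_dev_unmap(struct kvm *kvm)
	{
		/* Unmaps the range under kvm->mmu_lock. */
		kvm_riscv_gstage_iounmap(kvm, MY_DEV_GPA, MY_DEV_SIZE);
	}
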
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index f3455dc013fa..5d271b597613 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -26,6 +26,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
 	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
 	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
+	STATS_DESC_COUNTER(VCPU, csr_exit_user),
+	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
 	STATS_DESC_COUNTER(VCPU, exits)
 };
 
@@ -38,16 +40,58 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
-#define KVM_RISCV_ISA_DISABLE_ALLOWED	(riscv_isa_extension_mask(d) | \
-					 riscv_isa_extension_mask(f))
+#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)
 
-#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED	(riscv_isa_extension_mask(a) | \
-						 riscv_isa_extension_mask(c) | \
-						 riscv_isa_extension_mask(i) | \
-						 riscv_isa_extension_mask(m))
+/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
+static const unsigned long kvm_isa_ext_arr[] = {
+	RISCV_ISA_EXT_a,
+	RISCV_ISA_EXT_c,
+	RISCV_ISA_EXT_d,
+	RISCV_ISA_EXT_f,
+	RISCV_ISA_EXT_h,
+	RISCV_ISA_EXT_i,
+	RISCV_ISA_EXT_m,
+	RISCV_ISA_EXT_SVPBMT,
+};
+
+static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
+{
+	unsigned long i;
+
+	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+		if (kvm_isa_ext_arr[i] == base_ext)
+			return i;
+	}
+
+	return KVM_RISCV_ISA_EXT_MAX;
+}
+
+static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
+{
+	switch (ext) {
+	case KVM_RISCV_ISA_EXT_H:
+		return false;
+	default:
+		break;
+	}
+
+	return true;
+}
+
+static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
+{
+	switch (ext) {
+	case KVM_RISCV_ISA_EXT_A:
+	case KVM_RISCV_ISA_EXT_C:
+	case KVM_RISCV_ISA_EXT_I:
+	case KVM_RISCV_ISA_EXT_M:
+		return false;
+	default:
+		break;
+	}
 
-#define KVM_RISCV_ISA_ALLOWED	(KVM_RISCV_ISA_DISABLE_ALLOWED | \
-				 KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
+	return true;
+}
 
 static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 {
@@ -99,13 +143,20 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *cntx;
 	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
+	unsigned long host_isa, i;
 
 	/* Mark this VCPU never ran */
 	vcpu->arch.ran_atleast_once = false;
 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
 
 	/* Setup ISA features available to VCPU */
-	vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;
+	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
+		host_isa = kvm_isa_ext_arr[i];
+		if (__riscv_isa_extension_available(NULL, host_isa) &&
+		    kvm_riscv_vcpu_isa_enable_allowed(i))
+			set_bit(host_isa, vcpu->arch.isa);
+	}
 
 	/* Setup VCPU hfence queue */
 	spin_lock_init(&vcpu->arch.hfence_lock);
@@ -199,7 +250,7 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
 
 	switch (reg_num) {
 	case KVM_REG_RISCV_CONFIG_REG(isa):
-		reg_val = vcpu->arch.isa;
+		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
 		break;
 	default:
 		return -EINVAL;
@@ -219,7 +270,7 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
 					    KVM_REG_SIZE_MASK |
 					    KVM_REG_RISCV_CONFIG);
-	unsigned long reg_val;
+	unsigned long i, isa_ext, reg_val;
 
 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
 		return -EINVAL;
@@ -227,13 +278,32 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 
+	/* This ONE REG interface is only defined for single letter extensions */
+	if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+		return -EINVAL;
+
 	switch (reg_num) {
 	case KVM_REG_RISCV_CONFIG_REG(isa):
 		if (!vcpu->arch.ran_atleast_once) {
-			/* Ignore the disable request for these extensions */
-			vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
-			vcpu->arch.isa &= riscv_isa_extension_base(NULL);
-			vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
+			/* Ignore the enable/disable request for certain extensions */
+			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
+				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
+				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
+					reg_val &= ~BIT(i);
+					continue;
+				}
+				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
+					if (reg_val & BIT(i))
+						reg_val &= ~BIT(i);
+				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
+					if (!(reg_val & BIT(i)))
+						reg_val |= BIT(i);
+			}
+			reg_val &= riscv_isa_extension_base(NULL);
+			/* Do not modify anything beyond single letter extensions */
+			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
+				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
+			vcpu->arch.isa[0] = reg_val;
 			kvm_riscv_vcpu_fp_reset(vcpu);
 		} else {
 			return -EOPNOTSUPP;
@@ -374,17 +444,6 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
-static unsigned long kvm_isa_ext_arr[] = {
-	RISCV_ISA_EXT_a,
-	RISCV_ISA_EXT_c,
-	RISCV_ISA_EXT_d,
-	RISCV_ISA_EXT_f,
-	RISCV_ISA_EXT_h,
-	RISCV_ISA_EXT_i,
-	RISCV_ISA_EXT_m,
-};
-
 static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
 					  const struct kvm_one_reg *reg)
 {
@@ -399,11 +458,12 @@ static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
 		return -EINVAL;
 
-	if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
+	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
 		return -EINVAL;
 
 	host_isa_ext = kvm_isa_ext_arr[reg_num];
-	if (__riscv_isa_extension_available(&vcpu->arch.isa, host_isa_ext))
+	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
 		reg_val = 1; /* Mark the given extension as available */
 
 	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
@@ -422,12 +482,12 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
 					    KVM_REG_RISCV_ISA_EXT);
 	unsigned long reg_val;
 	unsigned long host_isa_ext;
-	unsigned long host_isa_ext_mask;
 
 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
 		return -EINVAL;
 
-	if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
+	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
 		return -EINVAL;
 
 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
@@ -437,30 +497,19 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
 	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
 		return -EOPNOTSUPP;
 
-	if (host_isa_ext >= RISCV_ISA_EXT_BASE &&
-	    host_isa_ext < RISCV_ISA_EXT_MAX) {
+	if (!vcpu->arch.ran_atleast_once) {
 		/*
-		 * Multi-letter ISA extension. Currently there is no provision
-		 * to enable/disable the multi-letter ISA extensions for guests.
-		 * Return success if the request is to enable any ISA extension
-		 * that is available in the hardware.
-		 * Return -EOPNOTSUPP otherwise.
+		 * All multi-letter extension and a few single letter
+		 * extension can be disabled
 		 */
-		if (!reg_val)
-			return -EOPNOTSUPP;
+		if (reg_val == 1 &&
+		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
+			set_bit(host_isa_ext, vcpu->arch.isa);
+		else if (!reg_val &&
+			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
+			clear_bit(host_isa_ext, vcpu->arch.isa);
 		else
-			return 0;
-	}
-
-	/* Single letter base ISA extension */
-	if (!vcpu->arch.ran_atleast_once) {
-		host_isa_ext_mask = BIT_MASK(host_isa_ext);
-		if (!reg_val && (host_isa_ext_mask & KVM_RISCV_ISA_DISABLE_ALLOWED))
-			vcpu->arch.isa &= ~host_isa_ext_mask;
-		else
-			vcpu->arch.isa |= host_isa_ext_mask;
-		vcpu->arch.isa &= riscv_isa_extension_base(NULL);
-		vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
+			return -EINVAL;
 		kvm_riscv_vcpu_fp_reset(vcpu);
 	} else {
 		return -EOPNOTSUPP;
@@ -729,6 +778,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
+static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
+{
+	u64 henvcfg = 0;
+
+	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
+		henvcfg |= ENVCFG_PBMTE;
+
+	csr_write(CSR_HENVCFG, henvcfg);
+#ifdef CONFIG_32BIT
+	csr_write(CSR_HENVCFGH, henvcfg >> 32);
+#endif
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
@@ -743,6 +805,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	csr_write(CSR_HVIP, csr->hvip);
 	csr_write(CSR_VSATP, csr->vsatp);
 
+	kvm_riscv_vcpu_update_config(vcpu->arch.isa);
+
 	kvm_riscv_gstage_update_hgatp(vcpu);
 
 	kvm_riscv_vcpu_timer_restore(vcpu);
@@ -853,22 +917,26 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 	kvm_vcpu_srcu_read_lock(vcpu);
 
-	/* Process MMIO value returned from user-space */
-	if (run->exit_reason == KVM_EXIT_MMIO) {
+	switch (run->exit_reason) {
+	case KVM_EXIT_MMIO:
+		/* Process MMIO value returned from user-space */
 		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
-		if (ret) {
-			kvm_vcpu_srcu_read_unlock(vcpu);
-			return ret;
-		}
-	}
-
-	/* Process SBI value returned from user-space */
-	if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
+		break;
+	case KVM_EXIT_RISCV_SBI:
+		/* Process SBI value returned from user-space */
 		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
-		if (ret) {
-			kvm_vcpu_srcu_read_unlock(vcpu);
-			return ret;
-		}
+		break;
+	case KVM_EXIT_RISCV_CSR:
+		/* Process CSR value returned from user-space */
+		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
+		break;
+	default:
+		ret = 0;
+		break;
+	}
+	if (ret) {
+		kvm_vcpu_srcu_read_unlock(vcpu);
+		return ret;
 	}
 
 	if (run->immediate_exit) {
@@ -890,8 +958,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 		kvm_riscv_check_vcpu_requests(vcpu);
 
-		preempt_disable();
-
 		local_irq_disable();
 
 		/*
@@ -928,7 +994,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		    kvm_request_pending(vcpu)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			local_irq_enable();
-			preempt_enable();
 			kvm_vcpu_srcu_read_lock(vcpu);
 			continue;
 		}
@@ -962,6 +1027,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		/* Syncup interrupts state with HW */
 		kvm_riscv_vcpu_sync_interrupts(vcpu);
 
+		preempt_disable();
+
 		/*
 		 * We must ensure that any pending interrupts are taken before
 		 * we exit guest timing so that timer ticks are accounted as
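
With the bitmap-based ISA handling above, each extension becomes its own KVM_REG_RISCV_ISA_EXT one-reg register that userspace can read, or write before the VCPU first runs. A rough userspace sketch (assuming an rv64 host, an open VCPU fd, and the kernel UAPI headers; error handling trimmed):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>
	#include <asm/kvm.h>

	/* Ask KVM to hide the D extension from the guest. This only works
	 * before the first KVM_RUN, and only for extensions that
	 * kvm_riscv_vcpu_isa_disable_allowed() permits (A/C/I/M would
	 * return -EINVAL). */
	static int vcpu_disable_d_ext(int vcpu_fd)
	{
		unsigned long val = 0;	/* 0 = disable, 1 = enable */
		struct kvm_one_reg reg = {
			.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			      KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
			.addr = (uint64_t)&val,
		};

		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}
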
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index dbb09afd7546..d5c36386878a 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -6,435 +6,34 @@
  *     Anup Patel <anup.patel@wdc.com>
  */
 
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/err.h>
 #include <linux/kvm_host.h>
 #include <asm/csr.h>
 
-#define INSN_OPCODE_MASK	0x007c
-#define INSN_OPCODE_SHIFT	2
-#define INSN_OPCODE_SYSTEM	28
-
-#define INSN_MASK_WFI		0xffffffff
-#define INSN_MATCH_WFI		0x10500073
-
-#define INSN_MATCH_LB		0x3
-#define INSN_MASK_LB		0x707f
-#define INSN_MATCH_LH		0x1003
-#define INSN_MASK_LH		0x707f
-#define INSN_MATCH_LW		0x2003
-#define INSN_MASK_LW		0x707f
-#define INSN_MATCH_LD		0x3003
-#define INSN_MASK_LD		0x707f
-#define INSN_MATCH_LBU		0x4003
-#define INSN_MASK_LBU		0x707f
-#define INSN_MATCH_LHU		0x5003
-#define INSN_MASK_LHU		0x707f
-#define INSN_MATCH_LWU		0x6003
-#define INSN_MASK_LWU		0x707f
-#define INSN_MATCH_SB		0x23
-#define INSN_MASK_SB		0x707f
-#define INSN_MATCH_SH		0x1023
-#define INSN_MASK_SH		0x707f
-#define INSN_MATCH_SW		0x2023
-#define INSN_MASK_SW		0x707f
-#define INSN_MATCH_SD		0x3023
-#define INSN_MASK_SD		0x707f
-
-#define INSN_MATCH_C_LD		0x6000
-#define INSN_MASK_C_LD		0xe003
-#define INSN_MATCH_C_SD		0xe000
-#define INSN_MASK_C_SD		0xe003
-#define INSN_MATCH_C_LW		0x4000
-#define INSN_MASK_C_LW		0xe003
-#define INSN_MATCH_C_SW		0xc000
-#define INSN_MASK_C_SW		0xe003
-#define INSN_MATCH_C_LDSP	0x6002
-#define INSN_MASK_C_LDSP	0xe003
-#define INSN_MATCH_C_SDSP	0xe002
-#define INSN_MASK_C_SDSP	0xe003
-#define INSN_MATCH_C_LWSP	0x4002
-#define INSN_MASK_C_LWSP	0xe003
-#define INSN_MATCH_C_SWSP	0xc002
-#define INSN_MASK_C_SWSP	0xe003
-
-#define INSN_16BIT_MASK		0x3
-
-#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
-
-#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)
-
-#ifdef CONFIG_64BIT
-#define LOG_REGBYTES		3
-#else
-#define LOG_REGBYTES		2
-#endif
-#define REGBYTES		(1 << LOG_REGBYTES)
-
-#define SH_RD			7
-#define SH_RS1			15
-#define SH_RS2			20
-#define SH_RS2C			2
-
-#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
-#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
-				 (RV_X(x, 10, 3) << 3) | \
-				 (RV_X(x, 5, 1) << 6))
-#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
-				 (RV_X(x, 5, 2) << 6))
-#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
-				 (RV_X(x, 12, 1) << 5) | \
-				 (RV_X(x, 2, 2) << 6))
-#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
-				 (RV_X(x, 12, 1) << 5) | \
-				 (RV_X(x, 2, 3) << 6))
-#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
-				 (RV_X(x, 7, 2) << 6))
-#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
-				 (RV_X(x, 7, 3) << 6))
-#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
-#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
-#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)
-
-#define SHIFT_RIGHT(x, y)		\
-	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
-
-#define REG_MASK			\
-	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
-
-#define REG_OFFSET(insn, pos)		\
-	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
-
-#define REG_PTR(insn, pos, regs)	\
-	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
-
-#define GET_RM(insn)		(((insn) >> 12) & 7)
-
-#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
-#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
-#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
-#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
-#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
-#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
-#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
-#define IMM_I(insn)		((s32)(insn) >> 20)
-#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
-				 (s32)(((insn) >> 7) & 0x1f))
-#define MASK_FUNCT3		0x7000
-
-static int truly_illegal_insn(struct kvm_vcpu *vcpu,
-			      struct kvm_run *run,
-			      ulong insn)
-{
-	struct kvm_cpu_trap utrap = { 0 };
-
-	/* Redirect trap to Guest VCPU */
-	utrap.sepc = vcpu->arch.guest_context.sepc;
-	utrap.scause = EXC_INST_ILLEGAL;
-	utrap.stval = insn;
-	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
-
-	return 1;
-}
-
-static int system_opcode_insn(struct kvm_vcpu *vcpu,
-			      struct kvm_run *run,
-			      ulong insn)
-{
-	if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
-		vcpu->stat.wfi_exit_stat++;
-		kvm_riscv_vcpu_wfi(vcpu);
-		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
-		return 1;
-	}
-
-	return truly_illegal_insn(vcpu, run, insn);
-}
-
-static int virtual_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			      struct kvm_cpu_trap *trap)
-{
-	unsigned long insn = trap->stval;
-	struct kvm_cpu_trap utrap = { 0 };
-	struct kvm_cpu_context *ct;
-
-	if (unlikely(INSN_IS_16BIT(insn))) {
-		if (insn == 0) {
-			ct = &vcpu->arch.guest_context;
-			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
-							  ct->sepc,
-							  &utrap);
-			if (utrap.scause) {
-				utrap.sepc = ct->sepc;
-				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
-				return 1;
-			}
-		}
-		if (INSN_IS_16BIT(insn))
-			return truly_illegal_insn(vcpu, run, insn);
-	}
-
-	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
-	case INSN_OPCODE_SYSTEM:
-		return system_opcode_insn(vcpu, run, insn);
-	default:
-		return truly_illegal_insn(vcpu, run, insn);
-	}
-}
-
-static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			unsigned long fault_addr, unsigned long htinst)
-{
-	u8 data_buf[8];
-	unsigned long insn;
-	int shift = 0, len = 0, insn_len = 0;
-	struct kvm_cpu_trap utrap = { 0 };
-	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
-
-	/* Determine trapped instruction */
-	if (htinst & 0x1) {
-		/*
-		 * Bit[0] == 1 implies trapped instruction value is
-		 * transformed instruction or custom instruction.
-		 */
-		insn = htinst | INSN_16BIT_MASK;
-		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
-	} else {
-		/*
-		 * Bit[0] == 0 implies trapped instruction value is
-		 * zero or special value.
-		 */
-		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
-						  &utrap);
-		if (utrap.scause) {
-			/* Redirect trap if we failed to read instruction */
-			utrap.sepc = ct->sepc;
-			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
-			return 1;
-		}
-		insn_len = INSN_LEN(insn);
-	}
-
-	/* Decode length of MMIO and shift */
-	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
-		len = 4;
-		shift = 8 * (sizeof(ulong) - len);
-	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
-		len = 1;
-		shift = 8 * (sizeof(ulong) - len);
-	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
-		len = 1;
-		shift = 8 * (sizeof(ulong) - len);
-#ifdef CONFIG_64BIT
-	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
-		len = 8;
-		shift = 8 * (sizeof(ulong) - len);
-	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
-		len = 4;
-#endif
-	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
-		len = 2;
-		shift = 8 * (sizeof(ulong) - len);
-	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
-		len = 2;
-#ifdef CONFIG_64BIT
-	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
-		len = 8;
-		shift = 8 * (sizeof(ulong) - len);
-		insn = RVC_RS2S(insn) << SH_RD;
-	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
-		   ((insn >> SH_RD) & 0x1f)) {
-		len = 8;
-		shift = 8 * (sizeof(ulong) - len);
-#endif
-	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
-		len = 4;
-		shift = 8 * (sizeof(ulong) - len);
-		insn = RVC_RS2S(insn) << SH_RD;
-	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
-		   ((insn >> SH_RD) & 0x1f)) {
-		len = 4;
-		shift = 8 * (sizeof(ulong) - len);
-	} else {
-		return -EOPNOTSUPP;
-	}
-
-	/* Fault address should be aligned to length of MMIO */
-	if (fault_addr & (len - 1))
-		return -EIO;
-
-	/* Save instruction decode info */
-	vcpu->arch.mmio_decode.insn = insn;
-	vcpu->arch.mmio_decode.insn_len = insn_len;
-	vcpu->arch.mmio_decode.shift = shift;
-	vcpu->arch.mmio_decode.len = len;
-	vcpu->arch.mmio_decode.return_handled = 0;
-
-	/* Update MMIO details in kvm_run struct */
-	run->mmio.is_write = false;
-	run->mmio.phys_addr = fault_addr;
-	run->mmio.len = len;
-
-	/* Try to handle MMIO access in the kernel */
-	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
-		/* Successfully handled MMIO access in the kernel so resume */
-		memcpy(run->mmio.data, data_buf, len);
-		vcpu->stat.mmio_exit_kernel++;
-		kvm_riscv_vcpu_mmio_return(vcpu, run);
-		return 1;
-	}
-
-	/* Exit to userspace for MMIO emulation */
-	vcpu->stat.mmio_exit_user++;
-	run->exit_reason = KVM_EXIT_MMIO;
-
-	return 0;
-}
-
-static int emulate_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			 unsigned long fault_addr, unsigned long htinst)
-{
-	u8 data8;
-	u16 data16;
-	u32 data32;
-	u64 data64;
-	ulong data;
-	unsigned long insn;
-	int len = 0, insn_len = 0;
-	struct kvm_cpu_trap utrap = { 0 };
-	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
-
-	/* Determine trapped instruction */
-	if (htinst & 0x1) {
-		/*
-		 * Bit[0] == 1 implies trapped instruction value is
-		 * transformed instruction or custom instruction.
-		 */
-		insn = htinst | INSN_16BIT_MASK;
-		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
-	} else {
-		/*
-		 * Bit[0] == 0 implies trapped instruction value is
-		 * zero or special value.
-		 */
-		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
-						  &utrap);
-		if (utrap.scause) {
-			/* Redirect trap if we failed to read instruction */
-			utrap.sepc = ct->sepc;
-			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
-			return 1;
-		}
-		insn_len = INSN_LEN(insn);
-	}
-
-	data = GET_RS2(insn, &vcpu->arch.guest_context);
-	data8 = data16 = data32 = data64 = data;
-
-	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
-		len = 4;
-	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
-		len = 1;
-#ifdef CONFIG_64BIT
-	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
-		len = 8;
-#endif
-	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
-		len = 2;
-#ifdef CONFIG_64BIT
-	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
-		len = 8;
-		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
-	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
-		   ((insn >> SH_RD) & 0x1f)) {
-		len = 8;
-		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
-#endif
-	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
-		len = 4;
-		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
-	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
-		   ((insn >> SH_RD) & 0x1f)) {
-		len = 4;
-		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
-	} else {
-		return -EOPNOTSUPP;
-	}
-
-	/* Fault address should be aligned to length of MMIO */
-	if (fault_addr & (len - 1))
-		return -EIO;
-
-	/* Save instruction decode info */
-	vcpu->arch.mmio_decode.insn = insn;
-	vcpu->arch.mmio_decode.insn_len = insn_len;
-	vcpu->arch.mmio_decode.shift = 0;
-	vcpu->arch.mmio_decode.len = len;
-	vcpu->arch.mmio_decode.return_handled = 0;
-
-	/* Copy data to kvm_run instance */
-	switch (len) {
-	case 1:
-		*((u8 *)run->mmio.data) = data8;
-		break;
-	case 2:
-		*((u16 *)run->mmio.data) = data16;
-		break;
-	case 4:
-		*((u32 *)run->mmio.data) = data32;
-		break;
-	case 8:
-		*((u64 *)run->mmio.data) = data64;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	/* Update MMIO details in kvm_run struct */
-	run->mmio.is_write = true;
-	run->mmio.phys_addr = fault_addr;
-	run->mmio.len = len;
-
-	/* Try to handle MMIO access in the kernel */
-	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
-			      fault_addr, len, run->mmio.data)) {
-		/* Successfully handled MMIO access in the kernel so resume */
-		vcpu->stat.mmio_exit_kernel++;
-		kvm_riscv_vcpu_mmio_return(vcpu, run);
-		return 1;
-	}
-
-	/* Exit to userspace for MMIO emulation */
-	vcpu->stat.mmio_exit_user++;
-	run->exit_reason = KVM_EXIT_MMIO;
-
-	return 0;
-}
-
 static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			     struct kvm_cpu_trap *trap)
 {
 	struct kvm_memory_slot *memslot;
 	unsigned long hva, fault_addr;
-	bool writeable;
+	bool writable;
 	gfn_t gfn;
 	int ret;
 
 	fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
 	gfn = fault_addr >> PAGE_SHIFT;
 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
-	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
+	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
 
 	if (kvm_is_error_hva(hva) ||
-	    (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writeable)) {
+	    (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
 		switch (trap->scause) {
 		case EXC_LOAD_GUEST_PAGE_FAULT:
-			return emulate_load(vcpu, run, fault_addr,
-					    trap->htinst);
+			return kvm_riscv_vcpu_mmio_load(vcpu, run,
+							fault_addr,
+							trap->htinst);
 		case EXC_STORE_GUEST_PAGE_FAULT:
-			return emulate_store(vcpu, run, fault_addr,
-					     trap->htinst);
+			return kvm_riscv_vcpu_mmio_store(vcpu, run,
+							 fault_addr,
+							 trap->htinst);
 		default:
 			return -EOPNOTSUPP;
 		};
@@ -449,21 +48,6 @@ static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
 }
 
 /**
- * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
- *
- * @vcpu: The VCPU pointer
- */
-void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
-{
-	if (!kvm_arch_vcpu_runnable(vcpu)) {
-		kvm_vcpu_srcu_read_unlock(vcpu);
-		kvm_vcpu_halt(vcpu);
-		kvm_vcpu_srcu_read_lock(vcpu);
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
-	}
-}
-
-/**
  * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
  *
  * @vcpu: The VCPU pointer
@@ -601,66 +185,6 @@ void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
 	vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
 }
 
-/**
- * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
- *			     or in-kernel IO emulation
- *
- * @vcpu: The VCPU pointer
- * @run:  The VCPU run struct containing the mmio data
- */
-int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	u8 data8;
-	u16 data16;
-	u32 data32;
-	u64 data64;
-	ulong insn;
-	int len, shift;
-
-	if (vcpu->arch.mmio_decode.return_handled)
-		return 0;
-
-	vcpu->arch.mmio_decode.return_handled = 1;
-	insn = vcpu->arch.mmio_decode.insn;
-
-	if (run->mmio.is_write)
-		goto done;
-
-	len = vcpu->arch.mmio_decode.len;
-	shift = vcpu->arch.mmio_decode.shift;
-
-	switch (len) {
-	case 1:
-		data8 = *((u8 *)run->mmio.data);
-		SET_RD(insn, &vcpu->arch.guest_context,
-			(ulong)data8 << shift >> shift);
-		break;
-	case 2:
-		data16 = *((u16 *)run->mmio.data);
-		SET_RD(insn, &vcpu->arch.guest_context,
-			(ulong)data16 << shift >> shift);
-		break;
-	case 4:
-		data32 = *((u32 *)run->mmio.data);
-		SET_RD(insn, &vcpu->arch.guest_context,
-			(ulong)data32 << shift >> shift);
-		break;
-	case 8:
-		data64 = *((u64 *)run->mmio.data);
-		SET_RD(insn, &vcpu->arch.guest_context,
-			(ulong)data64 << shift >> shift);
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
-done:
-	/* Move to next instruction */
-	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;
-
-	return 0;
-}
-
 /*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
 */
@@ -680,7 +204,7 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	switch (trap->scause) {
 	case EXC_VIRTUAL_INST_FAULT:
 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
-			ret = virtual_inst_fault(vcpu, run, trap);
+			ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
 		break;
 	case EXC_INST_GUEST_PAGE_FAULT:
 	case EXC_LOAD_GUEST_PAGE_FAULT:
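
One detail worth calling out in gstage_page_fault() above: the faulting guest-physical address is stitched together from two CSRs, since htval holds bits XLEN-1:2 of the GPA and stval supplies the low bits. A standalone illustration (the sample values are made up):

	/* How gstage_page_fault() reassembles the guest-physical fault address. */
	static unsigned long gpa_from_trap(unsigned long htval, unsigned long stval)
	{
		return (htval << 2) | (stval & 0x3);
	}
	/* e.g. htval = 0x0400200, stval low bits = 0x2  =>  GPA 0x1000802 */
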
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index d4308c512007..9d8cbc42057a 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -16,12 +16,11 @@
 #ifdef CONFIG_FPU
 void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
 {
-	unsigned long isa = vcpu->arch.isa;
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
 
 	cntx->sstatus &= ~SR_FS;
-	if (riscv_isa_extension_available(&isa, f) ||
-	    riscv_isa_extension_available(&isa, d))
+	if (riscv_isa_extension_available(vcpu->arch.isa, f) ||
+	    riscv_isa_extension_available(vcpu->arch.isa, d))
 		cntx->sstatus |= SR_FS_INITIAL;
 	else
 		cntx->sstatus |= SR_FS_OFF;
@@ -34,24 +33,24 @@ static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
 }
 
 void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
-				  unsigned long isa)
+				  const unsigned long *isa)
 {
 	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
-		if (riscv_isa_extension_available(&isa, d))
+		if (riscv_isa_extension_available(isa, d))
 			__kvm_riscv_fp_d_save(cntx);
-		else if (riscv_isa_extension_available(&isa, f))
+		else if (riscv_isa_extension_available(isa, f))
 			__kvm_riscv_fp_f_save(cntx);
 		kvm_riscv_vcpu_fp_clean(cntx);
 	}
 }
 
 void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
-				     unsigned long isa)
+				     const unsigned long *isa)
 {
 	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
-		if (riscv_isa_extension_available(&isa, d))
+		if (riscv_isa_extension_available(isa, d))
 			__kvm_riscv_fp_d_restore(cntx);
-		else if (riscv_isa_extension_available(&isa, f))
+		else if (riscv_isa_extension_available(isa, f))
 			__kvm_riscv_fp_f_restore(cntx);
 		kvm_riscv_vcpu_fp_clean(cntx);
 	}
@@ -80,7 +79,6 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
 			      unsigned long rtype)
 {
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	unsigned long isa = vcpu->arch.isa;
 	unsigned long __user *uaddr =
 			(unsigned long __user *)(unsigned long)reg->addr;
 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
@@ -89,7 +87,7 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
 	void *reg_val;
 
 	if ((rtype == KVM_REG_RISCV_FP_F) &&
-	    riscv_isa_extension_available(&isa, f)) {
+	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
 		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
 			return -EINVAL;
 		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
@@ -100,7 +98,7 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
 		else
 			return -EINVAL;
 	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
-		   riscv_isa_extension_available(&isa, d)) {
+		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
 		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
 			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
 				return -EINVAL;
@@ -126,7 +124,6 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
 			      unsigned long rtype)
 {
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	unsigned long isa = vcpu->arch.isa;
 	unsigned long __user *uaddr =
 			(unsigned long __user *)(unsigned long)reg->addr;
 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
@@ -135,7 +132,7 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
 	void *reg_val;
 
 	if ((rtype == KVM_REG_RISCV_FP_F) &&
-	    riscv_isa_extension_available(&isa, f)) {
+	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
 		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
 			return -EINVAL;
 		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
@@ -146,7 +143,7 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
 		else
 			return -EINVAL;
 	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
-		   riscv_isa_extension_available(&isa, d)) {
+		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
 		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
 			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
 				return -EINVAL;
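
The new vcpu_insn.c below consolidates all trap-time instruction emulation behind a mask/match dispatch table, so adding another emulated SYSTEM-opcode instruction is just one table entry plus a handler. A sketch of what such an addition could look like (the encoding and handler here are hypothetical placeholders, not part of this patch):

	/* Hypothetical extra handler for system_opcode_funcs[] in vcpu_insn.c. */
	#define INSN_MASK_FOO	0xffffffff	/* placeholder, not a real encoding */
	#define INSN_MATCH_FOO	0x00500073

	static int foo_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
	{
		/* ... emulate the instruction here ... */
		return KVM_INSN_CONTINUE_NEXT_SEPC;	/* advance sepc past it */
	}

	/* and one more entry in system_opcode_funcs[]:
	 *	{
	 *		.mask  = INSN_MASK_FOO,
	 *		.match = INSN_MATCH_FOO,
	 *		.func  = foo_insn,
	 *	},
	 */
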
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
new file mode 100644
index 000000000000..7eb90a47b571
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kvm_host.h>
+
+#define INSN_OPCODE_MASK	0x007c
+#define INSN_OPCODE_SHIFT	2
+#define INSN_OPCODE_SYSTEM	28
+
+#define INSN_MASK_WFI		0xffffffff
+#define INSN_MATCH_WFI		0x10500073
+
+#define INSN_MATCH_CSRRW	0x1073
+#define INSN_MASK_CSRRW		0x707f
+#define INSN_MATCH_CSRRS	0x2073
+#define INSN_MASK_CSRRS		0x707f
+#define INSN_MATCH_CSRRC	0x3073
+#define INSN_MASK_CSRRC		0x707f
+#define INSN_MATCH_CSRRWI	0x5073
+#define INSN_MASK_CSRRWI	0x707f
+#define INSN_MATCH_CSRRSI	0x6073
+#define INSN_MASK_CSRRSI	0x707f
+#define INSN_MATCH_CSRRCI	0x7073
+#define INSN_MASK_CSRRCI	0x707f
+
+#define INSN_MATCH_LB		0x3
+#define INSN_MASK_LB		0x707f
+#define INSN_MATCH_LH		0x1003
+#define INSN_MASK_LH		0x707f
+#define INSN_MATCH_LW		0x2003
+#define INSN_MASK_LW		0x707f
+#define INSN_MATCH_LD		0x3003
+#define INSN_MASK_LD		0x707f
+#define INSN_MATCH_LBU		0x4003
+#define INSN_MASK_LBU		0x707f
+#define INSN_MATCH_LHU		0x5003
+#define INSN_MASK_LHU		0x707f
+#define INSN_MATCH_LWU		0x6003
+#define INSN_MASK_LWU		0x707f
+#define INSN_MATCH_SB		0x23
+#define INSN_MASK_SB		0x707f
+#define INSN_MATCH_SH		0x1023
+#define INSN_MASK_SH		0x707f
+#define INSN_MATCH_SW		0x2023
+#define INSN_MASK_SW		0x707f
+#define INSN_MATCH_SD		0x3023
+#define INSN_MASK_SD		0x707f
+
+#define INSN_MATCH_C_LD		0x6000
+#define INSN_MASK_C_LD		0xe003
+#define INSN_MATCH_C_SD		0xe000
+#define INSN_MASK_C_SD		0xe003
+#define INSN_MATCH_C_LW		0x4000
+#define INSN_MASK_C_LW		0xe003
+#define INSN_MATCH_C_SW		0xc000
+#define INSN_MASK_C_SW		0xe003
+#define INSN_MATCH_C_LDSP	0x6002
+#define INSN_MASK_C_LDSP	0xe003
+#define INSN_MATCH_C_SDSP	0xe002
+#define INSN_MASK_C_SDSP	0xe003
+#define INSN_MATCH_C_LWSP	0x4002
+#define INSN_MASK_C_LWSP	0xe003
+#define INSN_MATCH_C_SWSP	0xc002
+#define INSN_MASK_C_SWSP	0xe003
+
+#define INSN_16BIT_MASK		0x3
+
+#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
+
+#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)
+
+#ifdef CONFIG_64BIT
+#define LOG_REGBYTES		3
+#else
+#define LOG_REGBYTES		2
+#endif
+#define REGBYTES		(1 << LOG_REGBYTES)
+
+#define SH_RD			7
+#define SH_RS1			15
+#define SH_RS2			20
+#define SH_RS2C			2
+#define MASK_RX			0x1f
+
+#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
+#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
+				 (RV_X(x, 10, 3) << 3) | \
+				 (RV_X(x, 5, 1) << 6))
+#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
+				 (RV_X(x, 5, 2) << 6))
+#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
+				 (RV_X(x, 12, 1) << 5) | \
+				 (RV_X(x, 2, 2) << 6))
+#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
+				 (RV_X(x, 12, 1) << 5) | \
+				 (RV_X(x, 2, 3) << 6))
+#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
+				 (RV_X(x, 7, 2) << 6))
+#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
+				 (RV_X(x, 7, 3) << 6))
+#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
+#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
+#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)
+
+#define SHIFT_RIGHT(x, y)		\
+	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
+
+#define REG_MASK			\
+	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
+
+#define REG_OFFSET(insn, pos)		\
+	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
+
+#define REG_PTR(insn, pos, regs)	\
+	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
+
+#define GET_FUNCT3(insn)	(((insn) >> 12) & 7)
+
+#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
+#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
+#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
+#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
+#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
+#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
+#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
+#define IMM_I(insn)		((s32)(insn) >> 20)
+#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
+				 (s32)(((insn) >> 7) & 0x1f))
+
+struct insn_func {
+	unsigned long mask;
+	unsigned long match;
+	/*
+	 * Possible return values are as follows:
+	 * 1) Returns < 0 for error case
+	 * 2) Returns 0 for exit to user-space
+	 * 3) Returns 1 to continue with next sepc
+	 * 4) Returns 2 to continue with same sepc
+	 * 5) Returns 3 to inject illegal instruction trap and continue
+	 * 6) Returns 4 to inject virtual instruction trap and continue
+	 *
+	 * Use enum kvm_insn_return for return values
+	 */
+	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
+};
+
+static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+			      ulong insn)
+{
+	struct kvm_cpu_trap utrap = { 0 };
+
+	/* Redirect trap to Guest VCPU */
+	utrap.sepc = vcpu->arch.guest_context.sepc;
+	utrap.scause = EXC_INST_ILLEGAL;
+	utrap.stval = insn;
+	utrap.htval = 0;
+	utrap.htinst = 0;
+	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+
+	return 1;
+}
+
+static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+			      ulong insn)
+{
+	struct kvm_cpu_trap utrap = { 0 };
+
+	/* Redirect trap to Guest VCPU */
+	utrap.sepc = vcpu->arch.guest_context.sepc;
+	utrap.scause = EXC_VIRTUAL_INST_FAULT;
+	utrap.stval = insn;
+	utrap.htval = 0;
+	utrap.htinst = 0;
+	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+
+	return 1;
+}
+
+/**
+ * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
+ *
+ * @vcpu: The VCPU pointer
+ */
+void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
+{
+	if (!kvm_arch_vcpu_runnable(vcpu)) {
+		kvm_vcpu_srcu_read_unlock(vcpu);
+		kvm_vcpu_halt(vcpu);
+		kvm_vcpu_srcu_read_lock(vcpu);
+		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+	}
+}
+
+static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
+{
+	vcpu->stat.wfi_exit_stat++;
+	kvm_riscv_vcpu_wfi(vcpu);
+	return KVM_INSN_CONTINUE_NEXT_SEPC;
+}
+
+struct csr_func {
+	unsigned int base;
+	unsigned int count;
+	/*
+	 * Possible return values are as same as "func" callback in
+	 * "struct insn_func".
+	 */
+	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
+		    unsigned long *val, unsigned long new_val,
+		    unsigned long wr_mask);
+};
+
+static const struct csr_func csr_funcs[] = { };
+
+/**
+ * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
+ *				emulation or in-kernel emulation
+ *
+ * @vcpu: The VCPU pointer
+ * @run:  The VCPU run struct containing the CSR data
+ *
+ * Returns > 0 upon failure and 0 upon success
+ */
+int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	ulong insn;
+
+	if (vcpu->arch.csr_decode.return_handled)
+		return 0;
+	vcpu->arch.csr_decode.return_handled = 1;
+
+	/* Update destination register for CSR reads */
+	insn = vcpu->arch.csr_decode.insn;
+	if ((insn >> SH_RD) & MASK_RX)
+		SET_RD(insn, &vcpu->arch.guest_context,
+		       run->riscv_csr.ret_value);
+
+	/* Move to next instruction */
+	vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+
+	return 0;
+}
+
+static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
+{
+	int i, rc = KVM_INSN_ILLEGAL_TRAP;
+	unsigned int csr_num = insn >> SH_RS2;
+	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
+	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
+	const struct csr_func *tcfn, *cfn = NULL;
+	ulong val = 0, wr_mask = 0, new_val = 0;
+
+	/* Decode the CSR instruction */
+	switch (GET_FUNCT3(insn)) {
+	case GET_FUNCT3(INSN_MATCH_CSRRW):
+		wr_mask = -1UL;
+		new_val = rs1_val;
+		break;
+	case GET_FUNCT3(INSN_MATCH_CSRRS):
+		wr_mask = rs1_val;
+		new_val = -1UL;
+		break;
+	case GET_FUNCT3(INSN_MATCH_CSRRC):
+		wr_mask = rs1_val;
+		new_val = 0;
+		break;
+	case GET_FUNCT3(INSN_MATCH_CSRRWI):
+		wr_mask = -1UL;
+		new_val = rs1_num;
+		break;
+	case GET_FUNCT3(INSN_MATCH_CSRRSI):
+		wr_mask = rs1_num;
+		new_val = -1UL;
+		break;
+	case GET_FUNCT3(INSN_MATCH_CSRRCI):
+		wr_mask = rs1_num;
+		new_val = 0;
+		break;
+	default:
+		return rc;
+	}
+
+	/* Save instruction decode info */
+	vcpu->arch.csr_decode.insn = insn;
+	vcpu->arch.csr_decode.return_handled = 0;
+
+	/* Update CSR details in kvm_run struct */
+	run->riscv_csr.csr_num = csr_num;
+	run->riscv_csr.new_value = new_val;
+	run->riscv_csr.write_mask = wr_mask;
+	run->riscv_csr.ret_value = 0;
+
+	/* Find in-kernel CSR function */
+	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
+		tcfn = &csr_funcs[i];
+		if ((tcfn->base <= csr_num) &&
+		    (csr_num < (tcfn->base + tcfn->count))) {
+			cfn = tcfn;
+			break;
+		}
+	}
+
+	/* First try in-kernel CSR emulation */
+	if (cfn && cfn->func) {
+		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
+		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
+			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
+				run->riscv_csr.ret_value = val;
+				vcpu->stat.csr_exit_kernel++;
+				kvm_riscv_vcpu_csr_return(vcpu, run);
+				rc = KVM_INSN_CONTINUE_SAME_SEPC;
+			}
+			return rc;
+		}
+	}
+
+	/* Exit to user-space for CSR emulation */
+	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
+		vcpu->stat.csr_exit_user++;
+		run->exit_reason = KVM_EXIT_RISCV_CSR;
+	}
+
+	return rc;
+}
+
+static const struct insn_func system_opcode_funcs[] = {
+	{
+		.mask  = INSN_MASK_CSRRW,
+		.match = INSN_MATCH_CSRRW,
+		.func  = csr_insn,
+	},
+	{
+		.mask  = INSN_MASK_CSRRS,
+		.match = INSN_MATCH_CSRRS,
+		.func  = csr_insn,
+	},
+	{
+		.mask  = INSN_MASK_CSRRC,
+		.match = INSN_MATCH_CSRRC,
+		.func  = csr_insn,
+	},
+	{
+		.mask  = INSN_MASK_CSRRWI,
+		.match = INSN_MATCH_CSRRWI,
+		.func  = csr_insn,
+	},
+	{
+		.mask  = INSN_MASK_CSRRSI,
+		.match = INSN_MATCH_CSRRSI,
+		.func  = csr_insn,
+	},
+	{
+		.mask  = INSN_MASK_CSRRCI,
+		.match = INSN_MATCH_CSRRCI,
+		.func  = csr_insn,
+	},
+	{
+		.mask  = INSN_MASK_WFI,
+		.match = INSN_MATCH_WFI,
+		.func  = wfi_insn,
+	},
+};
+
+static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+			      ulong insn)
+{
+	int i, rc = KVM_INSN_ILLEGAL_TRAP;
+	const struct insn_func *ifn;
+
+	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
+		ifn = &system_opcode_funcs[i];
+		if ((insn & ifn->mask) == ifn->match) {
+			rc = ifn->func(vcpu, run, insn);
+			break;
+		}
+	}
+
+	switch (rc) {
+	case KVM_INSN_ILLEGAL_TRAP:
+		return truly_illegal_insn(vcpu, run, insn);
+	case KVM_INSN_VIRTUAL_TRAP:
+		return truly_virtual_insn(vcpu, run, insn);
+	case KVM_INSN_CONTINUE_NEXT_SEPC:
+		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+		break;
+	default:
+		break;
+	}
+
+	return (rc <= 0) ? rc : 1;
+}
+
+/**
+ * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
+ *
+ * @vcpu: The VCPU pointer
+ * @run:  The VCPU run struct containing the mmio data
+ * @trap: Trap details
+ *
+ * Returns > 0 to continue run-loop
+ * Returns   0 to exit run-loop and handle in user-space.
+ * Returns < 0 to report failure and exit run-loop
+ */
+int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+				struct kvm_cpu_trap *trap)
+{
+	unsigned long insn = trap->stval;
+	struct kvm_cpu_trap utrap = { 0 };
+	struct kvm_cpu_context *ct;
+
+	if (unlikely(INSN_IS_16BIT(insn))) {
+		if (insn == 0) {
+			ct = &vcpu->arch.guest_context;
+			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
+							  ct->sepc,
+							  &utrap);
+			if (utrap.scause) {
+				utrap.sepc = ct->sepc;
+				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+				return 1;
+			}
+		}
+		if (INSN_IS_16BIT(insn))
+			return truly_illegal_insn(vcpu, run, insn);
+	}
+
+	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
+	case INSN_OPCODE_SYSTEM:
+		return system_opcode_insn(vcpu, run, insn);
+	default:
+		return truly_illegal_insn(vcpu, run, insn);
+	}
+}
+
+/**
+ * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
+ *
+ * @vcpu: The VCPU pointer
+ * @run:  The VCPU run struct containing the mmio data
+ * @fault_addr: Guest physical address to load
+ * @htinst: Transformed encoding of the load instruction
+ *
+ * Returns > 0 to continue run-loop
+ * Returns   0 to exit run-loop and handle in user-space.
+ * Returns < 0 to report failure and exit run-loop
+ */
+int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
+			     unsigned long fault_addr,
+			     unsigned long htinst)
+{
+	u8 data_buf[8];
+	unsigned long insn;
+	int shift = 0, len = 0, insn_len = 0;
+	struct kvm_cpu_trap utrap = { 0 };
+	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
+
+	/* Determine trapped instruction */
+	if (htinst & 0x1) {
+		/*
+		 * Bit[0] == 1 implies trapped instruction value is
+		 * transformed instruction or custom instruction.
+		 */
+		insn = htinst | INSN_16BIT_MASK;
+		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
+	} else {
+		/*
+		 * Bit[0] == 0 implies trapped instruction value is
+		 * zero or special value.
+		 */
+		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
+						  &utrap);
+		if (utrap.scause) {
+			/* Redirect trap if we failed to read instruction */
+			utrap.sepc = ct->sepc;
+			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+			return 1;
+		}
+		insn_len = INSN_LEN(insn);
+	}
+
+	/* Decode length of MMIO and shift */
+	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
+		len = 4;
+		shift = 8 * (sizeof(ulong) - len);
+	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
+		len = 1;
+		shift = 8 * (sizeof(ulong) - len);
+	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
+		len = 1;
+		shift = 8 * (sizeof(ulong) - len);
+#ifdef CONFIG_64BIT
+	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
+		len = 8;
+		shift = 8 * (sizeof(ulong) - len);
+	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
+		len = 4;
+#endif
+	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
+		len = 2;
+		shift = 8 * (sizeof(ulong) - len);
+	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
+		len = 2;
+#ifdef CONFIG_64BIT
+	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
+		len = 8;
+		shift = 8 * (sizeof(ulong) - len);
+		insn = RVC_RS2S(insn) << SH_RD;
+	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
+		   ((insn >> SH_RD) & 0x1f)) {
+		len = 8;
+		shift = 8 * (sizeof(ulong) - len);
+#endif
+	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
+		len = 4;
+		shift = 8 * (sizeof(ulong) - len);
+		insn = RVC_RS2S(insn) << SH_RD;
+	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
+		   ((insn >> SH_RD) & 0x1f)) {
+		len = 4;
+		shift = 8 * (sizeof(ulong) - len);
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	/* Fault address should be aligned to length of MMIO */
+	if (fault_addr & (len - 1))
+		return -EIO;
+
+	/* Save instruction decode info */
+	vcpu->arch.mmio_decode.insn = insn;
+	vcpu->arch.mmio_decode.insn_len = insn_len;
+	vcpu->arch.mmio_decode.shift = shift;
+	vcpu->arch.mmio_decode.len = len;
+	vcpu->arch.mmio_decode.return_handled = 0;
+
+	/* Update MMIO details in kvm_run struct */
+	run->mmio.is_write = false;
+	run->mmio.phys_addr = fault_addr;
+	run->mmio.len = len;
+
+	/* Try to handle MMIO access in the kernel */
+	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
+		/* Successfully handled MMIO access in the kernel so resume */
+		memcpy(run->mmio.data, data_buf, len);
+		vcpu->stat.mmio_exit_kernel++;
+		kvm_riscv_vcpu_mmio_return(vcpu, run);
+		return 1;
+	}
+
+	/* Exit to userspace for MMIO emulation */
+	vcpu->stat.mmio_exit_user++;
+	run->exit_reason = KVM_EXIT_MMIO;
+
+	return 0;
+}
+
+/**
+ * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
+ *
+ * @vcpu: The VCPU pointer
+ * @run:  The VCPU run struct containing the mmio data
+ * @fault_addr: Guest physical address to store
+ * @htinst: Transformed encoding of the store instruction
+ *
+ * Returns > 0 to continue run-loop
+ * Returns   0 to exit run-loop and handle in user-space.
+ * Returns < 0 to report failure and exit run-loop
+ */
+int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
+			      unsigned long fault_addr,
+			      unsigned long htinst)
+{
+	u8 data8;
+	u16 data16;
+	u32 data32;
+	u64 data64;
+	ulong data;
+	unsigned long insn;
+	int len = 0, insn_len = 0;
+	struct kvm_cpu_trap utrap = { 0 };
+	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
+
+	/* Determine trapped instruction */
+	if (htinst & 0x1) {
+		/*
+		 * Bit[0] == 1 implies trapped instruction value is
+		 * transformed instruction or custom instruction.
+		 */
+		insn = htinst | INSN_16BIT_MASK;
+		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
+	} else {
+		/*
+		 * Bit[0] == 0 implies trapped instruction value is
+		 * zero or special value.
+		 */
+		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
+						  &utrap);
+		if (utrap.scause) {
+			/* Redirect trap if we failed to read instruction */
+			utrap.sepc = ct->sepc;
+			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+			return 1;
+		}
+		insn_len = INSN_LEN(insn);
+	}
+
+	data = GET_RS2(insn, &vcpu->arch.guest_context);
+	data8 = data16 = data32 = data64 = data;
+
+	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
+		len = 4;
+	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
+		len = 1;
+#ifdef CONFIG_64BIT
+	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
+		len = 8;
+#endif
+	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
+		len = 2;
+#ifdef CONFIG_64BIT
+	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
+		len = 8;
+		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
+	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
+		   ((insn >> SH_RD) & 0x1f)) {
+		len = 8;
+		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
+#endif
+	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
+		len = 4;
+		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
+	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
+		   ((insn >> SH_RD) & 0x1f)) {
+		len = 4;
+		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	/* Fault address should be aligned to length of MMIO */
+	if (fault_addr & (len - 1))
+		return -EIO;
+
+	/* Save instruction decode info */
+	vcpu->arch.mmio_decode.insn = insn;
+	vcpu->arch.mmio_decode.insn_len = insn_len;
+	vcpu->arch.mmio_decode.shift = 0;
+	vcpu->arch.mmio_decode.len = len;
+	vcpu->arch.mmio_decode.return_handled = 0;
+
+	/* Copy data to kvm_run instance */
+	switch (len) {
+	case 1:
+		*((u8 *)run->mmio.data) = data8;
+		break;
+	case 2:
+		*((u16 *)run->mmio.data) = data16;
+		break;
+	case 4:
+		*((u32 *)run->mmio.data) = data32;
+		break;
+	case 8:
+		*((u64 *)run->mmio.data) = data64;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	/* Update MMIO details in kvm_run struct */
+	run->mmio.is_write = true;
+	run->mmio.phys_addr = fault_addr;
+	run->mmio.len = len;
+
+	/* Try to handle MMIO access in the kernel */
+	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
+			      fault_addr, len, run->mmio.data)) {
+		/* Successfully handled MMIO access in the kernel so resume */
+		vcpu->stat.mmio_exit_kernel++;
+		kvm_riscv_vcpu_mmio_return(vcpu, run);
+		return 1;
+	}
+
+	/* Exit to userspace for MMIO emulation */
+	vcpu->stat.mmio_exit_user++;
+	run->exit_reason = KVM_EXIT_MMIO;
+
+	return 0;
+}
+
+/**
+ * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
+ *			     or in-kernel IO emulation
+ *
+ * @vcpu: The VCPU pointer
+ * @run:  The VCPU run struct containing the mmio data
+ */
+int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u8 data8;
+	u16 data16;
+	u32 data32;
+	u64 data64;
+	ulong insn;
+	int len, shift;
+
+	if (vcpu->arch.mmio_decode.return_handled)
+		return 0;
+
+	vcpu->arch.mmio_decode.return_handled = 1;
+	insn = vcpu->arch.mmio_decode.insn;
+
+	if (run->mmio.is_write)
+		goto done;
+
+	len = vcpu->arch.mmio_decode.len;
+	shift = vcpu->arch.mmio_decode.shift;
+
+	switch (len) {
+	case 1:
+		data8 = *((u8 *)run->mmio.data);
+		SET_RD(insn, &vcpu->arch.guest_context,
+			(ulong)data8 << shift >> shift);
+		break;
+	case 2:
+		data16 = *((u16 *)run->mmio.data);
+		SET_RD(insn, &vcpu->arch.guest_context,
+			(ulong)data16 << shift >> shift);
+		break;
+	case 4:
+		data32 = *((u32 *)run->mmio.data);
+		SET_RD(insn, &vcpu->arch.guest_context,
+			(ulong)data32 << shift >> shift);
+		break;
+	case 8:
+		data64 = *((u64 *)run->mmio.data);
+		SET_RD(insn, &vcpu->arch.guest_context,
+			(ulong)data64 << shift >> shift);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+done:
+	/* Move to next instruction */
+	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;
+
+	return 0;
+}
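
Note that csr_funcs[] above ships empty in this patch, so every trapped CSR access currently falls through to userspace as KVM_EXIT_RISCV_CSR. A sketch of how a later in-kernel CSR handler could plug in (the CSR number and handler are hypothetical, and a real handler would keep per-VCPU rather than static state):

	/* Hypothetical in-kernel CSR emulation for csr_funcs[]. */
	#define CSR_MY_EXAMPLE	0x5c0	/* placeholder CSR number */

	static int my_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
			      unsigned long *val, unsigned long new_val,
			      unsigned long wr_mask)
	{
		static unsigned long shadow;	/* sketch only */

		*val = shadow;	/* old value, written back to rd */
		shadow = (shadow & ~wr_mask) | (new_val & wr_mask);
		return KVM_INSN_CONTINUE_NEXT_SEPC;	/* handled in kernel */
	}

	/* and the table would become:
	 * static const struct csr_func csr_funcs[] = {
	 *	{ .base = CSR_MY_EXAMPLE, .count = 1, .func = my_csr_rmw },
	 * };
	 */
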
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 5c4c37ff2d48..595043857049 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -214,12 +214,10 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
 #endif
 }
 
-int kvm_riscv_guest_timer_init(struct kvm *kvm)
+void kvm_riscv_guest_timer_init(struct kvm *kvm)
 {
 	struct kvm_guest_timer *gt = &kvm->arch.timer;
 
 	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
 	gt->time_delta = -get_cycles64();
-
-	return 0;
 }
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index 945a2bf5e3f6..65a964d7e70d 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -41,7 +41,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 			return r;
 	}
 
-	return kvm_riscv_guest_timer_init(kvm);
+	kvm_riscv_guest_timer_init(kvm);
+
+	return 0;
 }
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
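
On the userspace side, the new KVM_EXIT_RISCV_CSR exit carries the CSR number, write mask, and new value in kvm_run, and the VMM is expected to place its result in ret_value before re-entering the guest. A rough sketch of the corresponding run-loop arm (csr_emulate() is a hypothetical VMM-side helper):

	#include <linux/kvm.h>

	/* Hypothetical VMM-side CSR emulation helper. */
	extern unsigned long csr_emulate(unsigned int csr_num,
					 unsigned long new_value,
					 unsigned long write_mask);

	static void handle_riscv_csr_exit(struct kvm_run *run)
	{
		if (run->exit_reason == KVM_EXIT_RISCV_CSR)
			run->riscv_csr.ret_value =
				csr_emulate(run->riscv_csr.csr_num,
					    run->riscv_csr.new_value,
					    run->riscv_csr.write_mask);
		/* then ioctl(vcpu_fd, KVM_RUN, 0) again; in the kernel,
		 * kvm_riscv_vcpu_csr_return() copies ret_value into rd. */
	}
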