// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>

#define SBI_VERSION_MAJOR 0
#define SBI_VERSION_MINOR 1
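
/*
 * Forward an SBI call that is not handled in the kernel to user space:
 * report a KVM_EXIT_RISCV_SBI exit carrying the extension/function IDs
 * and arguments from a7/a6/a0-a5. a0/a1 are pre-loaded into ret[] as
 * the default return values.
 */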
static void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        vcpu->arch.sbi_context.return_handled = 0;
        vcpu->stat.ecall_exit_stat++;
        run->exit_reason = KVM_EXIT_RISCV_SBI;
        run->riscv_sbi.extension_id = cp->a7;
        run->riscv_sbi.function_id = cp->a6;
        run->riscv_sbi.args[0] = cp->a0;
        run->riscv_sbi.args[1] = cp->a1;
        run->riscv_sbi.args[2] = cp->a2;
        run->riscv_sbi.args[3] = cp->a3;
        run->riscv_sbi.args[4] = cp->a4;
        run->riscv_sbi.args[5] = cp->a5;
        run->riscv_sbi.ret[0] = cp->a0;
        run->riscv_sbi.ret[1] = cp->a1;
}
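
/*
 * Called once user space has completed a forwarded SBI call: copy the
 * return values from the run structure back into a0/a1 and step the
 * guest past the ecall instruction.
 */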
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        /* Handle SBI return only once */
        if (vcpu->arch.sbi_context.return_handled)
                return 0;
        vcpu->arch.sbi_context.return_handled = 1;

        /* Update return values */
        cp->a0 = run->riscv_sbi.ret[0];
        cp->a1 = run->riscv_sbi.ret[1];

        /* Move to next instruction */
        vcpu->arch.guest_context.sepc += 4;

        return 0;
}
#ifdef CONFIG_RISCV_SBI_V01
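
/*
 * Power off every vCPU, park them with a KVM_REQ_SLEEP request and
 * report a system event of the given type to user space.
 */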
static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
                                    struct kvm_run *run, u32 type)
{
        unsigned long i;
        struct kvm_vcpu *tmp;

        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
                tmp->arch.power_off = true;
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&run->system_event, 0, sizeof(run->system_event));
        run->system_event.type = type;
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
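
/*
 * Top-level handler for SBI v0.1 ecalls from the guest. Returns > 0 to
 * resume the guest, 0 to exit to user space and < 0 on error.
 */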
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        ulong hmask;
        int i, ret = 1;
        u64 next_cycle;
        struct kvm_vcpu *rvcpu;
        bool next_sepc = true;
        struct cpumask cm, hm;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_cpu_trap utrap = { 0 };
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        if (!cp)
                return -EINVAL;

        switch (cp->a7) {
        case SBI_EXT_0_1_CONSOLE_GETCHAR:
        case SBI_EXT_0_1_CONSOLE_PUTCHAR:
                /*
                 * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
                 * handled in the kernel, so we forward these to user space.
                 */
                kvm_riscv_vcpu_sbi_forward(vcpu, run);
                next_sepc = false;
                ret = 0;
                break;
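        /*
         * On rv32 the 64-bit absolute cycle value is passed in a0 (low
         * word) and a1 (high word); on rv64 it fits in a0 alone.
         */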
        case SBI_EXT_0_1_SET_TIMER:
#if __riscv_xlen == 32
                next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
                next_cycle = (u64)cp->a0;
#endif
                kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
                break;
        case SBI_EXT_0_1_CLEAR_IPI:
                kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
                break;
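        /*
         * a0 points at a hart mask in guest memory (read with an
         * unprivileged load); a NULL mask means all vCPUs. If the load
         * faults, redirect the trap to the guest without skipping the
         * ecall.
         */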
        case SBI_EXT_0_1_SEND_IPI:
                if (cp->a0)
                        hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
                                                           &utrap);
                else
                        hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
                if (utrap.scause) {
                        utrap.sepc = cp->sepc;
                        kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
                        next_sepc = false;
                        break;
                }
                for_each_set_bit(i, &hmask, BITS_PER_LONG) {
                        rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
                        kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
                }
                break;
        case SBI_EXT_0_1_SHUTDOWN:
                kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
                next_sepc = false;
                ret = 0;
                break;
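        /*
         * For the remote fence calls, build a cpumask of the physical
         * CPUs the target vCPUs are currently loaded on, translate it
         * to a hart mask and issue the matching fence/hfence through
         * the host SBI implementation.
         */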
        case SBI_EXT_0_1_REMOTE_FENCE_I:
        case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
        case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
                if (cp->a0)
                        hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
                                                           &utrap);
                else
                        hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
                if (utrap.scause) {
                        utrap.sepc = cp->sepc;
                        kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
                        next_sepc = false;
                        break;
                }
                cpumask_clear(&cm);
                for_each_set_bit(i, &hmask, BITS_PER_LONG) {
                        rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
                        if (rvcpu->cpu < 0)
                                continue;
                        cpumask_set_cpu(rvcpu->cpu, &cm);
                }
                riscv_cpuid_to_hartid_mask(&cm, &hm);
                if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
                        sbi_remote_fence_i(cpumask_bits(&hm));
                else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
                        sbi_remote_hfence_vvma(cpumask_bits(&hm),
                                               cp->a1, cp->a2);
                else
                        sbi_remote_hfence_vvma_asid(cpumask_bits(&hm),
                                                    cp->a1, cp->a2, cp->a3);
                break;
        default:
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                break;
        }
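
        /*
         * The ecall instruction is 4 bytes; skip it unless the call was
         * forwarded to user space or a trap was redirected to the guest.
         */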
        if (next_sepc)
                cp->sepc += 4;

        return ret;
}
#else
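
/*
 * Without in-kernel SBI v0.1 support, forward every SBI call to user
 * space.
 */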
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_riscv_vcpu_sbi_forward(vcpu, run);
        return 0;
}

#endif