Skip to content

Commit 3a9f66c

Browse files
atishp04 and avpatel
authored and committed
RISC-V: KVM: Add timer functionality
The RISC-V hypervisor specification doesn't have any virtual timer feature. Due to this, the guest VCPU timer will be programmed via SBI calls. The host will use a separate hrtimer event for each guest VCPU to provide timer functionality. We inject a virtual timer interrupt to the guest VCPU whenever the guest VCPU hrtimer event expires. This patch adds guest VCPU timer implementation along with ONE_REG interface to access VCPU timer state from user space. Signed-off-by: Atish Patra <atish.patra@wdc.com> Signed-off-by: Anup Patel <anup.patel@wdc.com> Acked-by: Paolo Bonzini <pbonzini@redhat.com> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Acked-by: Daniel Lezcano <daniel.lezcano@linaro.org> Acked-by: Palmer Dabbelt <palmerdabbelt@google.com>
1 parent 9955371 commit 3a9f66c

9 files changed

Lines changed: 334 additions & 1 deletion

File tree

arch/riscv/include/asm/kvm_host.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
#include <linux/types.h>
1313
#include <linux/kvm.h>
1414
#include <linux/kvm_types.h>
15+
#include <asm/kvm_vcpu_timer.h>
1516

1617
#ifdef CONFIG_64BIT
1718
#define KVM_MAX_VCPUS (1U << 16)
@@ -60,6 +61,9 @@ struct kvm_arch {
6061
/* stage2 page table */
6162
pgd_t *pgd;
6263
phys_addr_t pgd_phys;
64+
65+
/* Guest Timer */
66+
struct kvm_guest_timer timer;
6367
};
6468

6569
struct kvm_mmio_decode {
@@ -175,6 +179,9 @@ struct kvm_vcpu_arch {
175179
unsigned long irqs_pending;
176180
unsigned long irqs_pending_mask;
177181

182+
/* VCPU Timer */
183+
struct kvm_vcpu_timer timer;
184+
178185
/* MMIO instruction details */
179186
struct kvm_mmio_decode mmio_decode;
180187

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#ifndef __KVM_VCPU_RISCV_TIMER_H
#define __KVM_VCPU_RISCV_TIMER_H

#include <linux/hrtimer.h>

/* Per-VM guest timer state, shared by all VCPUs of the VM. */
struct kvm_guest_timer {
	/* Mult & shift values to get nanoseconds from cycles */
	u32 nsec_mult;
	u32 nsec_shift;
	/* Time delta value (added to host cycles to get guest time) */
	u64 time_delta;
};

/* Per-VCPU timer state backed by a host hrtimer. */
struct kvm_vcpu_timer {
	/* Flag for whether init is done */
	bool init_done;
	/* Flag for whether timer event is configured */
	bool next_set;
	/* Next timer event cycles */
	u64 next_cycles;
	/* Underlying hrtimer instance */
	struct hrtimer hrt;
};

/* Program the next timer event for @vcpu at guest cycle count @ncycles. */
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
/* ONE_REG accessors for the timer register group. */
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg);
/* VCPU timer lifecycle: init at create, deinit at destroy, reset on VCPU reset. */
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
/* Restore guest time delta CSRs when the VCPU is loaded on a host CPU. */
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
/* Per-VM guest timer setup, called from kvm_arch_init_vm(). */
int kvm_riscv_guest_timer_init(struct kvm *kvm);

#endif

arch/riscv/include/uapi/asm/kvm.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,18 @@ struct kvm_riscv_csr {
7474
unsigned long scounteren;
7575
};
7676

77+
/* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
78+
struct kvm_riscv_timer {
79+
__u64 frequency;
80+
__u64 time;
81+
__u64 compare;
82+
__u64 state;
83+
};
84+
85+
/* Possible states for kvm_riscv_timer */
86+
#define KVM_RISCV_TIMER_STATE_OFF 0
87+
#define KVM_RISCV_TIMER_STATE_ON 1
88+
7789
#define KVM_REG_SIZE(id) \
7890
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
7991

@@ -96,6 +108,11 @@ struct kvm_riscv_csr {
96108
#define KVM_REG_RISCV_CSR_REG(name) \
97109
(offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long))
98110

111+
/* Timer registers are mapped as type 4 */
#define KVM_REG_RISCV_TIMER		(0x04 << KVM_REG_RISCV_TYPE_SHIFT)
/* Register index within the timer group, derived from the struct layout. */
#define KVM_REG_RISCV_TIMER_REG(name)	\
	(offsetof(struct kvm_riscv_timer, name) / sizeof(__u64))
115+
99116
#endif
100117

101118
#endif /* __LINUX_KVM_RISCV_H */

arch/riscv/kvm/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,3 +21,4 @@ kvm-y += mmu.o
2121
kvm-y += vcpu.o
2222
kvm-y += vcpu_exit.o
2323
kvm-y += vcpu_switch.o
24+
kvm-y += vcpu_timer.o

arch/riscv/kvm/vcpu.c

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
5858

5959
memcpy(cntx, reset_cntx, sizeof(*cntx));
6060

61+
kvm_riscv_vcpu_timer_reset(vcpu);
62+
6163
WRITE_ONCE(vcpu->arch.irqs_pending, 0);
6264
WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
6365
}
@@ -85,6 +87,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
8587
cntx->hstatus |= HSTATUS_SPVP;
8688
cntx->hstatus |= HSTATUS_SPV;
8789

90+
/* Setup VCPU timer */
91+
kvm_riscv_vcpu_timer_init(vcpu);
92+
8893
/* Reset VCPU */
8994
kvm_riscv_reset_vcpu(vcpu);
9095

@@ -97,6 +102,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
97102

98103
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
99104
{
105+
/* Cleanup VCPU timer */
106+
kvm_riscv_vcpu_timer_deinit(vcpu);
107+
100108
/* Flush the pages pre-allocated for Stage2 page table mappings */
101109
kvm_riscv_stage2_flush_cache(vcpu);
102110
}
@@ -332,6 +340,8 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
332340
return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
333341
else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
334342
return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
343+
else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
344+
return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
335345

336346
return -EINVAL;
337347
}
@@ -345,6 +355,8 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
345355
return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
346356
else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
347357
return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
358+
else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
359+
return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
348360

349361
return -EINVAL;
350362
}
@@ -579,6 +591,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
579591

580592
kvm_riscv_stage2_update_hgatp(vcpu);
581593

594+
kvm_riscv_vcpu_timer_restore(vcpu);
595+
582596
vcpu->cpu = cpu;
583597
}
584598

arch/riscv/kvm/vcpu_timer.c

Lines changed: 225 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,225 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/*
3+
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
4+
*
5+
* Authors:
6+
* Atish Patra <atish.patra@wdc.com>
7+
*/
8+
9+
#include <linux/errno.h>
10+
#include <linux/err.h>
11+
#include <linux/kvm_host.h>
12+
#include <linux/uaccess.h>
13+
#include <clocksource/timer-riscv.h>
14+
#include <asm/csr.h>
15+
#include <asm/delay.h>
16+
#include <asm/kvm_vcpu_timer.h>
17+
18+
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
19+
{
20+
return get_cycles64() + gt->time_delta;
21+
}
22+
23+
/*
 * Convert the distance from "now" (guest time) to @cycles into nanoseconds
 * using the clocksource mult/shift pair. Returns 0 if @cycles is already in
 * the past. @t is currently unused but kept for interface symmetry.
 *
 * Interrupts are disabled so the "now" sample and the conversion are not
 * skewed by preemption on this CPU.
 */
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	if (cycles_now < cycles)
		cycles_delta = cycles - cycles_now;
	else
		cycles_delta = 0;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}
41+
42+
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
43+
{
44+
u64 delta_ns;
45+
struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
46+
struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
47+
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
48+
49+
if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
50+
delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
51+
hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
52+
return HRTIMER_RESTART;
53+
}
54+
55+
t->next_set = false;
56+
kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);
57+
58+
return HRTIMER_NORESTART;
59+
}
60+
61+
static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
62+
{
63+
if (!t->init_done || !t->next_set)
64+
return -EINVAL;
65+
66+
hrtimer_cancel(&t->hrt);
67+
t->next_set = false;
68+
69+
return 0;
70+
}
71+
72+
/*
 * Program the next guest timer event at guest cycle count @ncycles
 * (typically from an SBI set_timer call).
 *
 * Clears any pending VS timer interrupt first, then arms the hrtimer for
 * the nanosecond distance to @ncycles. Returns -EINVAL if the timer was
 * never initialized.
 */
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}
90+
91+
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
92+
const struct kvm_one_reg *reg)
93+
{
94+
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
95+
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
96+
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
97+
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
98+
KVM_REG_SIZE_MASK |
99+
KVM_REG_RISCV_TIMER);
100+
u64 reg_val;
101+
102+
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
103+
return -EINVAL;
104+
if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
105+
return -EINVAL;
106+
107+
switch (reg_num) {
108+
case KVM_REG_RISCV_TIMER_REG(frequency):
109+
reg_val = riscv_timebase;
110+
break;
111+
case KVM_REG_RISCV_TIMER_REG(time):
112+
reg_val = kvm_riscv_current_cycles(gt);
113+
break;
114+
case KVM_REG_RISCV_TIMER_REG(compare):
115+
reg_val = t->next_cycles;
116+
break;
117+
case KVM_REG_RISCV_TIMER_REG(state):
118+
reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
119+
KVM_RISCV_TIMER_STATE_OFF;
120+
break;
121+
default:
122+
return -EINVAL;
123+
};
124+
125+
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
126+
return -EFAULT;
127+
128+
return 0;
129+
}
130+
131+
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
132+
const struct kvm_one_reg *reg)
133+
{
134+
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
135+
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
136+
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
137+
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
138+
KVM_REG_SIZE_MASK |
139+
KVM_REG_RISCV_TIMER);
140+
u64 reg_val;
141+
int ret = 0;
142+
143+
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
144+
return -EINVAL;
145+
if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
146+
return -EINVAL;
147+
148+
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
149+
return -EFAULT;
150+
151+
switch (reg_num) {
152+
case KVM_REG_RISCV_TIMER_REG(frequency):
153+
ret = -EOPNOTSUPP;
154+
break;
155+
case KVM_REG_RISCV_TIMER_REG(time):
156+
gt->time_delta = reg_val - get_cycles64();
157+
break;
158+
case KVM_REG_RISCV_TIMER_REG(compare):
159+
t->next_cycles = reg_val;
160+
break;
161+
case KVM_REG_RISCV_TIMER_REG(state):
162+
if (reg_val == KVM_RISCV_TIMER_STATE_ON)
163+
ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
164+
else
165+
ret = kvm_riscv_vcpu_timer_cancel(t);
166+
break;
167+
default:
168+
ret = -EINVAL;
169+
break;
170+
};
171+
172+
return ret;
173+
}
174+
175+
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
176+
{
177+
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
178+
179+
if (t->init_done)
180+
return -EINVAL;
181+
182+
hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
183+
t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
184+
t->init_done = true;
185+
t->next_set = false;
186+
187+
return 0;
188+
}
189+
190+
int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
191+
{
192+
int ret;
193+
194+
ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
195+
vcpu->arch.timer.init_done = false;
196+
197+
return ret;
198+
}
199+
200+
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
201+
{
202+
return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
203+
}
204+
205+
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
206+
{
207+
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
208+
209+
#ifdef CONFIG_64BIT
210+
csr_write(CSR_HTIMEDELTA, gt->time_delta);
211+
#else
212+
csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
213+
csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
214+
#endif
215+
}
216+
217+
int kvm_riscv_guest_timer_init(struct kvm *kvm)
218+
{
219+
struct kvm_guest_timer *gt = &kvm->arch.timer;
220+
221+
riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
222+
gt->time_delta = -get_cycles64();
223+
224+
return 0;
225+
}

arch/riscv/kvm/vm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
4141
return r;
4242
}
4343

44-
return 0;
44+
return kvm_riscv_guest_timer_init(kvm);
4545
}
4646

4747
void kvm_arch_destroy_vm(struct kvm *kvm)

0 commit comments

Comments
 (0)