author		Christoffer Dall <christoffer.dall@arm.com>	2018-09-18 20:08:18 +0300
committer	Marc Zyngier <marc.zyngier@arm.com>	2019-02-20 00:05:41 +0300
commit		e604dd5d45c75c2112424dec74853efb708f4fa6
tree		0898f81bdaa7ab3e90338ffaf7e8bd34ec2dc914
parent		84135d3d18da2ff17d3ad1a609b2818cc3049552
KVM: arm/arm64: timer: Rework data structures for multiple timers
Prepare for having 4 timer data structures (2 for now). Move the 'loaded' flag to the arch_timer_cpu data structure rather than the individual timer structure, in preparation for assigning the EL1 phys timer as well.

Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/arch_timer.c	58
1 file changed, 30 insertions(+), 28 deletions(-)
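
The struct changes themselves live in include/kvm/arm_arch_timer.h, which falls outside this 'virt'-limited diffstat. A minimal sketch of the layout implied by the fields and accessors used in the diff below (member names are taken from the diff; the remaining members and exact types are assumptions):

struct arch_timer_context {
	struct kvm_vcpu		*vcpu;		/* back-pointer, set in kvm_timer_vcpu_init() */
	struct kvm_irq_level	irq;		/* timer interrupt */
	u64			cntvoff;	/* virtual offset */
	struct hrtimer		hrtimer;	/* per-context soft timer; replaces timer_cpu.phys_timer */
};

struct arch_timer_cpu {
	struct arch_timer_context vtimer;
	struct arch_timer_context ptimer;
	struct hrtimer	bg_timer;	/* background timer while the vcpu is blocked */
	bool		enabled;
	bool		loaded;		/* moved here from arch_timer_context: hw timer state is
					 * loaded/unloaded for the whole vcpu, not per timer */
};

#define vcpu_timer(v)	(&(v)->arch.timer_cpu)
#define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.vtimer)
#define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.ptimer)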
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index f7d377448438..471f9fd004c9 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -184,13 +184,11 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
{
struct arch_timer_context *ptimer;
- struct arch_timer_cpu *timer;
struct kvm_vcpu *vcpu;
u64 ns;
- timer = container_of(hrt, struct arch_timer_cpu, phys_timer);
- vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
- ptimer = vcpu_ptimer(vcpu);
+ ptimer = container_of(hrt, struct arch_timer_context, hrtimer);
+ vcpu = ptimer->vcpu;
/*
* Check that the timer has really expired from the guest's
@@ -209,9 +207,10 @@ static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
+ struct arch_timer_cpu *timer = vcpu_timer(timer_ctx->vcpu);
u64 cval, now;
- if (timer_ctx->loaded) {
+ if (timer->loaded) {
u32 cnt_ctl;
/* Only the virtual timer can be loaded so far */
@@ -280,7 +279,6 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
/* Schedule the background timer for the emulated timer. */
static void phys_timer_emulate(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
/*
@@ -289,11 +287,11 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu)
* then we also don't need a soft timer.
*/
if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
- soft_timer_cancel(&timer->phys_timer);
+ soft_timer_cancel(&ptimer->hrtimer);
return;
}
- soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(ptimer));
+ soft_timer_start(&ptimer->hrtimer, kvm_timer_compute_delta(ptimer));
}
/*
@@ -303,7 +301,7 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu)
*/
static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
bool level;
@@ -329,13 +327,13 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
static void vtimer_save_state(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
unsigned long flags;
local_irq_save(flags);
- if (!vtimer->loaded)
+ if (!timer->loaded)
goto out;
if (timer->enabled) {
@@ -347,7 +345,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
write_sysreg_el0(0, cntv_ctl);
isb();
- vtimer->loaded = false;
+ timer->loaded = false;
out:
local_irq_restore(flags);
}
@@ -359,7 +357,7 @@ out:
*/
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
@@ -379,20 +377,20 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
soft_timer_cancel(&timer->bg_timer);
}
static void vtimer_restore_state(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
unsigned long flags;
local_irq_save(flags);
- if (vtimer->loaded)
+ if (timer->loaded)
goto out;
if (timer->enabled) {
@@ -401,7 +399,7 @@ static void vtimer_restore_state(struct kvm_vcpu *vcpu)
write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
}
- vtimer->loaded = true;
+ timer->loaded = true;
out:
local_irq_restore(flags);
}
@@ -462,7 +460,7 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
@@ -507,7 +505,8 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
if (unlikely(!timer->enabled))
return;
@@ -523,7 +522,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
* In any case, we re-schedule the hrtimer for the physical timer when
* coming back to the VCPU thread in kvm_timer_vcpu_load().
*/
- soft_timer_cancel(&timer->phys_timer);
+ soft_timer_cancel(&ptimer->hrtimer);
if (swait_active(kvm_arch_vcpu_wq(vcpu)))
kvm_timer_blocking(vcpu);
@@ -559,7 +558,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
if (unlikely(!timer->enabled))
return;
@@ -570,7 +569,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
@@ -611,22 +610,25 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
/* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
- vcpu_ptimer(vcpu)->cntvoff = 0;
+ ptimer->cntvoff = 0;
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
timer->bg_timer.function = kvm_bg_timer_expire;
- hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- timer->phys_timer.function = kvm_phys_timer_expire;
+ hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ptimer->hrtimer.function = kvm_phys_timer_expire;
vtimer->irq.irq = default_vtimer_irq.irq;
ptimer->irq.irq = default_ptimer_irq.irq;
+
+ vtimer->vcpu = vcpu;
+ ptimer->vcpu = vcpu;
}
static void kvm_timer_init_interrupt(void *info)
@@ -860,7 +862,7 @@ out_free_irq:
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
soft_timer_cancel(&timer->bg_timer);
}
@@ -904,7 +906,7 @@ bool kvm_arch_timer_get_input_level(int vintid)
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
- struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
int ret;
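
With the hrtimer embedded in each arch_timer_context and the vcpu back-pointer filled in at init time, an expiry handler can recover its context without knowing which of the (eventually four) timers fired; the old code instead had to container_of() into arch_timer_cpu through the hard-coded phys_timer member, which cannot scale past a single emulated timer. A sketch of the resulting pattern (hypothetical handler name, for illustration):

static enum hrtimer_restart timer_context_expire(struct hrtimer *hrt)
{
	/* recover the owning context directly from the embedded hrtimer */
	struct arch_timer_context *ctx =
		container_of(hrt, struct arch_timer_context, hrtimer);
	struct kvm_vcpu *vcpu = ctx->vcpu;	/* back-pointer set at vcpu init */

	/* ... check expiry against the guest's view of the counter ... */
	return HRTIMER_NORESTART;
}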