author     Marc Zyngier <maz@kernel.org>    2020-03-24 15:45:27 +0300
committer  Marc Zyngier <maz@kernel.org>    2020-03-24 15:45:27 +0300
commit     cc98702c1775a9ff3d943674c97d9561cf605fcd (patch)
tree       fbc3f129b173e6f31343cb73a9fb4d79754f0d4c /virt
parent     76a5db107273b1ad01471e48c49635e2b944a7f4 (diff)
parent     dab4fe3bf6dd87a7d6dbab2c929afd1ef62a120b (diff)
Merge branch 'kvm-arm64/gic-v4.1' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arm.c               |   8
-rw-r--r--  virt/kvm/arm/vgic/vgic-debug.c   |  14
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v3.c |  81
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c    |  88
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c      |   6
-rw-r--r--  virt/kvm/arm/vgic/vgic-v4.c      | 141
-rw-r--r--  virt/kvm/arm/vgic/vgic.h         |   1
7 files changed, 305 insertions(+), 34 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index eda7b624eab8..4d864f857ac8 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -625,6 +625,14 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
kvm_update_stolen_time(vcpu);
+
+ if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
+ /* The distributor enable bits were changed */
+ preempt_disable();
+ vgic_v4_put(vcpu, false);
+ vgic_v4_load(vcpu);
+ preempt_enable();
+ }
}
}
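
The hunk above leans on KVM's per-vcpu request machinery: the GICD_CTLR write path posts KVM_REQ_RELOAD_GICv4 to every vcpu, and each vcpu consumes it here, dropping and re-loading its vPE so the new distributor enable state reaches the redistributor. Below is a minimal user-space model of that post/consume pattern; the names are illustrative stand-ins for kvm_make_all_cpus_request() and kvm_check_request(), not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_RELOAD_GICV4 0    /* bit index, mirroring KVM_REQ_RELOAD_GICv4 */

struct toy_vcpu { atomic_uint requests; };

/* Post a request; the kernel version also kicks the vcpu out of the guest. */
static void make_request(struct toy_vcpu *v, int req)
{
        atomic_fetch_or(&v->requests, 1u << req);
}

/* Consume a request, returning true exactly once per posting. */
static bool check_request(struct toy_vcpu *v, int req)
{
        unsigned int mask = 1u << req;
        return atomic_fetch_and(&v->requests, ~mask) & mask;
}

int main(void)
{
        struct toy_vcpu v = { 0 };

        make_request(&v, REQ_RELOAD_GICV4);
        if (check_request(&v, REQ_RELOAD_GICV4))
                puts("reload vPE: vgic_v4_put(), then vgic_v4_load()");
        if (!check_request(&v, REQ_RELOAD_GICV4))
                puts("request already consumed");
        return 0;
}
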
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index cc12fe9b2df3..b13a9e3f99dd 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -178,6 +178,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
struct kvm_vcpu *vcpu)
{
char *type;
+ bool pending;
+
if (irq->intid < VGIC_NR_SGIS)
type = "SGI";
else if (irq->intid < VGIC_NR_PRIVATE_IRQS)
@@ -190,6 +192,16 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
if (irq->intid == 0 || irq->intid == VGIC_NR_PRIVATE_IRQS)
print_header(s, irq, vcpu);
+ pending = irq->pending_latch;
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ int err;
+
+ err = irq_get_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ &pending);
+ WARN_ON_ONCE(err);
+ }
+
seq_printf(s, " %s %4d "
" %2d "
"%d%d%d%d%d%d%d "
@@ -201,7 +213,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
"\n",
type, irq->intid,
(irq->target_vcpu) ? irq->target_vcpu->vcpu_id : -1,
- irq->pending_latch,
+ pending,
irq->line_level,
irq->active,
irq->enabled,
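
Once an SGI is backed by a GICv4.1 vSGI, the software pending_latch can be stale; the debug dump above therefore asks the GIC for the live pending bit via irq_get_irqchip_state(). Here is a self-contained sketch of that preference; model_irq and hw_pending are assumptions standing in for the real vgic_irq and for what the redistributor would report.

#include <stdbool.h>
#include <stdio.h>

struct model_irq {
        bool hw;            /* backed by a GICv4.1 vSGI? */
        bool pending_latch; /* software latch, authoritative only when !hw */
        bool hw_pending;    /* models irq_get_irqchip_state(IRQCHIP_STATE_PENDING) */
};

static bool irq_pending(const struct model_irq *irq)
{
        /* For HW SGIs the latch is stale; ask the (modelled) GIC instead. */
        return irq->hw ? irq->hw_pending : irq->pending_latch;
}

int main(void)
{
        struct model_irq sw = { .hw = false, .pending_latch = true };
        struct model_irq hw = { .hw = true, .pending_latch = false, .hw_pending = true };

        printf("sw pending=%d, hw pending=%d\n", irq_pending(&sw), irq_pending(&hw));
        return 0;
}
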
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index ebc218840fc2..e72dcc454247 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -3,9 +3,11 @@
* VGICv3 MMIO handling functions
*/
+#include <linux/bitfield.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>
@@ -69,6 +71,8 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
if (vgic->enabled)
value |= GICD_CTLR_ENABLE_SS_G1;
value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+ if (vgic->nassgireq)
+ value |= GICD_CTLR_nASSGIreq;
break;
case GICD_TYPER:
value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
@@ -80,6 +84,10 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
}
break;
+ case GICD_TYPER2:
+ if (kvm_vgic_global_state.has_gicv4_1)
+ value = GICD_TYPER2_nASSGIcap;
+ break;
case GICD_IIDR:
value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
@@ -97,17 +105,46 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
unsigned long val)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- bool was_enabled = dist->enabled;
switch (addr & 0x0c) {
- case GICD_CTLR:
+ case GICD_CTLR: {
+ bool was_enabled, is_hwsgi;
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ was_enabled = dist->enabled;
+ is_hwsgi = dist->nassgireq;
+
dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
- if (!was_enabled && dist->enabled)
+ /* Not a GICv4.1? No HW SGIs */
+ if (!kvm_vgic_global_state.has_gicv4_1)
+ val &= ~GICD_CTLR_nASSGIreq;
+
+ /* Dist stays enabled? nASSGIreq is RO */
+ if (was_enabled && dist->enabled) {
+ val &= ~GICD_CTLR_nASSGIreq;
+ val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
+ }
+
+ /* Switching HW SGIs? */
+ dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+ if (is_hwsgi != dist->nassgireq)
+ vgic_v4_configure_vsgis(vcpu->kvm);
+
+ if (kvm_vgic_global_state.has_gicv4_1 &&
+ was_enabled != dist->enabled)
+ kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
+ else if (!was_enabled && dist->enabled)
vgic_kick_vcpus(vcpu->kvm);
+
+ mutex_unlock(&vcpu->kvm->lock);
break;
+ }
case GICD_TYPER:
+ case GICD_TYPER2:
case GICD_IIDR:
+ /* This is at best for documentation purposes... */
return;
}
}
@@ -116,10 +153,22 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
switch (addr & 0x0c) {
+ case GICD_TYPER2:
case GICD_IIDR:
if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
return -EINVAL;
+ return 0;
+ case GICD_CTLR:
+ /* Not a GICv4.1? No HW SGIs */
+ if (!kvm_vgic_global_state.has_gicv4_1)
+ val &= ~GICD_CTLR_nASSGIreq;
+
+ dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
+ dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+ return 0;
}
vgic_mmio_write_v3_misc(vcpu, addr, len, val);
@@ -257,8 +306,18 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
*/
for (i = 0; i < len * 8; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ bool state = irq->pending_latch;
- if (irq->pending_latch)
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ int err;
+
+ err = irq_get_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ &state);
+ WARN_ON(err);
+ }
+
+ if (state)
value |= (1U << i);
vgic_put_irq(vcpu->kvm, irq);
@@ -942,8 +1001,18 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
* generate interrupts of either group.
*/
if (!irq->group || allow_group1) {
- irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ if (!irq->hw) {
+ irq->pending_latch = true;
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ } else {
+ /* HW SGI? Ask the GIC to inject it */
+ int err;
+ err = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ true);
+ WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ }
} else {
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}
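
The GICD_CTLR handling above encodes three rules: nASSGIreq is writable only on GICv4.1 hosts, it is latched only when the distributor transitions to enabled (while the distributor stays enabled it reads back unchanged), and the vSGIs are reconfigured only when the bit actually flips. Below is a compilable model of that state machine; write_ctlr() is an illustrative stand-in for vgic_mmio_write_v3_misc(), with bit positions following the GIC spec.

#include <stdbool.h>
#include <stdio.h>

#define CTLR_ENABLE_G1  (1u << 1)   /* GICD_CTLR_ENABLE_SS_G1 */
#define CTLR_nASSGIreq  (1u << 8)   /* GICD_CTLR_nASSGIreq */

struct dist { bool enabled, nassgireq; };

static void write_ctlr(struct dist *d, unsigned int val, bool has_v4_1)
{
        bool was_enabled = d->enabled;
        bool was_hwsgi   = d->nassgireq;

        d->enabled = val & CTLR_ENABLE_G1;

        if (!has_v4_1)                  /* not a GICv4.1? nASSGIreq is WI */
                val &= ~CTLR_nASSGIreq;
        if (was_enabled && d->enabled)  /* dist stays enabled? bit is RO */
                val = (val & ~CTLR_nASSGIreq) | (was_hwsgi ? CTLR_nASSGIreq : 0);

        d->nassgireq = val & CTLR_nASSGIreq;
        if (was_hwsgi != d->nassgireq)
                puts("would call vgic_v4_configure_vsgis()");
}

int main(void)
{
        struct dist d = { 0 };

        write_ctlr(&d, CTLR_ENABLE_G1 | CTLR_nASSGIreq, true); /* latched on enable */
        write_ctlr(&d, CTLR_ENABLE_G1, true);                  /* ignored: dist stays on */
        printf("enabled=%d nassgireq=%d\n", d.enabled, d.nassgireq);
        return 0;
}
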
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 97fb2a40e6ba..2199302597fa 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -5,6 +5,8 @@
#include <linux/bitops.h>
#include <linux/bsearch.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
@@ -59,6 +61,11 @@ unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
return value;
}
+static void vgic_update_vsgi(struct vgic_irq *irq)
+{
+ WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
+}
+
void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
unsigned int len, unsigned long val)
{
@@ -71,7 +78,12 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->group = !!(val & BIT(i));
- vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ vgic_update_vsgi(irq);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ } else {
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ }
vgic_put_irq(vcpu->kvm, irq);
}
@@ -113,7 +125,21 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- if (vgic_irq_is_mapped_level(irq)) {
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ if (!irq->enabled) {
+ struct irq_data *data;
+
+ irq->enabled = true;
+ data = &irq_to_desc(irq->host_irq)->irq_data;
+ while (irqd_irq_disabled(data))
+ enable_irq(irq->host_irq);
+ }
+
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+
+ continue;
+ } else if (vgic_irq_is_mapped_level(irq)) {
bool was_high = irq->line_level;
/*
@@ -148,6 +174,8 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
+ disable_irq_nosync(irq->host_irq);
irq->enabled = false;
@@ -167,10 +195,22 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
for (i = 0; i < len * 8; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
unsigned long flags;
+ bool val;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- if (irq_is_pending(irq))
- value |= (1U << i);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ int err;
+
+ val = false;
+ err = irq_get_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ &val);
+ WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+ } else {
+ val = irq_is_pending(irq);
+ }
+
+ value |= ((u32)val << i);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
@@ -215,6 +255,21 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
}
raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ /* HW SGI? Ask the GIC to inject it */
+ int err;
+ err = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ true);
+ WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+
+ continue;
+ }
+
if (irq->hw)
vgic_hw_irq_spending(vcpu, irq, is_uaccess);
else
@@ -269,6 +324,20 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ /* HW SGI? Ask the GIC to clear its pending bit */
+ int err;
+ err = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ false);
+ WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+
+ continue;
+ }
+
if (irq->hw)
vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
else
@@ -318,8 +387,15 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- if (irq->hw) {
+ if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
+ } else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ /*
+ * GICv4.1 VSGI feature doesn't track an active state,
+ * so let's not kid ourselves, there is nothing we can
+ * do here.
+ */
+ irq->active = false;
} else {
u32 model = vcpu->kvm->arch.vgic.vgic_model;
u8 active_source;
@@ -493,6 +569,8 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
raw_spin_lock_irqsave(&irq->irq_lock, flags);
/* Narrow the priority range to what we actually support */
irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid))
+ vgic_update_vsgi(irq);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
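
One detail above is easy to miss: disable_irq_nosync()/enable_irq() maintain a nesting depth on the host interrupt, so the ISENABLER path loops until irqd_irq_disabled() reports false rather than calling enable_irq() exactly once. A toy depth counter showing why a single enable may not be enough; the names are stand-ins for the genirq helpers, under the assumption that other paths (such as the doorbell handler) may also have disabled the line.

#include <stdbool.h>
#include <stdio.h>

struct host_irq { int depth; };  /* models the genirq disable depth */

static void disable_nosync(struct host_irq *hi) { hi->depth++; }
static void enable(struct host_irq *hi)         { if (hi->depth) hi->depth--; }
static bool disabled(const struct host_irq *hi) { return hi->depth > 0; }

int main(void)
{
        struct host_irq hi = { 0 };

        disable_nosync(&hi);
        disable_nosync(&hi);    /* a second path disabled the line too */

        /* What the SENABLER path does for a HW SGI: unwind *all*
         * nested disables so the vSGI is really forwarded again. */
        while (disabled(&hi))
                enable(&hi);

        printf("still disabled? %d\n", disabled(&hi));
        return 0;
}
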
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index f45635a6f0ec..2c9fc13e2c59 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -540,6 +540,8 @@ int vgic_v3_map_resources(struct kvm *kvm)
goto out;
}
+ if (kvm_vgic_global_state.has_gicv4_1)
+ vgic_v4_configure_vsgis(kvm);
dist->ready = true;
out:
@@ -595,7 +597,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
/* GICv4 support? */
if (info->has_v4) {
kvm_vgic_global_state.has_gicv4 = gicv4_enable;
- kvm_info("GICv4 support %sabled\n",
+ kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
+ kvm_info("GICv4%s support %sabled\n",
+ kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
gicv4_enable ? "en" : "dis");
}
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 46f875589c47..27ac833e5ec7 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -67,10 +67,10 @@
* it. And if we've migrated our vcpu from one CPU to another, we must
* tell the ITS (so that the messages reach the right redistributor).
* This is done in two steps: first issue a irq_set_affinity() on the
- * irq corresponding to the vcpu, then call its_schedule_vpe(). You
- * must be in a non-preemptible context. On exit, another call to
- * its_schedule_vpe() tells the redistributor that we're done with the
- * vcpu.
+ * irq corresponding to the vcpu, then call its_make_vpe_resident().
+ * You must be in a non-preemptible context. On exit, a call to
+ * its_make_vpe_non_resident() tells the redistributor that we're done
+ * with the vcpu.
*
* Finally, the doorbell handling: Each vcpu is allocated an interrupt
* which will fire each time a VLPI is made pending whilst the vcpu is
@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
struct kvm_vcpu *vcpu = info;
/* We got the message, no need to fire again */
- if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
+ if (!kvm_vgic_global_state.has_gicv4_1 &&
+ !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
@@ -96,6 +97,104 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
return IRQ_HANDLED;
}
+static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
+{
+ vpe->sgi_config[irq->intid].enabled = irq->enabled;
+ vpe->sgi_config[irq->intid].group = irq->group;
+ vpe->sgi_config[irq->intid].priority = irq->priority;
+}
+
+static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
+{
+ struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+ int i;
+
+ /*
+ * With GICv4.1, every virtual SGI can be directly injected. So
+ * let's pretend that they are HW interrupts, tied to a host
+ * IRQ. The SGI code will do its magic.
+ */
+ for (i = 0; i < VGIC_NR_SGIS; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
+ struct irq_desc *desc;
+ unsigned long flags;
+ int ret;
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if (irq->hw)
+ goto unlock;
+
+ irq->hw = true;
+ irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);
+
+ /* Transfer the full irq state to the vPE */
+ vgic_v4_sync_sgi_config(vpe, irq);
+ desc = irq_to_desc(irq->host_irq);
+ ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
+ false);
+ if (!WARN_ON(ret)) {
+ /* Transfer pending state */
+ ret = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ irq->pending_latch);
+ WARN_ON(ret);
+ irq->pending_latch = false;
+ }
+ unlock:
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+}
+
+static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ for (i = 0; i < VGIC_NR_SGIS; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
+ struct irq_desc *desc;
+ unsigned long flags;
+ int ret;
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if (!irq->hw)
+ goto unlock;
+
+ irq->hw = false;
+ ret = irq_get_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ &irq->pending_latch);
+ WARN_ON(ret);
+
+ desc = irq_to_desc(irq->host_irq);
+ irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+ unlock:
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+}
+
+/* Must be called with the kvm lock held */
+void vgic_v4_configure_vsgis(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_arm_halt_guest(kvm);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (dist->nassgireq)
+ vgic_v4_enable_vsgis(vcpu);
+ else
+ vgic_v4_disable_vsgis(vcpu);
+ }
+
+ kvm_arm_resume_guest(kvm);
+}
+
/**
* vgic_v4_init - Initialize the GICv4 data structures
* @kvm: Pointer to the VM being initialized
@@ -140,6 +239,7 @@ int vgic_v4_init(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
int irq = dist->its_vm.vpes[i]->irq;
+ unsigned long irq_flags = DB_IRQ_FLAGS;
/*
* Don't automatically enable the doorbell, as we're
@@ -147,8 +247,14 @@ int vgic_v4_init(struct kvm *kvm)
* blocked. Also disable the lazy disabling, as the
* doorbell could kick us out of the guest too
* early...
+ *
+ * On GICv4.1, the doorbell is managed in HW and must
+ * be left enabled.
*/
- irq_set_status_flags(irq, DB_IRQ_FLAGS);
+ if (kvm_vgic_global_state.has_gicv4_1)
+ irq_flags &= ~IRQ_NOAUTOEN;
+ irq_set_status_flags(irq, irq_flags);
+
ret = request_irq(irq, vgic_v4_doorbell_handler,
0, "vcpu", vcpu);
if (ret) {
@@ -199,19 +305,11 @@ void vgic_v4_teardown(struct kvm *kvm)
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
- struct irq_desc *desc = irq_to_desc(vpe->irq);
if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
return 0;
- /*
- * If blocking, a doorbell is required. Undo the nested
- * disable_irq() calls...
- */
- while (need_db && irqd_irq_disabled(&desc->irq_data))
- enable_irq(vpe->irq);
-
- return its_schedule_vpe(vpe, false);
+ return its_make_vpe_non_resident(vpe, need_db);
}
int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -232,18 +330,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
if (err)
return err;
- /* Disabled the doorbell, as we're about to enter the guest */
- disable_irq_nosync(vpe->irq);
-
- err = its_schedule_vpe(vpe, true);
+ err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
if (err)
return err;
/*
* Now that the VPE is resident, let's get rid of a potential
- * doorbell interrupt that would still be pending.
+ * doorbell interrupt that would still be pending. This is a
+ * GICv4.0 only "feature"...
*/
- return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+ if (!kvm_vgic_global_state.has_gicv4_1)
+ err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+
+ return err;
}
static struct vgic_its *vgic_get_its(struct kvm *kvm,
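
The vgic_v4_put()/vgic_v4_load() rework above pushes the doorbell decision into the ITS layer: its_make_vpe_non_resident(vpe, need_db) arms a doorbell only when the vcpu actually blocks, and making the vPE resident retires it. A toy residency model (all names illustrative, not the ITS driver) of why the doorbell only matters while the vPE is non-resident:

#include <stdbool.h>
#include <stdio.h>

struct toy_vpe { bool resident, doorbell_armed, pending_last; };

static void make_resident(struct toy_vpe *v)
{
        v->resident = true;
        v->doorbell_armed = false;   /* no doorbells while running */
}

static void make_non_resident(struct toy_vpe *v, bool need_db)
{
        v->resident = false;
        v->doorbell_armed = need_db; /* arm it only when blocking */
}

static void vlpi_arrives(struct toy_vpe *v)
{
        if (!v->resident && v->doorbell_armed)
                v->pending_last = true;  /* doorbell fires: wake the vcpu */
}

int main(void)
{
        struct toy_vpe v = { 0 };

        make_resident(&v);
        make_non_resident(&v, true); /* vcpu blocks, e.g. on WFI */
        vlpi_arrives(&v);
        printf("wake vcpu? %d\n", v.pending_last);
        return 0;
}
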
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index c7fefd6b1c80..769e4802645e 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -316,5 +316,6 @@ void vgic_its_invalidate_cache(struct kvm *kvm);
bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
+void vgic_v4_configure_vsgis(struct kvm *kvm);
#endif