Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arm.c               |  15
-rw-r--r--  virt/kvm/arm/psci.c              |  60
-rw-r--r--  virt/kvm/arm/vgic/vgic-debug.c   |   5
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c    |   2
-rw-r--r--  virt/kvm/arm/vgic/vgic-its.c     |  34
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v2.c |   5
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c    |  10
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c      |  38
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c      |  53
-rw-r--r--  virt/kvm/arm/vgic/vgic.c         |  74
-rw-r--r--  virt/kvm/arm/vgic/vgic.h         |  14
-rw-r--r--  virt/kvm/eventfd.c               |   2
12 files changed, 213 insertions(+), 99 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index dba629c5f8ac..a4c1b76240df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
+static DEFINE_RWLOCK(kvm_vmid_lock);
static bool vgic_present;
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
{
phys_addr_t pgd_phys;
u64 vmid;
+ bool new_gen;
- if (!need_new_vmid_gen(kvm))
+ read_lock(&kvm_vmid_lock);
+ new_gen = need_new_vmid_gen(kvm);
+ read_unlock(&kvm_vmid_lock);
+
+ if (!new_gen)
return;
- spin_lock(&kvm_vmid_lock);
+ write_lock(&kvm_vmid_lock);
/*
* We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
* use the same vmid.
*/
if (!need_new_vmid_gen(kvm)) {
- spin_unlock(&kvm_vmid_lock);
+ write_unlock(&kvm_vmid_lock);
return;
}
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
- spin_unlock(&kvm_vmid_lock);
+ write_unlock(&kvm_vmid_lock);
}
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
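The arm.c hunks above turn kvm_vmid_lock into a reader/writer lock: the common
no-rollover check can now run concurrently under the read lock, and only an
actual VMID generation rollover serializes on the write lock. Assembling the
scattered hunks, the resulting double-checked pattern looks like this minimal
sketch (the function name is illustrative; need_new_vmid_gen() is the helper
referenced in the hunks above):

static void update_vttbr_sketch(struct kvm *kvm)
{
	bool new_gen;

	/* Fast path: readers only observe the current VMID generation. */
	read_lock(&kvm_vmid_lock);
	new_gen = need_new_vmid_gen(kvm);
	read_unlock(&kvm_vmid_lock);

	if (!new_gen)
		return;

	/* Slow path: a rollover needs exclusive access. */
	write_lock(&kvm_vmid_lock);

	/* Re-check: another vCPU may have rolled the generation over
	 * between dropping the read lock and taking the write lock. */
	if (!need_new_vmid_gen(kvm)) {
		write_unlock(&kvm_vmid_lock);
		return;
	}

	/* ... allocate a new VMID and update kvm->arch.vttbr ... */
	write_unlock(&kvm_vmid_lock);
}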
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 6919352cbf15..c4762bef13c6 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -18,6 +18,7 @@
#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
#include <linux/wait.h>
#include <asm/cputype.h>
@@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
smccc_set_retval(vcpu, val, 0, 0, 0);
return 1;
}
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+{
+ return 1; /* PSCI version */
+}
+
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
+ return -EFAULT;
+
+ return 0;
+}
+
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+
+ val = kvm_psci_version(vcpu, vcpu->kvm);
+ if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ bool wants_02;
+ u64 val;
+
+ if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+
+ switch (val) {
+ case KVM_ARM_PSCI_0_1:
+ if (wants_02)
+ return -EINVAL;
+ vcpu->kvm->arch.psci_version = val;
+ return 0;
+ case KVM_ARM_PSCI_0_2:
+ case KVM_ARM_PSCI_1_0:
+ if (!wants_02)
+ return -EINVAL;
+ vcpu->kvm->arch.psci_version = val;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
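The firmware pseudo-register added above is accessed from userspace through
the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG machinery. A hedged userspace
sketch, assuming KVM_REG_ARM_PSCI_VERSION is exported by this series' UAPI
headers (the helper names are illustrative):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Illustrative helper: read the vCPU's PSCI version pseudo-register. */
static int get_psci_version(int vcpu_fd, uint64_t *version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uint64_t)(unsigned long)version,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

/* Illustrative helper: pin the guest to a given PSCI revision, e.g.
 * KVM_ARM_PSCI_0_2, before the first KVM_RUN (useful for migration
 * compatibility with older hosts). */
static int set_psci_version(int vcpu_fd, uint64_t version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,
		.addr = (uint64_t)(unsigned long)&version,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}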
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 10b38178cff2..4ffc0b5e6105 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
struct vgic_irq *irq;
struct kvm_vcpu *vcpu = NULL;
+ unsigned long flags;
if (iter->dist_id == 0) {
print_dist_state(s, &kvm->arch.vgic);
@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
}
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
print_irq_state(s, irq, vcpu);
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
return 0;
}
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 68378fe17a0e..e07156c30323 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -423,7 +423,7 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
* We cannot rely on the vgic maintenance interrupt to be
* delivered synchronously. This means we can only use it to
* exit the VM, and we perform the handling of EOIed
- * interrupts on the exit path (see vgic_process_maintenance).
+ * interrupts on the exit path (see vgic_fold_lr_state).
*/
return IRQ_HANDLED;
}
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index a8f07243aa9f..4ed79c939fb4 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+ unsigned long flags;
int ret;
/* In this case there is no put, since we keep the reference. */
@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
irq->intid = intid;
irq->target_vcpu = vcpu;
- spin_lock(&dist->lpi_list_lock);
+ spin_lock_irqsave(&dist->lpi_list_lock, flags);
/*
* There could be a race with another vgic_add_lpi(), so we need to
@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
dist->lpi_list_count++;
out_unlock:
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
/*
* We "cache" the configuration table entries in our struct vgic_irq's.
@@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
int ret;
unsigned long flags;
- ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
- &prop, 1);
+ ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+ &prop, 1);
if (ret)
return ret;
@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_irq *irq;
+ unsigned long flags;
u32 *intids;
int irq_count, i = 0;
@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
if (!intids)
return -ENOMEM;
- spin_lock(&dist->lpi_list_lock);
+ spin_lock_irqsave(&dist->lpi_list_lock, flags);
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (i == irq_count)
break;
@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
continue;
intids[i++] = irq->intid;
}
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
*intid_ptr = intids;
return i;
@@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
int ret = 0;
+ unsigned long flags;
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->target_vcpu = vcpu;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
if (irq->hw) {
struct its_vlpi_map map;
@@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
* this very same byte in the last iteration. Reuse that.
*/
if (byte_offset != last_byte_offset) {
- ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
- &pendmask, 1);
+ ret = kvm_read_guest_lock(vcpu->kvm,
+ pendbase + byte_offset,
+ &pendmask, 1);
if (ret) {
kfree(intids);
return ret;
@@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
return false;
/* Each 1st level entry is represented by a 64-bit value. */
- if (kvm_read_guest(its->dev->kvm,
+ if (kvm_read_guest_lock(its->dev->kvm,
BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
&indirect_ptr, sizeof(indirect_ptr)))
return false;
@@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
cbaser = CBASER_ADDRESS(its->cbaser);
while (its->cwriter != its->creadr) {
- int ret = kvm_read_guest(kvm, cbaser + its->creadr,
- cmd_buf, ITS_CMD_SIZE);
+ int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+ cmd_buf, ITS_CMD_SIZE);
/*
* If kvm_read_guest() fails, this could be due to the guest
* programming a bogus value in CBASER or something else going
@@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
int next_offset;
size_t byte_offset;
- ret = kvm_read_guest(kvm, gpa, entry, esz);
+ ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
if (ret)
return ret;
@@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
int ret;
BUG_ON(esz > sizeof(val));
- ret = kvm_read_guest(kvm, gpa, &val, esz);
+ ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
if (ret)
return ret;
val = le64_to_cpu(val);
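The kvm_read_guest() call sites converted above access the memslot array,
which must be done under the kvm->srcu read lock. On the arch side the
kvm_read_guest_lock() helper introduced by the same series is roughly the
following (hedged reconstruction, not part of this diff):

static inline int kvm_read_guest_lock(struct kvm *kvm, gpa_t gpa,
				      void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}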
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index e21e2f49b005..ffc587bf4742 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -14,6 +14,8 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/nospec.h>
+
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>
@@ -324,6 +326,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
if (n > vgic_v3_max_apr_idx(vcpu))
return 0;
+
+ n = array_index_nospec(n, 4);
+
/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
return vgicv3->vgic_ap1r[n];
}
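array_index_nospec() clamps the index under speculative execution as well as
architecturally, so a mispredicted bounds check cannot be turned into a
Spectre-v1 out-of-bounds load. The general pattern, as a minimal sketch with
illustrative names:

#include <linux/nospec.h>
#include <linux/types.h>

/* Illustrative: read from a fixed-size table with a guest-influenced
 * index. The clamp keeps the access in bounds even under speculation. */
static u32 table_read(const u32 *table, size_t nr, size_t idx)
{
	if (idx >= nr)
		return 0;

	idx = array_index_nospec(idx, nr);
	return table[idx];
}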
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index dbe99d635c80..ff9655cfeb2f 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -289,10 +289,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
irq->vcpu->cpu != -1) /* VCPU thread is running */
cond_resched_lock(&irq->irq_lock);
- if (irq->hw)
+ if (irq->hw) {
vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
- else
+ } else {
+ u32 model = vcpu->kvm->arch.vgic.vgic_model;
+
irq->active = active;
+ if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
+ active && vgic_irq_is_sgi(irq->intid))
+ irq->active_source = requester_vcpu->vcpu_id;
+ }
if (irq->active)
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 45aa433f018f..a5f2e44f1c33 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -37,13 +37,6 @@ void vgic_v2_init_lrs(void)
vgic_v2_write_lr(i, 0);
}
-void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
-{
- struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
- cpuif->vgic_hcr |= GICH_HCR_NPIE;
-}
-
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -71,13 +64,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
int lr;
unsigned long flags;
- cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
+ cpuif->vgic_hcr &= ~GICH_HCR_UIE;
for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
u32 val = cpuif->vgic_lr[lr];
- u32 intid = val & GICH_LR_VIRTUALID;
+ u32 cpuid, intid = val & GICH_LR_VIRTUALID;
struct vgic_irq *irq;
+ /* Extract the source vCPU id from the LR */
+ cpuid = val & GICH_LR_PHYSID_CPUID;
+ cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+ cpuid &= 7;
+
/* Notify fds when the guest EOI'ed a level-triggered SPI */
if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
kvm_notify_acked_irq(vcpu->kvm, 0,
@@ -90,17 +88,16 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
/* Always preserve the active bit */
irq->active = !!(val & GICH_LR_ACTIVE_BIT);
+ if (irq->active && vgic_irq_is_sgi(intid))
+ irq->active_source = cpuid;
+
/* Edge is the only case where we preserve the pending bit */
if (irq->config == VGIC_CONFIG_EDGE &&
(val & GICH_LR_PENDING_BIT)) {
irq->pending_latch = true;
- if (vgic_irq_is_sgi(intid)) {
- u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
- cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+ if (vgic_irq_is_sgi(intid))
irq->source |= (1 << cpuid);
- }
}
/*
@@ -152,8 +149,15 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
u32 val = irq->intid;
bool allow_pending = true;
- if (irq->active)
+ if (irq->active) {
val |= GICH_LR_ACTIVE_BIT;
+ if (vgic_irq_is_sgi(irq->intid))
+ val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+ if (vgic_irq_is_multi_sgi(irq)) {
+ allow_pending = false;
+ val |= GICH_LR_EOI;
+ }
+ }
if (irq->hw) {
val |= GICH_LR_HW;
@@ -190,8 +194,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
BUG_ON(!src);
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
irq->source &= ~(1 << (src - 1));
- if (irq->source)
+ if (irq->source) {
irq->pending_latch = true;
+ val |= GICH_LR_EOI;
+ }
}
}
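An active GICv2 SGI must carry its source CPU ID in the LR, and an SGI with
more deliveries outstanding than one LR can express cannot be collapsed:
setting GICH_LR_EOI requests a maintenance interrupt on deactivation so the
remaining sources can be injected afterwards. A sketch of the composition
(field macros from <linux/irqchip/arm-gic.h>; values illustrative):

#include <linux/irqchip/arm-gic.h>

/* Sketch only: an LR for an active SGI whose active_source is vCPU 3
 * and which still has further state to deliver. */
static u32 sgi_lr_sketch(u32 intid)
{
	u32 val = intid;				/* SGI number (0..15)	   */

	val |= GICH_LR_ACTIVE_BIT;			/* active state		   */
	val |= 3 << GICH_LR_PHYSID_CPUID_SHIFT;		/* source vCPU ID	   */
	val |= GICH_LR_EOI;				/* maintenance IRQ on EOI  */

	return val;
}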
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 8195f52ae6f0..bdcf8e7a6161 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -27,13 +27,6 @@ static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;
-void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
-{
- struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-
- cpuif->vgic_hcr |= ICH_HCR_NPIE;
-}
-
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -55,17 +48,23 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
int lr;
unsigned long flags;
- cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
+ cpuif->vgic_hcr &= ~ICH_HCR_UIE;
for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
u64 val = cpuif->vgic_lr[lr];
- u32 intid;
+ u32 intid, cpuid;
struct vgic_irq *irq;
+ bool is_v2_sgi = false;
- if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ cpuid = val & GICH_LR_PHYSID_CPUID;
+ cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+
+ if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
intid = val & ICH_LR_VIRTUAL_ID_MASK;
- else
+ } else {
intid = val & GICH_LR_VIRTUALID;
+ is_v2_sgi = vgic_irq_is_sgi(intid);
+ }
/* Notify fds when the guest EOI'ed a level-triggered IRQ */
if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
@@ -81,18 +80,16 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
/* Always preserve the active bit */
irq->active = !!(val & ICH_LR_ACTIVE_BIT);
+ if (irq->active && is_v2_sgi)
+ irq->active_source = cpuid;
+
/* Edge is the only case where we preserve the pending bit */
if (irq->config == VGIC_CONFIG_EDGE &&
(val & ICH_LR_PENDING_BIT)) {
irq->pending_latch = true;
- if (vgic_irq_is_sgi(intid) &&
- model == KVM_DEV_TYPE_ARM_VGIC_V2) {
- u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
- cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+ if (is_v2_sgi)
irq->source |= (1 << cpuid);
- }
}
/*
@@ -133,10 +130,20 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
u32 model = vcpu->kvm->arch.vgic.vgic_model;
u64 val = irq->intid;
- bool allow_pending = true;
+ bool allow_pending = true, is_v2_sgi;
- if (irq->active)
+ is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
+ model == KVM_DEV_TYPE_ARM_VGIC_V2);
+
+ if (irq->active) {
val |= ICH_LR_ACTIVE_BIT;
+ if (is_v2_sgi)
+ val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+ if (vgic_irq_is_multi_sgi(irq)) {
+ allow_pending = false;
+ val |= ICH_LR_EOI;
+ }
+ }
if (irq->hw) {
val |= ICH_LR_HW;
@@ -174,8 +181,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
BUG_ON(!src);
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
irq->source &= ~(1 << (src - 1));
- if (irq->source)
+ if (irq->source) {
irq->pending_latch = true;
+ val |= ICH_LR_EOI;
+ }
}
}
@@ -335,7 +344,7 @@ retry:
bit_nr = irq->intid % BITS_PER_BYTE;
ptr = pendbase + byte_offset;
- ret = kvm_read_guest(kvm, ptr, &val, 1);
+ ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
if (ret)
return ret;
@@ -388,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
ptr = pendbase + byte_offset;
if (byte_offset != last_byte_offset) {
- ret = kvm_read_guest(kvm, ptr, &val, 1);
+ ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
if (ret)
return ret;
last_byte_offset = byte_offset;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e74baec76361..33c8325c8f35 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -14,11 +14,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/nospec.h>
+
#include <asm/kvm_hyp.h>
#include "vgic.h"
@@ -41,9 +43,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* kvm->lock (mutex)
* its->cmd_lock (mutex)
* its->its_lock (mutex)
- * vgic_cpu->ap_list_lock
- * kvm->lpi_list_lock
- * vgic_irq->irq_lock
+ * vgic_cpu->ap_list_lock must be taken with IRQs disabled
+ * kvm->lpi_list_lock must be taken with IRQs disabled
+ * vgic_irq->irq_lock must be taken with IRQs disabled
+ *
+ * As the ap_list_lock might be taken from the timer interrupt handler,
+ * we have to disable IRQs before taking this lock and everything lower
+ * than it.
*
* If you need to take multiple locks, always take the upper lock first,
* then the lower ones, e.g. first take the its_lock, then the irq_lock.
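In practice the documented ordering means the outermost spinlock is taken
with interrupts disabled, and locks nested below it can then be taken plainly
because interrupts are already off. A minimal sketch, assuming the structures
used in this file:

/* Sketch only: illustrative function showing the documented nesting. */
static void vgic_locking_sketch(struct vgic_cpu *vgic_cpu, struct vgic_irq *irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
	spin_lock(&irq->irq_lock);	/* IRQs already disabled here */

	/* ... manipulate ap_list / per-IRQ state ... */

	spin_unlock(&irq->irq_lock);
	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}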
@@ -70,8 +76,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = NULL;
+ unsigned long flags;
- spin_lock(&dist->lpi_list_lock);
+ spin_lock_irqsave(&dist->lpi_list_lock, flags);
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (irq->intid != intid)
@@ -87,7 +94,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
irq = NULL;
out_unlock:
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return irq;
}
@@ -101,12 +108,16 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
u32 intid)
{
/* SGIs and PPIs */
- if (intid <= VGIC_MAX_PRIVATE)
+ if (intid <= VGIC_MAX_PRIVATE) {
+ intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
return &vcpu->arch.vgic_cpu.private_irqs[intid];
+ }
/* SPIs */
- if (intid <= VGIC_MAX_SPI)
+ if (intid <= VGIC_MAX_SPI) {
+ intid = array_index_nospec(intid, VGIC_MAX_SPI);
return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
+ }
/* LPIs */
if (intid >= VGIC_MIN_LPI)
@@ -128,19 +139,20 @@ static void vgic_irq_release(struct kref *ref)
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
+ unsigned long flags;
if (irq->intid < VGIC_MIN_LPI)
return;
- spin_lock(&dist->lpi_list_lock);
+ spin_lock_irqsave(&dist->lpi_list_lock, flags);
if (!kref_put(&irq->refcount, vgic_irq_release)) {
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return;
};
list_del(&irq->lpi_list);
dist->lpi_list_count--;
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
kfree(irq);
}
@@ -594,6 +606,7 @@ retry:
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
+ bool target_vcpu_needs_kick = false;
spin_lock(&irq->irq_lock);
@@ -664,11 +677,18 @@ retry:
list_del(&irq->ap_list);
irq->vcpu = target_vcpu;
list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
+ target_vcpu_needs_kick = true;
}
spin_unlock(&irq->irq_lock);
spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+
+ if (target_vcpu_needs_kick) {
+ kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
+ kvm_vcpu_kick(target_vcpu);
+ }
+
goto retry;
}
@@ -711,14 +731,6 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
vgic_v3_set_underflow(vcpu);
}
-static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
-{
- if (kvm_vgic_global_state.type == VGIC_V2)
- vgic_v2_set_npie(vcpu);
- else
- vgic_v3_set_npie(vcpu);
-}
-
/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
bool *multi_sgi)
@@ -732,17 +744,15 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+ int w;
+
spin_lock(&irq->irq_lock);
/* GICv2 SGIs can count for more than one... */
- if (vgic_irq_is_sgi(irq->intid) && irq->source) {
- int w = hweight8(irq->source);
-
- count += w;
- *multi_sgi |= (w > 1);
- } else {
- count++;
- }
+ w = vgic_irq_get_lr_count(irq);
spin_unlock(&irq->irq_lock);
+
+ count += w;
+ *multi_sgi |= (w > 1);
}
return count;
}
@@ -753,7 +763,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq;
int count;
- bool npie = false;
bool multi_sgi;
u8 prio = 0xff;
@@ -783,10 +792,8 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
if (likely(vgic_target_oracle(irq) == vcpu)) {
vgic_populate_lr(vcpu, irq, count++);
- if (irq->source) {
- npie = true;
+ if (irq->source)
prio = irq->priority;
- }
}
spin_unlock(&irq->irq_lock);
@@ -799,9 +806,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
}
}
- if (npie)
- vgic_set_npie(vcpu);
-
vcpu->arch.vgic_cpu.used_lrs = count;
/* Nuke remaining LRs */
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 830e815748a0..32c25d42c93f 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -110,6 +110,20 @@ static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
}
+static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
+{
+ /* Account for the active state as an interrupt */
+ if (vgic_irq_is_sgi(irq->intid) && irq->source)
+ return hweight8(irq->source) + irq->active;
+
+ return irq_is_pending(irq) || irq->active;
+}
+
+static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
+{
+ return vgic_irq_get_lr_count(irq) > 1;
+}
+
/*
* This struct provides an intermediate representation of the fields contained
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
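A worked example of the helpers added above: a GICv2 SGI pending from vCPUs 0
and 2 that is also active needs one LR per source plus one for the active
state, so vgic_irq_is_multi_sgi() is true for it.

#include <linux/bitops.h>	/* hweight8() */

/* Sketch only, illustrative values: source mask 0x05 means pending
 * from vCPUs 0 and 2; the active state counts as one more LR. */
static int multi_sgi_example(void)
{
	u8 source = 0x05;
	bool active = true;

	return hweight8(source) + active;	/* == 3, i.e. multi-SGI */
}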
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 6e865e8b5b10..90d30fbe95ae 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -397,7 +397,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
* Check if there was an event already pending on the eventfd
* before we registered, and trigger it as if we didn't miss it.
*/
- events = f.file->f_op->poll(f.file, &irqfd->pt);
+ events = vfs_poll(f.file, &irqfd->pt);
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
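vfs_poll() is the VFS-level helper that replaces open-coded ->poll
invocations; for files without a poll method it returns DEFAULT_POLLMASK
instead of dereferencing a NULL pointer. Roughly, as a hedged reconstruction
from include/linux/poll.h (not part of this diff):

static inline __poll_t vfs_poll(struct file *file,
				struct poll_table_struct *pt)
{
	if (unlikely(!file->f_op->poll))
		return DEFAULT_POLLMASK;

	return file->f_op->poll(file, pt);
}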