author    Oliver Upton <oliver.upton@linux.dev>  2024-04-22 23:01:45 +0300
committer Marc Zyngier <maz@kernel.org>          2024-04-25 15:19:55 +0300
commit    30a0ce9c4928640efd3112d01d432d0778878b7e (patch)
tree      a99b792ba387dbdf7d850d8f2a227b44653dc6c7 /arch
parent    85d3ccc8b75bb5a443edb3c42fa22e97da2e60ec (diff)
download  linux-30a0ce9c4928640efd3112d01d432d0778878b7e.tar.xz
KVM: arm64: vgic-its: Get rid of vgic_copy_lpi_list()
The last user has been transitioned to walking the LPI xarray directly.
Cut the wart off, and get rid of the now unneeded lpi_count while doing
so.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240422200158.2606761-7-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
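For context, "walking the LPI xarray directly" means iterating dist->lpi_xa with
the kernel's xa_for_each() helper instead of snapshotting the INTIDs into a
kmalloc'ed array. The sketch below shows roughly what that pattern looks like;
the wrapper function and callback are hypothetical illustrations, not code from
this series, and only dist->lpi_xa, vgic_get_irq() and vgic_put_irq() are
existing vgic names.

/*
 * Hypothetical sketch: visit every mapped LPI (optionally filtered by
 * target vCPU) by walking the xarray directly.
 */
static void for_each_mapped_lpi(struct kvm *kvm, struct kvm_vcpu *vcpu,
				void (*fn)(struct kvm *kvm, struct vgic_irq *irq))
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long intid;

	xa_for_each(&dist->lpi_xa, intid, irq) {
		/* Take a reference so the IRQ can't be freed under us. */
		irq = vgic_get_irq(kvm, NULL, intid);
		if (!irq)
			continue;

		if (!vcpu || irq->target_vcpu == vcpu)
			fn(kvm, irq);

		vgic_put_irq(kvm, irq);
	}
}

Since nothing needs to pre-size an array of INTIDs any more, the lpi_count
bookkeeping removed below has no remaining purpose.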
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/kvm/vgic/vgic-its.c  48
-rw-r--r--  arch/arm64/kvm/vgic/vgic.c       1
-rw-r--r--  arch/arm64/kvm/vgic/vgic.h       1
3 files changed, 0 insertions, 50 deletions
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 5025ac968d27..441134ad674e 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -86,11 +86,8 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	if (ret) {
 		xa_release(&dist->lpi_xa, intid);
 		kfree(irq);
-		goto out_unlock;
 	}
 
-	atomic_inc(&dist->lpi_count);
-
 out_unlock:
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
@@ -316,51 +313,6 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	return 0;
 }
 
-/*
- * Create a snapshot of the current LPIs targeting @vcpu, so that we can
- * enumerate those LPIs without holding any lock.
- * Returns their number and puts the kmalloc'ed array into intid_ptr.
- */
-int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
-{
-	struct vgic_dist *dist = &kvm->arch.vgic;
-	XA_STATE(xas, &dist->lpi_xa, GIC_LPI_OFFSET);
-	struct vgic_irq *irq;
-	unsigned long flags;
-	u32 *intids;
-	int irq_count, i = 0;
-
-	/*
-	 * There is an obvious race between allocating the array and LPIs
-	 * being mapped/unmapped. If we ended up here as a result of a
-	 * command, we're safe (locks are held, preventing another
-	 * command). If coming from another path (such as enabling LPIs),
-	 * we must be careful not to overrun the array.
-	 */
-	irq_count = atomic_read(&dist->lpi_count);
-	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
-	if (!intids)
-		return -ENOMEM;
-
-	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
-	rcu_read_lock();
-
-	xas_for_each(&xas, irq, VGIC_LPI_MAX_INTID) {
-		if (i == irq_count)
-			break;
-		/* We don't need to "get" the IRQ, as we hold the list lock. */
-		if (vcpu && irq->target_vcpu != vcpu)
-			continue;
-		intids[i++] = irq->intid;
-	}
-
-	rcu_read_unlock();
-	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
-
-	*intid_ptr = intids;
-	return i;
-}
-
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
 	int ret = 0;
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 4ec93587c8cd..e3ee1bc1214a 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -126,7 +126,6 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 	__xa_erase(&dist->lpi_xa, irq->intid);
 	xa_unlock_irqrestore(&dist->lpi_xa, flags);
 
-	atomic_dec(&dist->lpi_count);
 	kfree_rcu(irq, rcu);
 }
 
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index e0c77e1bd9f6..060dfd96b41f 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -331,7 +331,6 @@ static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
 }
 
 bool vgic_lpis_enabled(struct kvm_vcpu *vcpu);
-int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 			 u32 devid, u32 eventid, struct vgic_irq **irq);
 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);