author	Long Li <longli@microsoft.com>	2023-12-16 05:04:15 +0300
committer	Leon Romanovsky <leon@kernel.org>	2023-12-20 11:29:03 +0300
commit	c15d7802a42402a87880a17eee89ff023e49ecc0
tree	ec73327affaebeb3ed2bbd58cab42b43101b14c7
parent	2c20e20b22d9fc64072e3445ae3ca244cbd523a2
RDMA/mana_ib: Add CQ interrupt support for RAW QP
At probing time, the MANA core code allocates EQs for supporting interrupts on Ethernet queues. The same interrupt mechanism is used by the RAW QP. Use the same EQs for delivering interrupts on the CQ for the RAW QP.

Signed-off-by: Long Li <longli@microsoft.com>
Link: https://lore.kernel.org/r/1702692255-23640-4-git-send-email-longli@linuxonhyperv.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
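The diff below only covers the cq.c side: create_cq records the requested completion vector and marks the CQ ID as unknown, and destroy_cq drops the gc->cq_table[] entry if one was installed. The pairing install step lives in the RAW QP creation path and is not part of this file; the following is a minimal sketch of what that step is expected to look like, assuming the cq.callback/cq.context fields of struct gdma_queue and an illustrative cq_spec.queue_index returned by queue creation (this is not the literal driver code):

	/* Illustrative sketch: once create_qp learns the hardware CQ ID, it can
	 * publish the CQ in the GDMA CQ table so that EQ interrupts are routed
	 * to mana_ib_cq_handler() for this queue.
	 */
	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
	if (!gdma_cq)
		return -ENOMEM;

	cq->id = cq_spec.queue_index;		/* CQ ID assigned at create_qp time */
	gdma_cq->id = cq->id;
	gdma_cq->type = GDMA_CQ;
	gdma_cq->cq.context = cq;		/* handed back as ctx in the handler */
	gdma_cq->cq.callback = mana_ib_cq_handler;
	gc->cq_table[cq->id] = gdma_cq;		/* freed again in mana_ib_destroy_cq() */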
Diffstat (limited to 'drivers/infiniband/hw/mana/cq.c')
-rw-r--r--	drivers/infiniband/hw/mana/cq.c	32
1 file changed, 31 insertions, 1 deletion
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 09a2c263e39b..83ebd070535a 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -12,13 +12,20 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_create_cq ucmd = {};
 	struct mana_ib_dev *mdev;
+	struct gdma_context *gc;
 	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+	gc = mdev->gdma_dev->gdma_context;
 
 	if (udata->inlen < sizeof(ucmd))
 		return -EINVAL;
 
+	if (attr->comp_vector > gc->max_num_queues)
+		return -EINVAL;
+
+	cq->comp_vector = attr->comp_vector;
+
 	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
 	if (err) {
 		ibdev_dbg(ibdev,
@@ -56,6 +63,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	/*
 	 * The CQ ID is not known at this time. The ID is generated at create_qp
 	 */
+	cq->id = INVALID_QUEUE_ID;
 
 	return 0;
@@ -69,11 +77,33 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_dev *mdev;
+	struct gdma_context *gc;
+	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+	gc = mdev->gdma_dev->gdma_context;
+
+	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
+	if (err) {
+		ibdev_dbg(ibdev,
+			  "Failed to destroy dma region, %d\n", err);
+		return err;
+	}
+
+	if (cq->id != INVALID_QUEUE_ID) {
+		kfree(gc->cq_table[cq->id]);
+		gc->cq_table[cq->id] = NULL;
+	}
 
-	mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
 	ib_umem_release(cq->umem);
 
 	return 0;
 }
+
+void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
+{
+	struct mana_ib_cq *cq = ctx;
+
+	if (cq->ibcq.comp_handler)
+		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
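mana_ib_cq_handler() itself only forwards the event: comp_handler and cq_context on struct ib_cq are the standard verbs completion callback that the RDMA core wires up for the CQ owner, so no mana_ib-specific plumbing is needed beyond invoking it. For reference, a generic verbs consumer (not taken from this driver) registers that callback roughly like this:

	/* Generic kernel verbs usage: the comp_handler registered here is what
	 * cq->ibcq.comp_handler above ends up calling; my_comp_handler and
	 * my_ctx are placeholder names.
	 */
	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };
	struct ib_cq *ibcq;

	ibcq = ib_create_cq(ibdev, my_comp_handler, NULL, my_ctx, &cq_attr);
	if (IS_ERR(ibcq))
		return PTR_ERR(ibcq);

	/* Arm the CQ so the next completion raises the callback. */
	ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP);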