author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-25 02:11:03 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-25 02:11:03 +0300
commit    | 8cbd92339db08b19b93d1637e5799ff2a8dddfd2 (patch)
tree      | 7e62d961f32e8a2a96271029b376f1e8bbd70a7c /drivers/infiniband/hw/mlx4/qp.c
parent    | 143c7bc6496c886ce5db2a2f9cec580494690170 (diff)
parent    | 66fb1d5df6ace316a4a6e2c31e13fc123ea2b644 (diff)
download  | linux-8cbd92339db08b19b93d1637e5799ff2a8dddfd2.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"Quite a small cycle this time, even with the rc8. I suppose everyone
went to sleep over xmas.
- Minor driver updates for hfi1, cxgb4, erdma, hns, irdma, mlx5, siw,
mana
- inline CQE support for hns
- Have mlx5 display device error codes
- Pinned DMABUF support for irdma
- Continued rxe cleanups, particularly converting the MRs to use
xarray
- Improvements to what can be cached in the mlx5 mkey cache"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (61 commits)
IB/mlx5: Extend debug control for CC parameters
IB/hfi1: Fix sdma.h tx->num_descs off-by-one errors
IB/hfi1: Fix math bugs in hfi1_can_pin_pages()
RDMA/irdma: Add support for dmabuf pin memory regions
RDMA/mlx5: Use query_special_contexts for mkeys
net/mlx5e: Use query_special_contexts for mkeys
net/mlx5: Change define name for 0x100 lkey value
net/mlx5: Expose bits for querying special mkeys
RDMA/rxe: Fix missing memory barriers in rxe_queue.h
RDMA/mana_ib: Fix a bug when the PF indicates more entries for registering memory on first packet
RDMA/rxe: Remove rxe_alloc()
RDMA/cma: Distinguish between sockaddr_in and sockaddr_in6 by size
RDMA/rxe: Handle zero length rdma
iw_cxgb4: Fix potential NULL dereference in c4iw_fill_res_cm_id_entry()
RDMA/mlx5: Use rdma_umem_for_each_dma_block()
RDMA/umem: Remove unused 'work' member from struct ib_umem
RDMA/irdma: Cap MSIX used to online CPUs + 1
RDMA/mlx5: Check reg_create() create for errors
RDMA/restrack: Correct spelling
RDMA/cxgb4: Fix potential null-ptr-deref in pass_establish()
...
Diffstat (limited to 'drivers/infiniband/hw/mlx4/qp.c')
-rw-r--r-- | drivers/infiniband/hw/mlx4/qp.c | 121
1 file changed, 85 insertions, 36 deletions
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b17d6ebc5b70..884825b2e5f7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -102,6 +102,14 @@ enum mlx4_ib_source_type {
         MLX4_IB_RWQ_SRC = 1,
 };
 
+struct mlx4_ib_qp_event_work {
+        struct work_struct work;
+        struct mlx4_qp *qp;
+        enum mlx4_event type;
+};
+
+static struct workqueue_struct *mlx4_ib_qp_event_wq;
+
 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
         if (!mlx4_is_master(dev->dev))
@@ -200,50 +208,77 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
         }
 }
 
+static void mlx4_ib_handle_qp_event(struct work_struct *_work)
+{
+        struct mlx4_ib_qp_event_work *qpe_work =
+                container_of(_work, struct mlx4_ib_qp_event_work, work);
+        struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
+        struct ib_event event = {};
+
+        event.device = ibqp->device;
+        event.element.qp = ibqp;
+
+        switch (qpe_work->type) {
+        case MLX4_EVENT_TYPE_PATH_MIG:
+                event.event = IB_EVENT_PATH_MIG;
+                break;
+        case MLX4_EVENT_TYPE_COMM_EST:
+                event.event = IB_EVENT_COMM_EST;
+                break;
+        case MLX4_EVENT_TYPE_SQ_DRAINED:
+                event.event = IB_EVENT_SQ_DRAINED;
+                break;
+        case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
+                event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+                break;
+        case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
+                event.event = IB_EVENT_QP_FATAL;
+                break;
+        case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
+                event.event = IB_EVENT_PATH_MIG_ERR;
+                break;
+        case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+                event.event = IB_EVENT_QP_REQ_ERR;
+                break;
+        case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
+                event.event = IB_EVENT_QP_ACCESS_ERR;
+                break;
+        default:
+                pr_warn("Unexpected event type %d on QP %06x\n",
+                        qpe_work->type, qpe_work->qp->qpn);
+                goto out;
+        }
+
+        ibqp->event_handler(&event, ibqp->qp_context);
+
+out:
+        mlx4_put_qp(qpe_work->qp);
+        kfree(qpe_work);
+}
+
 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 {
-        struct ib_event event;
         struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+        struct mlx4_ib_qp_event_work *qpe_work;
 
         if (type == MLX4_EVENT_TYPE_PATH_MIG)
                 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
 
-        if (ibqp->event_handler) {
-                event.device = ibqp->device;
-                event.element.qp = ibqp;
-                switch (type) {
-                case MLX4_EVENT_TYPE_PATH_MIG:
-                        event.event = IB_EVENT_PATH_MIG;
-                        break;
-                case MLX4_EVENT_TYPE_COMM_EST:
-                        event.event = IB_EVENT_COMM_EST;
-                        break;
-                case MLX4_EVENT_TYPE_SQ_DRAINED:
-                        event.event = IB_EVENT_SQ_DRAINED;
-                        break;
-                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
-                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
-                        break;
-                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
-                        event.event = IB_EVENT_QP_FATAL;
-                        break;
-                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
-                        event.event = IB_EVENT_PATH_MIG_ERR;
-                        break;
-                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
-                        event.event = IB_EVENT_QP_REQ_ERR;
-                        break;
-                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
-                        event.event = IB_EVENT_QP_ACCESS_ERR;
-                        break;
-                default:
-                        pr_warn("Unexpected event type %d "
-                                "on QP %06x\n", type, qp->qpn);
-                        return;
-                }
+        if (!ibqp->event_handler)
+                goto out_no_handler;
 
-                ibqp->event_handler(&event, ibqp->qp_context);
-        }
+        qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
+        if (!qpe_work)
+                goto out_no_handler;
+
+        qpe_work->qp = qp;
+        qpe_work->type = type;
+        INIT_WORK(&qpe_work->work, mlx4_ib_handle_qp_event);
+        queue_work(mlx4_ib_qp_event_wq, &qpe_work->work);
+        return;
+
+out_no_handler:
+        mlx4_put_qp(qp);
 }
 
 static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
@@ -4468,3 +4503,17 @@ void mlx4_ib_drain_rq(struct ib_qp *qp)
 
         handle_drain_completion(cq, &rdrain, dev);
 }
+
+int mlx4_ib_qp_event_init(void)
+{
+        mlx4_ib_qp_event_wq = alloc_ordered_workqueue("mlx4_ib_qp_event_wq", 0);
+        if (!mlx4_ib_qp_event_wq)
+                return -ENOMEM;
+
+        return 0;
+}
+
+void mlx4_ib_qp_event_cleanup(void)
+{
+        destroy_workqueue(mlx4_ib_qp_event_wq);
+}
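Note: the diffstat above is limited to qp.c, so the call sites of the two new helpers are not shown on this page. The sketch below is for orientation only: it assumes the driver's existing module entry points (mlx4_ib_init, mlx4_ib_cleanup, mlx4_ib_interface) in mlx4/main.c and omits the other registration steps main.c really performs; none of it is taken from this diff.

/*
 * Hypothetical wiring of the new helpers into module init/exit.
 * The interface registration step and its name are assumed from the
 * existing driver; only mlx4_ib_qp_event_init()/_cleanup() come from
 * the patch above.
 */
static int __init mlx4_ib_init(void)
{
        int err;

        /* Create the ordered QP-event workqueue before any async
         * QP event can be delivered. */
        err = mlx4_ib_qp_event_init();
        if (err)
                return err;

        err = mlx4_register_interface(&mlx4_ib_interface); /* assumed existing step */
        if (err)
                goto clean_qp_event;

        return 0;

clean_qp_event:
        mlx4_ib_qp_event_cleanup();
        return err;
}

static void __exit mlx4_ib_cleanup(void)
{
        mlx4_unregister_interface(&mlx4_ib_interface); /* assumed existing step */

        /* Destroy (and implicitly flush) the workqueue only after no
         * further QP events can be queued. */
        mlx4_ib_qp_event_cleanup();
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);

The ordering is the point of the exercise: the workqueue has to exist for the entire window in which mlx4_ib_qp_event() can run, so it is created before the device interface is registered and torn down only after it has been unregistered.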