author     Wenpeng Liang <liangwenpeng@huawei.com>   2021-06-18 13:10:18 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>          2021-06-22 21:17:07 +0300
commit     c462a0242bd938967c9a69c41364f80e188c1a7a (patch)
tree       b20db4ba16c5c7e1460093c70f6eaf25ef70b519 /drivers/infiniband
parent     a33958ca5204f8d2342fd8fe9f547e33fa6c07ed (diff)
download   linux-c462a0242bd938967c9a69c41364f80e188c1a7a.tar.xz
RDMA/hns: Encapsulate flushing CQE as a function
The process of flushing CQE can be encapsulated into a function, which
reduces duplicate code.

Link: https://lore.kernel.org/r/1624011020-16992-9-git-send-email-liweihang@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
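For context, below is a minimal standalone sketch of the deferred-flush pattern the new flush_cqe() helper centralizes. It is not the driver source: the demo_* names and DEMO_FLUSH_FLAG bit are illustrative stand-ins for the driver's qp->flush_flag and HNS_ROCE_FLUSH_FLAG. The point is that an atomic test-and-set guards a one-shot work item, so racing error paths queue the (sleeping, mailbox-based) flush at most once.

/*
 * Sketch only: the demo_* names are hypothetical. An atomic
 * test-and-set ensures that of several concurrent error paths,
 * exactly one queues the flush work; the rest see the bit
 * already set and return immediately.
 */
#include <linux/bitops.h>
#include <linux/workqueue.h>

#define DEMO_FLUSH_FLAG 0

struct demo_qp {
	unsigned long flush_flag;       /* flush state bits */
	struct work_struct flush_work;  /* deferred flush, may sleep */
};

static void demo_flush_cqe(struct workqueue_struct *wq, struct demo_qp *qp)
{
	/* Only the first caller observes the bit clear and queues work. */
	if (!test_and_set_bit(DEMO_FLUSH_FLAG, &qp->flush_flag))
		queue_work(wq, &qp->flush_work);
}

In the patch below, update_sq_db(), update_rq_db(), get_cqe_status() and hns_roce_qp_event() all collapse onto a single flush_cqe() helper that performs exactly this guarded queueing via init_flush_work().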
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h |  1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 36
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c     | 19
3 files changed, 21 insertions(+), 35 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index cb01b25bc93e..a2a256f7f87e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1252,6 +1252,7 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 6bcdd89dcba8..f6ace9049a95 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -624,18 +624,8 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
static inline void update_sq_db(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *qp)
{
- /*
- * Hip08 hardware cannot flush the WQEs in SQ if the QP state
- * gets into errored mode. Hence, as a workaround to this
- * hardware limitation, driver needs to assist in flushing. But
- * the flushing operation uses mailbox to convey the QP state to
- * the hardware and which can sleep due to the mutex protection
- * around the mailbox calls. Hence, use the deferred flush for
- * now.
- */
if (unlikely(qp->state == IB_QPS_ERR)) {
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
- init_flush_work(hr_dev, qp);
+ flush_cqe(hr_dev, qp);
} else {
struct hns_roce_v2_db sq_db = {};
@@ -651,18 +641,8 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
static inline void update_rq_db(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *qp)
{
- /*
- * Hip08 hardware cannot flush the WQEs in RQ if the QP state
- * gets into errored mode. Hence, as a workaround to this
- * hardware limitation, driver needs to assist in flushing. But
- * the flushing operation uses mailbox to convey the QP state to
- * the hardware and which can sleep due to the mutex protection
- * around the mailbox calls. Hence, use the deferred flush for
- * now.
- */
if (unlikely(qp->state == IB_QPS_ERR)) {
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
- init_flush_work(hr_dev, qp);
+ flush_cqe(hr_dev, qp);
} else {
if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
*qp->rdb.db_record =
@@ -3553,17 +3533,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
return;
- /*
- * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
- * into errored mode. Hence, as a workaround to this hardware
- * limitation, driver needs to assist in flushing. But the flushing
- * operation uses mailbox to convey the QP state to the hardware and
- * which can sleep due to the mutex protection around the mailbox calls.
- * Hence, use the deferred flush for now. Once wc error detected, the
- * flushing operation is needed.
- */
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
- init_flush_work(hr_dev, qp);
+ flush_cqe(hr_dev, qp);
}
static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 090b1433ae82..b101b7e578f2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -79,6 +79,21 @@ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
queue_work(hr_dev->irq_workq, &flush_work->work);
}
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
+{
+ /*
+ * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state
+ * gets into errored mode. Hence, as a workaround to this
+ * hardware limitation, driver needs to assist in flushing. But
+ * the flushing operation uses mailbox to convey the QP state to
+ * the hardware and which can sleep due to the mutex protection
+ * around the mailbox calls. Hence, use the deferred flush for
+ * now.
+ */
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(dev, qp);
+}
+
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
struct device *dev = hr_dev->dev;
@@ -102,8 +117,8 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
qp->state = IB_QPS_ERR;
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
- init_flush_work(hr_dev, qp);
+
+ flush_cqe(hr_dev, qp);
}
qp->event(qp, (enum hns_roce_event)event_type);