author     Luoyouming <luoyouming@huawei.com>  2022-12-24 13:21:59 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>    2023-01-09 17:45:27 +0300
commit     968606e252e3f4c06f1ac63f8f6527c8374c5eb6 (patch)
tree       c7cd74b1486e5df9881c8a66f321b6644d16ba7f /drivers/infiniband/hw/hns/hns_roce_hw_v2.c
parent     bd99ede8ef2dc03e29a181b755ba4f78da2644e6 (diff)
download   linux-968606e252e3f4c06f1ac63f8f6527c8374c5eb6.tar.xz
RDMA/hns: Remove rq inline in kernel
The RoCE driver will no longer support the rq inline feature in kernel space.
Delete the code related to rq inline in kernel space.

Link: https://lore.kernel.org/r/20221224102201.3114536-2-xuhaoyue1@hisilicon.com
Signed-off-by: Luoyouming <luoyouming@huawei.com>
Signed-off-by: Haoyue Xu <xuhaoyue1@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
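For context, the receive-side inline path being removed copied the payload out of the receive WQE buffer back into the buffers the consumer had posted with its receive request. The stand-alone C sketch below mirrors that copy loop in simplified form; the names rinl_sge and scatter_inline_payload are illustrative only and are not part of the driver, and the driver's per-QP bookkeeping and register accessors are omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the per-WQE SGE bookkeeping the driver kept. */
struct rinl_sge {
	void *addr;    /* consumer buffer recorded at post_recv time */
	uint32_t len;  /* capacity of that buffer */
};

/*
 * Scatter byte_len bytes of inline payload from the receive WQE buffer into
 * the recorded SGEs. Returns 0 on success, -1 if the posted buffers are too
 * small (the driver reported that case as IB_WC_LOC_LEN_ERR).
 */
static int scatter_inline_payload(const void *wqe_buf, uint32_t byte_len,
				  struct rinl_sge *sge_list, uint32_t sge_num)
{
	const uint8_t *src = wqe_buf;
	uint32_t left = byte_len;
	uint32_t i;

	for (i = 0; i < sge_num && left; i++) {
		uint32_t size = sge_list[i].len < left ? sge_list[i].len : left;

		memcpy(sge_list[i].addr, src, size);
		left -= size;
		src += size;
	}

	return left ? -1 : 0;
}

int main(void)
{
	uint8_t payload[6] = "inline";
	uint8_t a[4] = { 0 }, b[4] = { 0 };
	struct rinl_sge sges[2] = {
		{ .addr = a, .len = sizeof(a) },
		{ .addr = b, .len = sizeof(b) },
	};

	if (scatter_inline_payload(payload, sizeof(payload), sges, 2) == 0)
		printf("copied %.4s%.2s\n", (char *)a, (char *)b);

	return 0;
}

On the completion path the driver performed this copy only when the CQE carried the RQ inline flag, as the removed is_rq_inl_enabled() check in the diff below shows.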
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_hw_v2.c')
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 67
1 file changed, 0 insertions(+), 67 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index b2421883993b..c0c57b9ba452 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -821,22 +821,10 @@ static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
 static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
 			u32 wqe_idx, u32 max_sge)
 {
-	struct hns_roce_rinl_sge *sge_list;
 	void *wqe = NULL;
-	u32 i;
 
 	wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
 	fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
-
-	/* rq support inline data */
-	if (hr_qp->rq_inl_buf.wqe_cnt) {
-		sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
-		hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
-		for (i = 0; i < wr->num_sge; i++) {
-			sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
-			sge_list[i].len = wr->sg_list[i].length;
-		}
-	}
 }
 
 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
@@ -3730,39 +3718,6 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
 	return 0;
 }
 
-static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
-					struct hns_roce_qp *qp,
-					struct ib_wc *wc)
-{
-	struct hns_roce_rinl_sge *sge_list;
-	u32 wr_num, wr_cnt, sge_num;
-	u32 sge_cnt, data_len, size;
-	void *wqe_buf;
-
-	wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
-	wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
-
-	sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
-	sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
-	wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
-	data_len = wc->byte_len;
-
-	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
-		size = min(sge_list[sge_cnt].len, data_len);
-		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
-
-		data_len -= size;
-		wqe_buf += size;
-	}
-
-	if (unlikely(data_len)) {
-		wc->status = IB_WC_LOC_LEN_ERR;
-		return -EAGAIN;
-	}
-
-	return 0;
-}
-
 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
 		   int num_entries, struct ib_wc *wc)
 {
@@ -3974,22 +3929,10 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
 	wc->opcode = ib_opcode;
 }
 
-static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
-				     struct hns_roce_v2_cqe *cqe)
-{
-	return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
-	       (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
-		hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
-		hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
-	       hr_reg_read(cqe, CQE_RQ_INLINE);
-}
-
 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
 {
-	struct hns_roce_qp *qp = to_hr_qp(wc->qp);
 	u32 hr_opcode;
 	int ib_opcode;
-	int ret;
 
 	wc->byte_len = le32_to_cpu(cqe->byte_cnt);
 
@@ -4014,12 +3957,6 @@ static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
 	else
 		wc->opcode = ib_opcode;
 
-	if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
-		ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
-		if (unlikely(ret))
-			return ret;
-	}
-
 	wc->sl = hr_reg_read(cqe, CQE_SL);
 	wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
 	wc->slid = 0;
@@ -4445,10 +4382,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
 	hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
 		     upper_32_bits(hr_qp->rdb.dma));
 
-	if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
-		hr_reg_write_bool(context, QPC_RQIE,
-				  hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
-
 	hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
 
 	if (ibqp->srq) {