Diffstat (limited to 'drivers/infiniband/hw/irdma/uk.c')
 drivers/infiniband/hw/irdma/uk.c | 122 +++++++++++++++++++-----------------------
 1 file changed, 57 insertions(+), 65 deletions(-)
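The diff below drops the driver-private status codes in favor of standard kernel errnos: the "status.h" include is removed, every enum irdma_status_code return type becomes int, and the IRDMA_ERR_* values are mapped to -EINVAL (bad fragment count, inline size, or queue depth), -ENOMEM (SQ/RQ ring full), -ENOENT (CQ empty), and -EFAULT (QP destroyed).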
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 57a9444e9ea7..daeab5daed5b 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
-#include "status.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"
@@ -56,7 +55,7 @@ static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
* irdma_nop_1 - insert a NOP wqe
* @qp: hw qp ptr
*/
-static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
+static int irdma_nop_1(struct irdma_qp_uk *qp)
{
u64 hdr;
__le64 *wqe;
@@ -64,7 +63,7 @@ static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
bool signaled = false;
if (!qp->sq_ring.head)
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
wqe = qp->sq_base[wqe_idx].elem;
@@ -245,7 +244,7 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
__le64 *wqe;
- enum irdma_status_code ret_code;
+ int ret_code;
if (IRDMA_RING_FULL_ERR(qp->rq_ring))
return NULL;
@@ -268,16 +267,15 @@ __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
{
u64 hdr;
__le64 *wqe;
struct irdma_rdma_write *op_info;
u32 i, wqe_idx;
u32 total_size = 0, byte_off;
- enum irdma_status_code ret_code;
+ int ret_code;
u32 frag_cnt, addl_frag_cnt;
bool read_fence = false;
u16 quanta;
@@ -286,7 +284,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
op_info = &info->op.rdma_write;
if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
total_size += op_info->lo_sg_list[i].length;
@@ -305,7 +303,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -370,12 +368,11 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
* @inv_stag: flag for inv_stag
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool inv_stag, bool post_sq)
+int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq)
{
struct irdma_rdma_read *op_info;
- enum irdma_status_code ret_code;
+ int ret_code;
u32 i, byte_off, total_size = 0;
bool local_fence = false;
u32 addl_frag_cnt;
@@ -388,7 +385,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
op_info = &info->op.rdma_read;
if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
total_size += op_info->lo_sg_list[i].length;
@@ -400,7 +397,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -457,15 +454,14 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_post_send *op_info;
u64 hdr;
u32 i, wqe_idx, total_size = 0, byte_off;
- enum irdma_status_code ret_code;
+ int ret_code;
u32 frag_cnt, addl_frag_cnt;
bool read_fence = false;
u16 quanta;
@@ -474,7 +470,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
op_info = &info->op.send;
if (qp->max_sq_frag_cnt < op_info->num_sges)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
for (i = 0; i < op_info->num_sges; i++)
total_size += op_info->sg_list[i].length;
@@ -490,7 +486,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -678,9 +674,8 @@ static u16 irdma_inline_data_size_to_quanta(u32 data_size)
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code
-irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
struct irdma_inline_rdma_write *op_info;
@@ -693,13 +688,13 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in
op_info = &info->op.inline_rdma_write;
if (op_info->len > qp->max_inline_data)
- return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;
+ return -EINVAL;
quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -745,9 +740,8 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
+int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
{
__le64 *wqe;
struct irdma_post_inline_send *op_info;
@@ -760,13 +754,13 @@ enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
op_info = &info->op.inline_send;
if (op_info->len > qp->max_inline_data)
- return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;
+ return -EINVAL;
quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -817,9 +811,9 @@ enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
* @info: post sq information
* @post_sq: flag to post sq
*/
-enum irdma_status_code
-irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info, bool post_sq)
+int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_inv_local_stag *op_info;
@@ -835,7 +829,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
0, info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -871,8 +865,8 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
* @qp: hw qp ptr
* @info: post rq information
*/
-enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
- struct irdma_post_rq_info *info)
+int irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info)
{
u32 wqe_idx, i, byte_off;
u32 addl_frag_cnt;
@@ -880,11 +874,11 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
u64 hdr;
if (qp->max_rq_frag_cnt < info->num_sges)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
qp->rq_wrid_array[wqe_idx] = info->wr_id;
addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
@@ -1000,15 +994,15 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
* @cq: hw cq
* @info: cq poll information returned
*/
-enum irdma_status_code
-irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
+int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info)
{
u64 comp_ctx, qword0, qword2, qword3;
__le64 *cqe;
struct irdma_qp_uk *qp;
struct irdma_ring *pring = NULL;
u32 wqe_idx, q_type;
- enum irdma_status_code ret_code;
+ int ret_code;
bool move_cq_head = true;
u8 polarity;
bool ext_valid;
@@ -1022,7 +1016,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
get_64bit_val(cqe, 24, &qword3);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (polarity != cq->polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
/* Ensure CQE contents are read after valid bit is checked */
dma_rmb();
@@ -1045,7 +1039,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
polarity ^= 1;
}
if (polarity != cq->polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
/* Ensure ext CQE contents are read after ext valid bit is checked */
dma_rmb();
@@ -1112,7 +1106,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
if (!qp || qp->destroy_pending) {
- ret_code = IRDMA_ERR_Q_DESTROYED;
+ ret_code = -EFAULT;
goto exit;
}
wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
@@ -1126,7 +1120,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
- ret_code = IRDMA_ERR_Q_EMPTY;
+ ret_code = -ENOENT;
goto exit;
}
@@ -1186,7 +1180,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
} else {
if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
- ret_code = IRDMA_ERR_Q_EMPTY;
+ ret_code = -ENOENT;
goto exit;
}
@@ -1303,15 +1297,15 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
* @sqdepth: depth of SQ
*
*/
-enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
- u32 sq_size, u8 shift, u32 *sqdepth)
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
+ u32 *sqdepth)
{
*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
return 0;
}
@@ -1323,15 +1317,15 @@ enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
* @shift: shift which determines size of WQE
* @rqdepth: depth of RQ
*/
-enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
- u32 rq_size, u8 shift, u32 *rqdepth)
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
+ u32 *rqdepth)
{
*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
return 0;
}
@@ -1381,17 +1375,16 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
* allowed. Then size of wqe * the number of wqes should be the
* amount of memory allocated for sq and rq.
*/
-enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
- struct irdma_qp_uk_init_info *info)
+int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u32 sq_ring_size;
u8 sqshift, rqshift;
qp->uk_attrs = info->uk_attrs;
if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
@@ -1502,8 +1495,7 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
* @signaled: signaled for completion
* @post_sq: ring doorbell
*/
-enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
- bool signaled, bool post_sq)
+int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
__le64 *wqe;
u64 hdr;
@@ -1515,7 +1507,7 @@ enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
0, &info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(qp, wqe_idx);
@@ -1541,7 +1533,7 @@ enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
* @frag_cnt: number of fragments
* @quanta: quanta for frag_cnt
*/
-enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
+int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
switch (frag_cnt) {
case 0:
@@ -1577,7 +1569,7 @@ enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
*quanta = 8;
break;
default:
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
}
return 0;
@@ -1588,7 +1580,7 @@ enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
* @frag_cnt: number of fragments
* @wqe_size: size in bytes given frag_cnt
*/
-enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
+int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
switch (frag_cnt) {
case 0:
@@ -1615,7 +1607,7 @@ enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
*wqe_size = 256;
break;
default:
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
}
return 0;
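
With the conversion in place, callers no longer need a driver-specific translation layer for error codes; the errno can be propagated as-is. The snippet below is a minimal illustrative sketch, not part of this diff: irdma_example_post_write() is a hypothetical helper, and only irdma_uk_rdma_write() with its -EINVAL/-ENOMEM returns comes from the hunks above.

/*
 * Illustrative caller sketch (not part of this patch). The helper name
 * irdma_example_post_write() is hypothetical; only irdma_uk_rdma_write()
 * and its -EINVAL/-ENOMEM returns come from the code above.
 */
static int irdma_example_post_write(struct irdma_qp_uk *qp,
				    struct irdma_post_sq_info *info)
{
	int err;

	/* -EINVAL on a bad fragment count, -ENOMEM when the SQ ring is full */
	err = irdma_uk_rdma_write(qp, info, true);
	if (err)
		return err;	/* errno now propagates unchanged to the caller */

	return 0;
}

A caller that previously had to translate IRDMA_ERR_QP_TOOMANY_WRS_POSTED or IRDMA_ERR_INVALID_FRAG_COUNT into an errno before returning it to upper layers can now return the value directly.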