Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/hw/bnxt_re/bnxt_re.h    |  3
-rw-r--r--   drivers/infiniband/hw/bnxt_re/ib_verbs.c   | 61
-rw-r--r--   drivers/infiniband/hw/bnxt_re/ib_verbs.h   |  2
-rw-r--r--   drivers/infiniband/hw/bnxt_re/main.c       | 10
-rw-r--r--   drivers/infiniband/hw/bnxt_re/qplib_res.h  |  6
5 files changed, 74 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 9fd9849ebdd1..9dca451ed522 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -41,6 +41,7 @@
#define __BNXT_RE_H__
#include <rdma/uverbs_ioctl.h>
#include "hw_counters.h"
+#include <linux/hashtable.h>
#define ROCE_DRV_MODULE_NAME "bnxt_re"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
@@ -135,6 +136,7 @@ struct bnxt_re_pacing {
#define BNXT_RE_DB_FIFO_ROOM_SHIFT 15
#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
+#define MAX_CQ_HASH_BITS (16)
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
@@ -189,6 +191,7 @@ struct bnxt_re_dev {
struct bnxt_re_pacing pacing;
struct work_struct dbq_fifo_check_work;
struct delayed_work dbq_pacing_work;
+ DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
};
#define to_bnxt_re_dev(ptr, member) \
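
Note: the new cq_hash field uses the generic kernel hashtable from <linux/hashtable.h>: hash_init() in main.c prepares the buckets, hash_add() files each CQ under its qplib_cq.id, and hash_for_each_possible() walks a single bucket during lookup. A minimal userspace sketch of the same chained-hash pattern (the cq_entry type and helper names are invented for illustration; this is not driver code):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for DECLARE_HASHTABLE(cq_hash, bits): an array of singly
 * linked buckets indexed by a hash of the CQ id. */
#define CQ_HASH_BITS	4
#define CQ_HASH_SIZE	(1u << CQ_HASH_BITS)

struct cq_entry {
	uint32_t id;		/* mirrors cq->qplib_cq.id */
	struct cq_entry *next;	/* mirrors struct hlist_node hash_entry */
};

static struct cq_entry *cq_hash[CQ_HASH_SIZE];

/* Analogous to hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id) */
static void cq_hash_add(struct cq_entry *cq)
{
	unsigned int b = cq->id & (CQ_HASH_SIZE - 1);

	cq->next = cq_hash[b];
	cq_hash[b] = cq;
}

/* Analogous to the hash_for_each_possible() walk in bnxt_re_search_for_cq() */
static struct cq_entry *cq_hash_lookup(uint32_t id)
{
	struct cq_entry *cq;

	for (cq = cq_hash[id & (CQ_HASH_SIZE - 1)]; cq; cq = cq->next)
		if (cq->id == id)
			return cq;
	return NULL;
}

int main(void)
{
	struct cq_entry a = { .id = 7 }, b = { .id = 7 + CQ_HASH_SIZE };

	cq_hash_add(&a);
	cq_hash_add(&b);	/* same bucket as 'a'; chained, both findable */
	printf("lookup(%u) -> %p, lookup(%u) -> %p\n",
	       a.id, (void *)cq_hash_lookup(a.id),
	       b.id, (void *)cq_hash_lookup(b.id));
	return 0;
}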
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 758ea02e1d13..7213dc7574d0 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -50,6 +50,7 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
+#include <linux/hashtable.h>
#include "bnxt_ulp.h"
@@ -2910,14 +2911,20 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
- struct bnxt_re_cq *cq;
+ struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq;
struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
nq = cq->qplib_cq.nq;
+ cctx = rdev->chip_ctx;
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ free_page((unsigned long)cq->uctx_cq_page);
+ hash_del(&cq->hash_entry);
+ }
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
ib_umem_release(cq->umem);
@@ -2935,10 +2942,11 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct bnxt_re_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- int rc, entries;
- int cqe = attr->cqe;
+ struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq = NULL;
+ int rc = -ENOMEM, entries;
unsigned int nq_alloc_cnt;
+ int cqe = attr->cqe;
u32 active_cqs;
if (attr->flags)
@@ -2951,6 +2959,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->rdev = rdev;
+ cctx = rdev->chip_ctx;
cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
entries = bnxt_re_init_depth(cqe + 1, uctx);
@@ -3012,22 +3021,32 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
spin_lock_init(&cq->cq_lock);
if (udata) {
- struct bnxt_re_cq_resp resp;
-
+ struct bnxt_re_cq_resp resp = {};
+
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
+ /* Allocate a page */
+ cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cq->uctx_cq_page)
+ goto c2fail;
+ resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
+ }
resp.cqid = cq->qplib_cq.id;
resp.tail = cq->qplib_cq.hwq.cons;
resp.phase = cq->qplib_cq.period;
resp.rsvd = 0;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
- goto c2fail;
+ goto free_mem;
}
}
return 0;
+free_mem:
+ free_page((unsigned long)cq->uctx_cq_page);
c2fail:
ib_umem_release(cq->umem);
fail:
@@ -4214,6 +4233,19 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
}
}
+static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
+{
+ struct bnxt_re_cq *cq = NULL, *tmp_cq;
+
+ hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
+ if (tmp_cq->qplib_cq.id == cq_id) {
+ cq = tmp_cq;
+ break;
+ }
+ }
+ return cq;
+}
+
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
@@ -4442,10 +4474,12 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
struct bnxt_re_ucontext *uctx;
struct ib_ucontext *ib_uctx;
struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
u64 mem_offset;
u64 addr = 0;
u32 length;
u32 offset;
+ u32 cq_id;
int err;
ib_uctx = ib_uverbs_get_ucontext(attrs);
@@ -4461,6 +4495,19 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
switch (res_type) {
case BNXT_RE_CQ_TOGGLE_MEM:
+ err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
+ if (err)
+ return err;
+
+ cq = bnxt_re_search_for_cq(rdev, cq_id);
+ if (!cq)
+ return -EINVAL;
+
+ length = PAGE_SIZE;
+ addr = (u64)cq->uctx_cq_page;
+ mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
+ offset = 0;
+ break;
case BNXT_RE_SRQ_TOGGLE_MEM:
break;
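
Note: on the uverbs side, create_cq now advertises the toggle page through resp.comp_mask, and BNXT_RE_METHOD_GET_TOGGLE_MEM resolves a CQ id to that page so it can be exposed via mmap. A hedged sketch of how a userspace consumer might map it, assuming the method hands back an mmap offset valid on the uverbs command FD (obtaining cmd_fd and toggle_mmap_offset is elided; both names are placeholders, not part of the real library ABI):

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/*
 * Map the per-CQ toggle page read-only.  The kernel NQ handler stores the
 * current toggle value at offset 0 of this page before invoking the
 * completion handler, so userspace only reads the first 32-bit word.
 */
volatile uint32_t *map_cq_toggle_page(int cmd_fd, off_t toggle_mmap_offset)
{
	void *page;

	page = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED,
		    cmd_fd, toggle_mmap_offset);
	if (page == MAP_FAILED) {
		perror("mmap toggle page");
		return NULL;
	}
	return (volatile uint32_t *)page;
}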
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index da3fe018f255..b267d6d5975f 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -108,6 +108,8 @@ struct bnxt_re_cq {
struct ib_umem *umem;
struct ib_umem *resize_umem;
int resize_cqe;
+ void *uctx_cq_page;
+ struct hlist_node hash_entry;
};
struct bnxt_re_mr {
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 7f4f6db1392c..eb03ebad2e5a 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -54,6 +54,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
+#include <linux/hashtable.h>
#include "bnxt_ulp.h"
#include "roce_hsi.h"
@@ -136,6 +137,8 @@ static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
if (bnxt_re_hwrm_qcaps(rdev))
dev_err(rdev_to_dev(rdev),
"Failed to query hwrm qcaps\n");
+ if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx))
+ cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT;
}
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
@@ -1206,9 +1209,13 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
{
struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
qplib_cq);
+ u32 *cq_ptr;
if (cq->ib_cq.comp_handler) {
- /* Lock comp_handler? */
+ if (cq->uctx_cq_page) {
+ cq_ptr = (u32 *)cq->uctx_cq_page;
+ *cq_ptr = cq->qplib_cq.toggle;
+ }
(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
}
@@ -1730,6 +1737,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
*/
bnxt_re_vf_res_config(rdev);
}
+ hash_init(rdev->cq_hash);
return 0;
free_sctx:
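
Note: the cqn handler change publishes qplib_cq.toggle into the shared page before the completion callback runs, so userspace can resynchronize its expected phase after every event instead of guessing. A generic illustration of phase/toggle-bit ring consumption (not the bnxt_re user library, just the pattern such a page supports; all names below are made up):

#include <stdint.h>

/* Generic phase-bit completion ring: an entry is valid only when its
 * phase bit matches the consumer's current expected phase, which flips
 * on every ring wrap.  A kernel-published toggle value lets the consumer
 * reload 'phase' rather than infer it after an event. */
struct cqe {
	uint32_t data;
	uint32_t flags;			/* bit 0: phase/toggle set by producer */
};

#define CQE_PHASE_BIT	0x1u

int poll_one(struct cqe *ring, uint32_t depth, uint32_t *cons,
	     uint32_t *phase, struct cqe *out)
{
	struct cqe *e = &ring[*cons % depth];

	if ((e->flags & CQE_PHASE_BIT) != *phase)
		return 0;			/* no new completion yet */
	*out = *e;
	if (++(*cons) % depth == 0)
		*phase ^= CQE_PHASE_BIT;	/* phase flips on ring wrap */
	return 1;
}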
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 382d89fa7d16..61628f7f1253 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -55,6 +55,12 @@ struct bnxt_qplib_drv_modes {
u8 wqe_mode;
bool db_push;
bool dbr_pacing;
+ u32 toggle_bits;
+};
+
+enum bnxt_re_toggle_modes {
+ BNXT_QPLIB_CQ_TOGGLE_BIT = 0x1,
+ BNXT_QPLIB_SRQ_TOGGLE_BIT = 0x2,
};
struct bnxt_qplib_chip_ctx {