Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/ib_verbs.c')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c  303
1 file changed, 268 insertions, 35 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index e86afecfbe46..abef0b8baa7c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -61,6 +61,15 @@
#include "bnxt_re.h"
#include "ib_verbs.h"
+
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_std_types.h>
+
+#include <rdma/ib_user_ioctl_cmds.h>
+
+#define UVERBS_MODULE_NAME bnxt_re
+#include <rdma/uverbs_named_ioctl.h>
+
#include <rdma/bnxt_re-abi.h>
static int __from_ib_access_flags(int iflags)
@@ -199,6 +208,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ int rc;
memset(port_attr, 0, sizeof(*port_attr));
@@ -228,10 +238,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
port_attr->sm_sl = 0;
port_attr->subnet_timeout = 0;
port_attr->init_type_reply = 0;
- port_attr->active_speed = rdev->active_speed;
- port_attr->active_width = rdev->active_width;
+ rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
+ &port_attr->active_width);
- return 0;
+ return rc;
}
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
@@ -533,12 +543,57 @@ fail:
return rc;
}
+static struct bnxt_re_user_mmap_entry*
+bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
+ enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
+{
+ struct bnxt_re_user_mmap_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->mem_offset = mem_offset;
+ entry->mmap_flag = mmap_flag;
+ entry->uctx = uctx;
+
+ switch (mmap_flag) {
+ case BNXT_RE_MMAP_SH_PAGE:
+ ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
+ &entry->rdma_entry, PAGE_SIZE, 0);
+ break;
+ case BNXT_RE_MMAP_UC_DB:
+ case BNXT_RE_MMAP_WC_DB:
+ ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
+ &entry->rdma_entry, PAGE_SIZE);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+ if (offset)
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return entry;
+}
+
/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
+ if (udata) {
+ rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+ pd->pd_db_mmap = NULL;
+ }
+
bnxt_re_destroy_fence_mr(pd);
if (pd->qplib_pd.id) {
@@ -557,7 +612,8 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
- int rc;
+ struct bnxt_re_user_mmap_entry *entry = NULL;
+ int rc = 0;
pd->rdev = rdev;
if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
@@ -567,15 +623,15 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
}
if (udata) {
- struct bnxt_re_pd_resp resp;
+ struct bnxt_re_pd_resp resp = {};
if (!ucntx->dpi.dbr) {
/* Allocate DPI in alloc_pd to avoid failing of
* ibv_devinfo and family of application when DPIs
* are depleted.
*/
- if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
- &ucntx->dpi, ucntx)) {
+ if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
+ &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
rc = -ENOMEM;
goto dbfail;
}
@@ -584,12 +640,21 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
resp.pdid = pd->qplib_pd.id;
/* Still allow mapping this DBR to the new user PD. */
resp.dpi = ucntx->dpi.dpi;
- resp.dbr = (u64)ucntx->dpi.umdbr;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
+ BNXT_RE_MMAP_UC_DB, &resp.dbr);
+
+ if (!entry) {
+ rc = -ENOMEM;
+ goto dbfail;
+ }
+
+ pd->pd_db_mmap = &entry->rdma_entry;
+
+ rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
if (rc) {
- ibdev_err(&rdev->ibdev,
- "Failed to copy user response\n");
+ rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+ rc = -EFAULT;
goto dbfail;
}
}
@@ -613,12 +678,20 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
struct bnxt_re_dev *rdev = ah->rdev;
+ bool block = true;
+ int rc = 0;
- bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
- !(flags & RDMA_DESTROY_AH_SLEEPABLE));
+ block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
+ rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
+ if (BNXT_RE_CHECK_RC(rc)) {
+ if (rc == -ETIMEDOUT)
+ rc = 0;
+ else
+ goto fail;
+ }
atomic_dec(&rdev->ah_count);
-
- return 0;
+fail:
+ return rc;
}
static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
@@ -3341,9 +3414,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
/* post data received in the send queue */
- rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
-
- return 0;
+ return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
}
static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
@@ -3956,6 +4027,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_user_mmap_entry *entry;
struct bnxt_re_uctx_resp resp = {};
u32 chip_met_rev_num = 0;
int rc;
@@ -3994,6 +4066,16 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
resp.mode = rdev->chip_ctx->modes.wqe_mode;
+ if (rdev->chip_ctx->modes.db_push)
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
+
+ entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto cfail;
+ }
+ uctx->shpage_mmap = &entry->rdma_entry;
+
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (rc) {
ibdev_err(ibdev, "Failed to copy user context");
@@ -4017,6 +4099,8 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
struct bnxt_re_dev *rdev = uctx->rdev;
+ rdma_user_mmap_entry_remove(uctx->shpage_mmap);
+ uctx->shpage_mmap = NULL;
if (uctx->shpg)
free_page((unsigned long)uctx->shpg);
@@ -4024,8 +4108,7 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
/* Free DPI only if this is the first PD allocated by the
* application and mark the context dpi as NULL
*/
- bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
- &rdev->qplib_res.dpi_tbl, &uctx->dpi);
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
uctx->dpi.dbr = NULL;
}
}
@@ -4036,27 +4119,177 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
struct bnxt_re_ucontext,
ib_uctx);
- struct bnxt_re_dev *rdev = uctx->rdev;
+ struct bnxt_re_user_mmap_entry *bnxt_entry;
+ struct rdma_user_mmap_entry *rdma_entry;
+ int ret = 0;
u64 pfn;
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
+ if (!rdma_entry)
return -EINVAL;
- if (vma->vm_pgoff) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- PAGE_SIZE, vma->vm_page_prot)) {
- ibdev_err(&rdev->ibdev, "Failed to map DPI");
- return -EAGAIN;
- }
- } else {
- pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
- if (remap_pfn_range(vma, vma->vm_start,
- pfn, PAGE_SIZE, vma->vm_page_prot)) {
- ibdev_err(&rdev->ibdev, "Failed to map shared page");
- return -EAGAIN;
+ bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+ rdma_entry);
+
+ switch (bnxt_entry->mmap_flag) {
+ case BNXT_RE_MMAP_WC_DB:
+ pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+ ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case BNXT_RE_MMAP_UC_DB:
+ pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+ ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case BNXT_RE_MMAP_SH_PAGE:
+ ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return ret;
+}
+
+void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct bnxt_re_user_mmap_entry *bnxt_entry;
+
+ bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+ rdma_entry);
+
+ kfree(bnxt_entry);
+}
+
+static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
+ enum bnxt_re_alloc_page_type alloc_type;
+ struct bnxt_re_user_mmap_entry *entry;
+ enum bnxt_re_mmap_flag mmap_flag;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_re_ucontext *uctx;
+ struct bnxt_re_dev *rdev;
+ u64 mmap_offset;
+ u32 length;
+ u32 dpi;
+ u64 dbr;
+ int err;
+
+ uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
+ if (IS_ERR(uctx))
+ return PTR_ERR(uctx);
+
+ err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
+ if (err)
+ return err;
+
+ rdev = uctx->rdev;
+ cctx = rdev->chip_ctx;
+
+ switch (alloc_type) {
+ case BNXT_RE_ALLOC_WC_PAGE:
+ if (cctx->modes.db_push) {
+ if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
+ uctx, BNXT_QPLIB_DPI_TYPE_WC))
+ return -ENOMEM;
+ length = PAGE_SIZE;
+ dpi = uctx->wcdpi.dpi;
+ dbr = (u64)uctx->wcdpi.umdbr;
+ mmap_flag = BNXT_RE_MMAP_WC_DB;
+ } else {
+ return -EINVAL;
}
+
+ break;
+
+ default:
+ return -EOPNOTSUPP;
}
+ entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
+ if (!entry)
+ return -ENOMEM;
+
+ uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
+ err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
+ &mmap_offset, sizeof(mmap_offset));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
+ &length, sizeof(length));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
+ &dpi, sizeof(dpi));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_user_mmap_entry *entry = uobject->object;
+ struct bnxt_re_ucontext *uctx = entry->uctx;
+
+ switch (entry->mmap_flag) {
+ case BNXT_RE_MMAP_WC_DB:
+ if (uctx && uctx->wcdpi.dbr) {
+ struct bnxt_re_dev *rdev = uctx->rdev;
+
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
+ uctx->wcdpi.dbr = NULL;
+ }
+ break;
+ default:
+ goto exit;
+ }
+ rdma_user_mmap_entry_remove(&entry->rdma_entry);
+exit:
return 0;
}
+
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
+ UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
+ BNXT_RE_OBJECT_ALLOC_PAGE,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
+ enum bnxt_re_alloc_page_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
+ UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
+ BNXT_RE_OBJECT_ALLOC_PAGE,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
+ UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
+ &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
+ &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
+
+const struct uapi_definition bnxt_re_uapi_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
+ {}
+};
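
For reference, the offsets produced by bnxt_re_mmap_entry_insert() above (returned to userspace in bnxt_re_pd_resp.dbr and in BNXT_RE_ALLOC_PAGE_MMAP_OFFSET/_LENGTH) are consumed by calling mmap(2) on the uverbs device fd; that call routes back into bnxt_re_mmap(), where rdma_user_mmap_entry_get() resolves the entry and the WC or UC page protection is applied. A minimal userspace-side sketch, assuming a hypothetical map_doorbell_page() helper and fd/offset/length values obtained from the alloc_pd response or the ALLOC_PAGE ioctl (none of which are part of this patch):

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

/* Hypothetical helper: 'uverbs_fd' is the open /dev/infiniband/uverbsN fd,
 * 'db_offset' and 'db_length' are the mmap offset and length the kernel
 * returned (e.g. resp.dbr from alloc_pd, or the values copied out through
 * BNXT_RE_ALLOC_PAGE_MMAP_OFFSET / BNXT_RE_ALLOC_PAGE_MMAP_LENGTH).
 */
void *map_doorbell_page(int uverbs_fd, uint64_t db_offset, size_t db_length)
{
	void *db;

	/* The offset encodes the rdma_user_mmap_entry; the kernel side lands
	 * in bnxt_re_mmap() and selects pgprot_writecombine() or
	 * pgprot_noncached() based on the entry's mmap_flag. */
	db = mmap(NULL, db_length, PROT_READ | PROT_WRITE, MAP_SHARED,
		  uverbs_fd, db_offset);
	if (db == MAP_FAILED) {
		perror("mmap doorbell");
		return NULL;
	}
	return db;
}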