author     Bob Pearson <rpearsonhpe@gmail.com>       2021-06-08 07:25:50 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>          2021-06-17 02:51:18 +0300
commit     32a577b4c3a9d0b5d3e47ac47ffd50774a04f82a (patch)
tree       4b5aa5d3879888514c4c1157d5653d5ef7a6137c /drivers/infiniband/sw/rxe/rxe_mw.c
parent     c1a411268a4b17ecdf271f0cefed53e10e9fa166 (diff)
download   linux-32a577b4c3a9d0b5d3e47ac47ffd50774a04f82a.tar.xz
RDMA/rxe: Add support for bind MW work requests
Add support for bind MW work requests from user space. Since rdma/core
does not support bind mw in ib_send_wr, there is no way to support bind
mw in kernel space.

Added bind_mw local operation in rxe_req.c.
Added bind_mw WR operation in rxe_opcode.c.
Added bind_mw WC in rxe_comp.c.
Added additional fields to rxe_mw in rxe_verbs.h.
Added rxe_do_dealloc_mw() subroutine to clean up an mw when
rxe_dealloc_mw() is called.
Added code to implement the bind_mw operation in rxe_mw.c.

Link: https://lore.kernel.org/r/20210608042552.33275-8-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
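Since the bind is driven from user space, a consumer would typically allocate a type 2 MW and post an IBV_WR_BIND_MW work request through libibverbs, which rxe then executes as the bind_mw local operation added by this patch. The sketch below is illustrative only and not part of the patch: it assumes the standard libibverbs calls ibv_alloc_mw(), ibv_inc_rkey() and ibv_post_send(), hypothetical qp/mr/buf/len variables, and an MR registered with IBV_ACCESS_MW_BIND (see the C10-73 check in the diff).

#include <stddef.h>
#include <stdint.h>
#include <infiniband/verbs.h>

/* Illustrative user-space sketch, not part of this patch: bind a
 * type 2 memory window over a registered MR by posting a bind WR.
 */
static int bind_type2_mw(struct ibv_pd *pd, struct ibv_qp *qp,
                         struct ibv_mr *mr, void *buf, size_t len)
{
        struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);
        struct ibv_send_wr wr = {0}, *bad_wr;

        if (!mw)
                return -1;

        wr.opcode = IBV_WR_BIND_MW;
        wr.send_flags = IBV_SEND_SIGNALED;
        wr.bind_mw.mw = mw;
        /* the new rkey must differ from the old one in its low 8 bits;
         * ibv_inc_rkey() bumps exactly that byte
         */
        wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);
        wr.bind_mw.bind_info.mr = mr;
        wr.bind_mw.bind_info.addr = (uint64_t)(uintptr_t)buf;
        wr.bind_mw.bind_info.length = len;
        wr.bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_READ |
                                               IBV_ACCESS_REMOTE_WRITE;

        /* once the bind completes on the CQ, remote peers can address
         * the window under wr.bind_mw.rkey
         */
        return ibv_post_send(qp, &wr, &bad_wr);
}

Type 1 windows use the separate ibv_bind_mw() verb rather than a posted work request.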
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_mw.c')
-rw-r--r--   drivers/infiniband/sw/rxe/rxe_mw.c   202
1 file changed, 201 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 69128e298d44..65215dde9974 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -29,6 +29,29 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
         return 0;
 }
 
+static void rxe_do_dealloc_mw(struct rxe_mw *mw)
+{
+        if (mw->mr) {
+                struct rxe_mr *mr = mw->mr;
+
+                mw->mr = NULL;
+                atomic_dec(&mr->num_mw);
+                rxe_drop_ref(mr);
+        }
+
+        if (mw->qp) {
+                struct rxe_qp *qp = mw->qp;
+
+                mw->qp = NULL;
+                rxe_drop_ref(qp);
+        }
+
+        mw->access = 0;
+        mw->addr = 0;
+        mw->length = 0;
+        mw->state = RXE_MW_STATE_INVALID;
+}
+
 int rxe_dealloc_mw(struct ib_mw *ibmw)
 {
         struct rxe_mw *mw = to_rmw(ibmw);
@@ -36,7 +59,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
         unsigned long flags;
 
         spin_lock_irqsave(&mw->lock, flags);
-        mw->state = RXE_MW_STATE_INVALID;
+        rxe_do_dealloc_mw(mw);
         spin_unlock_irqrestore(&mw->lock, flags);
 
         rxe_drop_ref(mw);
@@ -45,6 +68,183 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
         return 0;
 }
 
+static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+                             struct rxe_mw *mw, struct rxe_mr *mr)
+{
+        if (mw->ibmw.type == IB_MW_TYPE_1) {
+                if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
+                        pr_err_once(
+                                "attempt to bind a type 1 MW not in the valid state\n");
+                        return -EINVAL;
+                }
+
+                /* o10-36.2.2 */
+                if (unlikely((mw->access & IB_ZERO_BASED))) {
+                        pr_err_once("attempt to bind a zero based type 1 MW\n");
+                        return -EINVAL;
+                }
+        }
+
+        if (mw->ibmw.type == IB_MW_TYPE_2) {
+                /* o10-37.2.30 */
+                if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
+                        pr_err_once(
+                                "attempt to bind a type 2 MW not in the free state\n");
+                        return -EINVAL;
+                }
+
+                /* C10-72 */
+                if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
+                        pr_err_once(
+                                "attempt to bind type 2 MW with qp with different PD\n");
+                        return -EINVAL;
+                }
+
+                /* o10-37.2.40 */
+                if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
+                        pr_err_once(
+                                "attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
+                        return -EINVAL;
+                }
+        }
+
+        if (unlikely((wqe->wr.wr.mw.rkey & 0xff) == (mw->ibmw.rkey & 0xff))) {
+                pr_err_once("attempt to bind MW with same key\n");
+                return -EINVAL;
+        }
+
+        /* remaining checks only apply to a nonzero MR */
+        if (!mr)
+                return 0;
+
+        if (unlikely(mr->access & IB_ZERO_BASED)) {
+                pr_err_once("attempt to bind MW to zero based MR\n");
+                return -EINVAL;
+        }
+
+        /* C10-73 */
+        if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
+                pr_err_once(
+                        "attempt to bind an MW to an MR without bind access\n");
+                return -EINVAL;
+        }
+
+        /* C10-74 */
+        if (unlikely((mw->access &
+                      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
+                     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
+                pr_err_once(
+                        "attempt to bind a writeable MW to an MR without local write access\n");
+                return -EINVAL;
+        }
+
+        /* C10-75 */
+        if (mw->access & IB_ZERO_BASED) {
+                if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
+                        pr_err_once(
+                                "attempt to bind a ZB MW outside of the MR\n");
+                        return -EINVAL;
+                }
+        } else {
+                if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
+                             ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
+                              (mr->iova + mr->length)))) {
+                        pr_err_once(
+                                "attempt to bind a VA MW outside of the MR\n");
+                        return -EINVAL;
+                }
+        }
+
+        return 0;
+}
+
+static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+                           struct rxe_mw *mw, struct rxe_mr *mr)
+{
+        u32 rkey;
+        u32 new_rkey;
+
+        rkey = mw->ibmw.rkey;
+        new_rkey = (rkey & 0xffffff00) | (wqe->wr.wr.mw.rkey & 0x000000ff);
+
+        mw->ibmw.rkey = new_rkey;
+        mw->access = wqe->wr.wr.mw.access;
+        mw->state = RXE_MW_STATE_VALID;
+        mw->addr = wqe->wr.wr.mw.addr;
+        mw->length = wqe->wr.wr.mw.length;
+
+        if (mw->mr) {
+                rxe_drop_ref(mw->mr);
+                atomic_dec(&mw->mr->num_mw);
+                mw->mr = NULL;
+        }
+
+        if (mw->length) {
+                mw->mr = mr;
+                atomic_inc(&mr->num_mw);
+                rxe_add_ref(mr);
+        }
+
+        if (mw->ibmw.type == IB_MW_TYPE_2) {
+                rxe_add_ref(qp);
+                mw->qp = qp;
+        }
+}
+
+int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+        int ret;
+        struct rxe_mw *mw;
+        struct rxe_mr *mr;
+        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+        unsigned long flags;
+
+        mw = rxe_pool_get_index(&rxe->mw_pool,
+                                wqe->wr.wr.mw.mw_rkey >> 8);
+        if (unlikely(!mw)) {
+                ret = -EINVAL;
+                goto err;
+        }
+
+        if (unlikely(mw->ibmw.rkey != wqe->wr.wr.mw.mw_rkey)) {
+                ret = -EINVAL;
+                goto err_drop_mw;
+        }
+
+        if (likely(wqe->wr.wr.mw.length)) {
+                mr = rxe_pool_get_index(&rxe->mr_pool,
+                                        wqe->wr.wr.mw.mr_lkey >> 8);
+                if (unlikely(!mr)) {
+                        ret = -EINVAL;
+                        goto err_drop_mw;
+                }
+
+                if (unlikely(mr->ibmr.lkey != wqe->wr.wr.mw.mr_lkey)) {
+                        ret = -EINVAL;
+                        goto err_drop_mr;
+                }
+        } else {
+                mr = NULL;
+        }
+
+        spin_lock_irqsave(&mw->lock, flags);
+
+        ret = rxe_check_bind_mw(qp, wqe, mw, mr);
+        if (ret)
+                goto err_unlock;
+
+        rxe_do_bind_mw(qp, wqe, mw, mr);
+err_unlock:
+        spin_unlock_irqrestore(&mw->lock, flags);
+err_drop_mr:
+        if (mr)
+                rxe_drop_ref(mr);
+err_drop_mw:
+        rxe_drop_ref(mw);
+err:
+        return ret;
+}
+
 void rxe_mw_cleanup(struct rxe_pool_entry *elem)
 {
         struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
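
The lookup and re-key logic above encodes rxe's rkey layout: the upper 24 bits index the object in its pool (hence the ">> 8" passed to rxe_pool_get_index()), while the low 8 bits are the consumer-owned key byte, the only part a bind may change and, per the "same key" check in rxe_check_bind_mw(), must change. A standalone sketch of that arithmetic with made-up example values, illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rkey = 0x00012345;     /* hypothetical current MW rkey */
        uint32_t wr_rkey = 0x00000077;  /* low byte supplied by the bind WR */

        /* same composition as rxe_do_bind_mw() */
        uint32_t new_rkey = (rkey & 0xffffff00) | (wr_rkey & 0x000000ff);

        printf("pool index: 0x%x\n", rkey >> 8);    /* as in rxe_pool_get_index() */
        printf("new rkey:   0x%08x\n", new_rkey);   /* prints 0x00012377 */
        return 0;
}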