author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-17 21:18:18 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-17 21:18:18 +0300
commit | a1e16bc7d5f7ca3599d8a7f061841c93a563665e (patch)
tree | 73e557536e5dc52894e0e24439406db3510c5434 /drivers/infiniband/hw/cxgb4/mem.c
parent | 2a934b38c066ff221b08a9c703314a2a1c885dbd (diff)
parent | c7a198c700763ac89abbb166378f546aeb9afb33 (diff)
download | linux-a1e16bc7d5f7ca3599d8a7f061841c93a563665e.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"A usual cycle for RDMA with a typical mix of driver and core subsystem
updates:
- Driver minor changes and bug fixes for mlx5, efa, rxe, vmw_pvrdma,
  hns, usnic, qib, qedr, cxgb4, bnxt_re
- Various rtrs fixes and updates
- Bug fix for mlx4 CM emulation for virtualization scenarios where
MRA wasn't working right
- Use tracepoints instead of pr_debug in the CM code
- Scrub the locking in ucma and cma to close more syzkaller bugs
- Use tasklet_setup in the subsystem
- Revert the idea that 'destroy' operations are not allowed to fail
at the driver level. This proved unworkable from a HW perspective.
- Revise how the umem API works so drivers make fewer mistakes using
  it (a sketch of the new iteration pattern follows this list)
- XRC support for qedr
- Convert the RWQ and MW uverbs objects to the new allocation scheme
  (sketched after the diff at the bottom of this page)
- Large queue entry sizes for hns
- Use hmm_range_fault() for mlx5 On Demand Paging
- uverbs APIs to inspect the GID table instead of sysfs
- Move some of the RDMA code for building large page SGLs into
lib/scatterlist"
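The umem rework called out above is exactly what the cxgb4 diff on this page exercises: instead of open-coding a scatterlist walk over umem->sg_head, a driver sizes its page list with ib_umem_num_dma_blocks() and walks it with rdma_umem_for_each_dma_block(). A minimal sketch of the pattern follows; fill_pbl() and its pbl argument are hypothetical driver state, while the ib_umem_*/rdma_*_dma_block helpers are the real core API this cycle introduces.

#include <rdma/ib_umem.h>

/*
 * Hypothetical helper: emit one big-endian PBL entry per PAGE_SIZE
 * DMA block of a registered user memory region. The caller is assumed
 * to have sized pbl[] with ib_umem_num_dma_blocks(umem, PAGE_SIZE).
 */
static int fill_pbl(struct ib_umem *umem, __be64 *pbl)
{
	struct ib_block_iter biter;
	int i = 0;

	/* the core iterator hides sg_head/nmap details from the driver */
	rdma_umem_for_each_dma_block(umem, &biter, PAGE_SIZE)
		pbl[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));

	return i;
}

The cxgb4 hunks below are this substitution verbatim: ib_umem_num_pages() becomes ib_umem_num_dma_blocks(), and the for_each_sg_dma_page() loop becomes the block iterator.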
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (191 commits)
RDMA/ucma: Fix use after free in destroy id flow
RDMA/rxe: Handle skb_clone() failure in rxe_recv.c
RDMA/rxe: Move the definitions for rxe_av.network_type to uAPI
RDMA: Explicitly pass in the dma_device to ib_register_device
lib/scatterlist: Do not limit max_segment to PAGE_ALIGNED values
IB/mlx4: Convert rej_tmout radix-tree to XArray
RDMA/rxe: Fix bug rejecting all multicast packets
RDMA/rxe: Fix skb lifetime in rxe_rcv_mcast_pkt()
RDMA/rxe: Remove duplicate entries in struct rxe_mr
IB/hfi,rdmavt,qib,opa_vnic: Update MAINTAINERS
IB/rdmavt: Fix sizeof mismatch
MAINTAINERS: CISCO VIC LOW LATENCY NIC DRIVER
RDMA/bnxt_re: Fix sizeof mismatch for allocation of pbl_tbl.
RDMA/bnxt_re: Use rdma_umem_for_each_dma_block()
RDMA/umem: Move to allocate SG table from pages
lib/scatterlist: Add support in dynamic allocation of SG table from pages
tools/testing/scatterlist: Show errors in human readable form
tools/testing/scatterlist: Rejuvenate bit-rotten test
RDMA/ipoib: Set rtnl_link_ops for ipoib interfaces
RDMA/uverbs: Expose the new GID query API to user space
...
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/mem.c')
-rw-r--r-- | drivers/infiniband/hw/cxgb4/mem.c | 40
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 73936c3341b7..42234df896fb 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -510,7 +510,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	__be64 *pages;
 	int shift, n, i;
 	int err = -ENOMEM;
-	struct sg_dma_page_iter sg_iter;
+	struct ib_block_iter biter;
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
 	struct c4iw_mr *mhp;
@@ -548,7 +548,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	shift = PAGE_SHIFT;
 
-	n = ib_umem_num_pages(mhp->umem);
+	n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);
 	err = alloc_pbl(mhp, n);
 	if (err)
 		goto err_umem_release;
@@ -561,8 +561,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	i = n = 0;
 
-	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
-		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
+	rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
+		pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
 		if (i == PAGE_SIZE / sizeof(*pages)) {
 			err = write_pbl(&mhp->rhp->rdev, pages,
 					mhp->attr.pbl_addr + (n << 3), i,
@@ -611,30 +611,23 @@ err_free_mhp:
 	return ERR_PTR(err);
 }
 
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			    struct ib_udata *udata)
+int c4iw_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
+	struct c4iw_mw *mhp = to_c4iw_mw(ibmw);
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
-	struct c4iw_mw *mhp;
 	u32 mmid;
 	u32 stag = 0;
 	int ret;
 
-	if (type != IB_MW_TYPE_1)
-		return ERR_PTR(-EINVAL);
+	if (ibmw->type != IB_MW_TYPE_1)
+		return -EINVAL;
 
-	php = to_c4iw_pd(pd);
+	php = to_c4iw_pd(ibmw->pd);
 	rhp = php->rhp;
-	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
-	if (!mhp)
-		return ERR_PTR(-ENOMEM);
 	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
-	if (!mhp->wr_waitp) {
-		ret = -ENOMEM;
-		goto free_mhp;
-	}
+	if (!mhp->wr_waitp)
+		return -ENOMEM;
 
 	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
 	if (!mhp->dereg_skb) {
@@ -645,18 +638,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
 	if (ret)
 		goto free_skb;
+
 	mhp->rhp = rhp;
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.type = FW_RI_STAG_MW;
 	mhp->attr.stag = stag;
 	mmid = (stag) >> 8;
-	mhp->ibmw.rkey = stag;
+	ibmw->rkey = stag;
 	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
 		ret = -ENOMEM;
 		goto dealloc_win;
 	}
 	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
-	return &(mhp->ibmw);
+	return 0;
 
 dealloc_win:
 	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
@@ -665,9 +659,7 @@ free_skb:
 	kfree_skb(mhp->dereg_skb);
 free_wr_wait:
 	c4iw_put_wr_wait(mhp->wr_waitp);
-free_mhp:
-	kfree(mhp);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 int c4iw_dealloc_mw(struct ib_mw *mw)
@@ -684,8 +676,6 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 			  mhp->wr_waitp);
 	kfree_skb(mhp->dereg_skb);
 	c4iw_put_wr_wait(mhp->wr_waitp);
-	pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
-	kfree(mhp);
 	return 0;
 }
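The c4iw_alloc_mw() hunks above are the cxgb4 side of the MW conversion to the new allocation scheme: the core now allocates the driver's container struct (sized through INIT_RDMA_OBJ_SIZE in the device ops) before invoking .alloc_mw, so the callback only initializes the embedded object and returns a plain errno. A minimal sketch of what a converted driver provides, with the my_* names hypothetical while the ib_mw plumbing is the real API:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* hypothetical driver container; the core ib_mw must be embedded */
struct my_mw {
	struct ib_mw ibmw;
	/* driver-private hardware state would live here */
};

static int my_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct my_mw *mhp = container_of(ibmw, struct my_mw, ibmw);

	if (ibmw->type != IB_MW_TYPE_1)
		return -EINVAL;		/* plain errno, not ERR_PTR() */

	/*
	 * Program the hardware window here. On failure the core frees
	 * the container, which is why the kzalloc()/kfree() pairs and
	 * the free_mhp label drop out of the cxgb4 code above.
	 */
	pr_debug("initialized mw %p\n", mhp);
	return 0;
}

static const struct ib_device_ops my_dev_ops = {
	.alloc_mw = my_alloc_mw,
	/* tells the core how big the per-MW allocation must be */
	INIT_RDMA_OBJ_SIZE(ib_mw, my_mw, ibmw),
};

to_c4iw_mw(ibmw) in the diff is the same container_of() step, and returning 0 instead of &mhp->ibmw is what lets the error paths collapse to plain errno returns.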