author    Linus Torvalds <torvalds@linux-foundation.org>    2020-10-17 21:18:18 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-10-17 21:18:18 +0300
commit    a1e16bc7d5f7ca3599d8a7f061841c93a563665e (patch)
tree      73e557536e5dc52894e0e24439406db3510c5434 /drivers/infiniband/hw/bnxt_re/qplib_res.c
parent    2a934b38c066ff221b08a9c703314a2a1c885dbd (diff)
parent    c7a198c700763ac89abbb166378f546aeb9afb33 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "A usual cycle for RDMA with a typical mix of driver and core subsystem
  updates:

   - Driver minor changes and bug fixes for mlx5, efa, rxe, vmw_pvrdma,
     hns, usnic, qib, qedr, cxgb4, hns, bnxt_re

   - Various rtrs fixes and updates

   - Bug fix for mlx4 CM emulation for virtualization scenarios where
     MRA wasn't working right

   - Use tracepoints instead of pr_debug in the CM code

   - Scrub the locking in ucma and cma to close more syzkaller bugs

   - Use tasklet_setup in the subsystem

   - Revert the idea that 'destroy' operations are not allowed to fail
     at the driver level. This proved unworkable from a HW perspective.

   - Revise how the umem API works so drivers make fewer mistakes using
     it

   - XRC support for qedr

   - Convert uverbs objects RWQ and MW to the new allocation scheme

   - Large queue entry sizes for hns

   - Use hmm_range_fault() for mlx5 On Demand Paging

   - uverbs APIs to inspect the GID table instead of sysfs

   - Move some of the RDMA code for building large page SGLs into
     lib/scatterlist"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (191 commits)
  RDMA/ucma: Fix use after free in destroy id flow
  RDMA/rxe: Handle skb_clone() failure in rxe_recv.c
  RDMA/rxe: Move the definitions for rxe_av.network_type to uAPI
  RDMA: Explicitly pass in the dma_device to ib_register_device
  lib/scatterlist: Do not limit max_segment to PAGE_ALIGNED values
  IB/mlx4: Convert rej_tmout radix-tree to XArray
  RDMA/rxe: Fix bug rejecting all multicast packets
  RDMA/rxe: Fix skb lifetime in rxe_rcv_mcast_pkt()
  RDMA/rxe: Remove duplicate entries in struct rxe_mr
  IB/hfi,rdmavt,qib,opa_vnic: Update MAINTAINERS
  IB/rdmavt: Fix sizeof mismatch
  MAINTAINERS: CISCO VIC LOW LATENCY NIC DRIVER
  RDMA/bnxt_re: Fix sizeof mismatch for allocation of pbl_tbl.
  RDMA/bnxt_re: Use rdma_umem_for_each_dma_block()
  RDMA/umem: Move to allocate SG table from pages
  lib/scatterlist: Add support in dynamic allocation of SG table from pages
  tools/testing/scatterlist: Show errors in human readable form
  tools/testing/scatterlist: Rejuvenate bit-rotten test
  RDMA/ipoib: Set rtnl_link_ops for ipoib interfaces
  RDMA/uverbs: Expose the new GID query API to user space
  ...
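Two of the items above converge in the file below: the umem API revision and
the bnxt_re conversion to rdma_umem_for_each_dma_block(). As a minimal sketch
of the new pattern (not code from this commit; fill_pg_map() and its
parameters are hypothetical), a driver sizes its DMA-address array with
ib_umem_num_dma_blocks() and then walks the pinned region in device-page-sized
blocks:

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper, for illustration only: fill a page-map array
 * from a pinned user memory region using the block iterator.
 */
static int fill_pg_map(struct ib_umem *umem, unsigned long pg_size,
		       dma_addr_t *pg_map, size_t max_entries)
{
	struct ib_block_iter biter;
	size_t i = 0;

	/* Count blocks with the same page size the loop below iterates in. */
	if (ib_umem_num_dma_blocks(umem, pg_size) > max_entries)
		return -EINVAL;

	/* Each iteration yields one pg_size-aligned DMA block. */
	rdma_umem_for_each_dma_block(umem, &biter, pg_size)
		pg_map[i++] = rdma_block_iter_dma_address(&biter);

	return 0;
}

The design point is that counting and iteration take the same page-size
argument, so the two cannot disagree; this is exactly the class of mismatch
the old sghead/nmap/npages bookkeeping in this driver allowed.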
Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/qplib_res.c')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c | 30
1 file changed, 17 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 7efa6e5dce62..fa7878336100 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -45,6 +45,9 @@
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -87,12 +90,11 @@ static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
struct bnxt_qplib_sg_info *sginfo)
{
- struct scatterlist *sghead = sginfo->sghead;
- struct sg_dma_page_iter sg_iter;
+ struct ib_block_iter biter;
int i = 0;

- for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
- pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+ rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
+ pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
pbl->pg_arr[i] = NULL;
pbl->pg_count++;
i++;
@@ -104,15 +106,16 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sg_info *sginfo)
{
struct pci_dev *pdev = res->pdev;
- struct scatterlist *sghead;
bool is_umem = false;
u32 pages;
int i;

if (sginfo->nopte)
return 0;
- pages = sginfo->npages;
- sghead = sginfo->sghead;
+ if (sginfo->umem)
+ pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
+ else
+ pages = sginfo->npages;
/* page ptr arrays */
pbl->pg_arr = vmalloc(pages * sizeof(void *));
if (!pbl->pg_arr)
@@ -127,7 +130,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_count = 0;
pbl->pg_size = sginfo->pgsize;

- if (!sghead) {
+ if (!sginfo->umem) {
for (i = 0; i < pages; i++) {
pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
pbl->pg_size,
@@ -183,14 +186,12 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
struct bnxt_qplib_sg_info sginfo = {};
u32 depth, stride, npbl, npde;
dma_addr_t *src_phys_ptr, **dst_virt_ptr;
- struct scatterlist *sghead = NULL;
struct bnxt_qplib_res *res;
struct pci_dev *pdev;
int i, rc, lvl;

res = hwq_attr->res;
pdev = res->pdev;
- sghead = hwq_attr->sginfo->sghead;
pg_size = hwq_attr->sginfo->pgsize;
hwq->level = PBL_LVL_MAX;

@@ -204,7 +205,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
aux_pages++;
}

- if (!sghead) {
+ if (!hwq_attr->sginfo->umem) {
hwq->is_user = false;
npages = (depth * stride) / pg_size + aux_pages;
if ((depth * stride) % pg_size)
@@ -213,11 +214,14 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
return -EINVAL;
hwq_attr->sginfo->npages = npages;
} else {
+ unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
+ hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
+
hwq->is_user = true;
- npages = hwq_attr->sginfo->npages;
+ npages = sginfo_num_pages;
npages = (npages * PAGE_SIZE) /
BIT_ULL(hwq_attr->sginfo->pgshft);
- if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
+ if ((sginfo_num_pages * PAGE_SIZE) %
BIT_ULL(hwq_attr->sginfo->pgshft))
if (!npages)
npages++;
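The arithmetic in the last hunk deserves a note: ib_umem_num_dma_blocks() is
called with sginfo->pgsize, and the result is then rescaled to the device page
size 1 << pgshft. A compact sketch of that rescaling, assuming pgsize equals
PAGE_SIZE on this user-memory path (rescale_to_device_pages() is a
hypothetical name, not part of the patch):

/*
 * blocks: what ib_umem_num_dma_blocks(umem, PAGE_SIZE) returned.
 * pgshft: log2 of the device page size.
 */
static u32 rescale_to_device_pages(unsigned long blocks, u8 pgshft)
{
	u64 bytes = (u64)blocks * PAGE_SIZE;
	u32 npages = bytes / BIT_ULL(pgshft);

	/* A region smaller than one device page still needs one page. */
	if ((bytes % BIT_ULL(pgshft)) && !npages)
		npages++;
	return npages;
}

For example, a 1 MiB region counted in 4 KiB blocks gives 256 blocks; with
64 KiB device pages (pgshft = 16) that rescales to 256 * 4096 / 65536 = 16
pages, and only a region smaller than 64 KiB would take the round-up branch,
matching the patch, which increments npages only when it would otherwise be
zero.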