author		Long Li <longli@microsoft.com>		2023-02-16 03:32:02 +0300
committer	Jason Gunthorpe <jgg@nvidia.com>	2023-02-16 19:03:16 +0300
commit		89d42b8c85b4c67d310c5ccaf491acbf71a260c3 (patch)
tree		5c5c191b5b48654e0f28e73f65ee1895cf070a4c /drivers/infiniband/hw/mana
parent		72a03627443d5bc7032ab98bd784740cd8a76f8a (diff)
download	linux-89d42b8c85b4c67d310c5ccaf491acbf71a260c3.tar.xz
RDMA/mana_ib: Fix a bug when the PF indicates more entries for registering memory on first packet
When registering memory in a large chunk that doesn't fit into a single PF
message, the PF may return GDMA_STATUS_MORE_ENTRIES on the first message if
there are more messages needed for registering more chunks. Fix the VF to
make it process the correct return code.

Fixes: 0266a177631d ("RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter")
Link: https://lore.kernel.org/r/1676507522-21018-1-git-send-email-longli@linuxonhyperv.com
Signed-off-by: Long Li <longli@microsoft.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
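As a stand-alone illustration of the rule this patch enforces (every create
message except the last one leaves pages outstanding, so the PF answers it
with GDMA_STATUS_MORE_ENTRIES rather than 0), here is a minimal userspace
sketch. The helper name, the sample page counts, and the numeric status
value are assumptions made for the example, not kernel code:

/*
 * Hypothetical sketch of the per-message expected-status rule.
 * Only the comparison against GDMA_STATUS_MORE_ENTRIES mirrors the
 * driver change below; everything else is illustrative scaffolding.
 */
#include <stdio.h>

/* Placeholder value for the example; the real constant is defined in
 * the kernel's GDMA headers. */
#define GDMA_STATUS_MORE_ENTRIES 0x105

static unsigned int expected_status_for_msg(size_t processed,
					    size_t in_this_msg,
					    size_t total)
{
	/* A message that still leaves pages unregistered is expected to
	 * be answered with GDMA_STATUS_MORE_ENTRIES, not 0. */
	if (processed + in_this_msg < total)
		return GDMA_STATUS_MORE_ENTRIES;
	return 0;	/* last message: plain success */
}

int main(void)
{
	size_t total = 10, per_msg = 4, done = 0;

	while (done < total) {
		size_t n = (total - done < per_msg) ? total - done : per_msg;

		printf("pages %zu..%zu: expect status 0x%x\n", done,
		       done + n - 1, expected_status_for_msg(done, n, total));
		done += n;
	}
	return 0;
}

The fix below hoists this computation to the top of the per-chunk loop so
that the first create message is checked against the same expectation as
the subsequent ones, instead of unconditionally requiring status 0.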
Diffstat (limited to 'drivers/infiniband/hw/mana')
-rw-r--r--	drivers/infiniband/hw/mana/main.c	22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 8b3bc302d6f3..7be4c3adb4e2 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -249,7 +249,8 @@ static int
 mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
 			    struct gdma_context *gc,
 			    struct gdma_create_dma_region_req *create_req,
-			    size_t num_pages, mana_handle_t *gdma_region)
+			    size_t num_pages, mana_handle_t *gdma_region,
+			    u32 expected_status)
 {
 	struct gdma_create_dma_region_resp create_resp = {};
 	unsigned int create_req_msg_size;
@@ -261,7 +262,7 @@ mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
 	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
 				   sizeof(create_resp), &create_resp);
 
-	if (err || create_resp.hdr.status) {
+	if (err || create_resp.hdr.status != expected_status) {
 		ibdev_dbg(&dev->ib_dev,
 			  "Failed to create DMA region: %d, 0x%x\n",
 			  err, create_resp.hdr.status);
@@ -372,14 +373,21 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 	page_addr_list = create_req->page_addr_list;
 	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
+		u32 expected_status = 0;
+
 		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
 		if (tail < num_pages_to_handle)
 			continue;
 
+		if (num_pages_processed + num_pages_to_handle <
+		    num_pages_total)
+			expected_status = GDMA_STATUS_MORE_ENTRIES;
+
 		if (!num_pages_processed) {
 			/* First create message */
 			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
-							  tail, gdma_region);
+							  tail, gdma_region,
+							  expected_status);
 			if (err)
 				goto out;
@@ -392,14 +400,8 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 			page_addr_list = add_req->page_addr_list;
 		} else {
 			/* Subsequent create messages */
-			u32 expected_s = 0;
-
-			if (num_pages_processed + num_pages_to_handle <
-			    num_pages_total)
-				expected_s = GDMA_STATUS_MORE_ENTRIES;
-
 			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
-							expected_s);
+							expected_status);
 			if (err)
 				break;
 		}