author     Alex Elder <elder@linaro.org>           2022-02-03 20:09:22 +0300
committer  David S. Miller <davem@davemloft.net>   2022-02-04 13:16:08 +0300
commit     6a606b90153b821915daade0b8f253d01d443d75
tree       d0db175c200941713b31ea822a4d9679606ffdd7 /drivers/net/ipa
parent     b9dbabc5ca84087862cc3ea21bb718d2ef99fa2f
net: ipa: allocate transaction in replenish loop
When replenishing, have ipa_endpoint_replenish() allocate a transaction, and
pass that to ipa_endpoint_replenish_one() to fill. Then, if that produces no
error, commit the transaction within the replenish loop as well. In this way
we can distinguish between transaction failures and buffer allocation/mapping
failures.

Failure to allocate a transaction simply means the hardware already has as
many receive buffers as it can hold. In that case we can break out of the
replenish loop because there's nothing more to do.

If we fail to allocate or map pages for the receive buffer, just try again
later.

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
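For orientation, the replenish loop after this change has roughly the
following shape (a condensed sketch assembled from the hunks in the diff
below, not the complete function):

        while (atomic_dec_not_zero(&endpoint->replenish_backlog)) {
                /* No transaction available: the hardware already holds as
                 * many receive buffers as it can, so stop replenishing.
                 */
                trans = ipa_endpoint_trans_alloc(endpoint, 1);
                if (!trans)
                        break;

                /* Page allocation or mapping failed: jump to the error
                 * path, which frees the unused transaction and retries
                 * later.
                 */
                if (ipa_endpoint_replenish_one(endpoint, trans))
                        goto try_again_later;

                if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH)
                        endpoint->replenish_ready = 0;

                /* Ring the doorbell if we've got a full batch */
                gsi_trans_commit(trans, !endpoint->replenish_ready);
        }

Note that the try_again_later path now also frees the transaction that was
allocated but never committed (see the final hunk of the diff).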
Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 40
1 file changed, 16 insertions(+), 24 deletions(-)
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 274cf1c30b59..f5367b902c27 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1036,24 +1036,19 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
         iowrite32(val, ipa->reg_virt + offset);
 }
 
-static int
-ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint, bool doorbell)
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
+                                      struct gsi_trans *trans)
 {
-        struct gsi_trans *trans;
         struct page *page;
         u32 buffer_size;
         u32 offset;
         u32 len;
         int ret;
 
-        trans = ipa_endpoint_trans_alloc(endpoint, 1);
-        if (!trans)
-                return -ENOMEM;
-
         buffer_size = endpoint->data->rx.buffer_size;
         page = dev_alloc_pages(get_order(buffer_size));
         if (!page)
-                goto err_trans_free;
+                return -ENOMEM;
 
         /* Offset the buffer to make space for skb headroom */
         offset = NET_SKB_PAD;
@@ -1061,19 +1056,11 @@ ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint, bool doorbell)
 
         ret = gsi_trans_page_add(trans, page, len, offset);
         if (ret)
-                goto err_free_pages;
-        trans->data = page;        /* transaction owns page now */
-
-        gsi_trans_commit(trans, doorbell);
-
-        return 0;
-
-err_free_pages:
-        __free_pages(page, get_order(buffer_size));
-err_trans_free:
-        gsi_trans_free(trans);
+                __free_pages(page, get_order(buffer_size));
+        else
+                trans->data = page;        /* transaction owns page now */
 
-        return -ENOMEM;
+        return ret;
 }
 
 /**
@@ -1089,6 +1076,7 @@ err_trans_free:
  */
 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
 {
+        struct gsi_trans *trans;
         struct gsi *gsi;
         u32 backlog;
 
@@ -1100,15 +1088,18 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
                 return;
 
         while (atomic_dec_not_zero(&endpoint->replenish_backlog)) {
-                bool doorbell;
+                trans = ipa_endpoint_trans_alloc(endpoint, 1);
+                if (!trans)
+                        break;
+
+                if (ipa_endpoint_replenish_one(endpoint, trans))
+                        goto try_again_later;
 
                 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH)
                         endpoint->replenish_ready = 0;
 
                 /* Ring the doorbell if we've got a full batch */
-                doorbell = !endpoint->replenish_ready;
-                if (ipa_endpoint_replenish_one(endpoint, doorbell))
-                        goto try_again_later;
+                gsi_trans_commit(trans, !endpoint->replenish_ready);
         }
 
         clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
@@ -1116,6 +1107,7 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
         return;
 
 try_again_later:
+        gsi_trans_free(trans);
         clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
         /* The last one didn't succeed, so fix the backlog */