author    Sukadev Bhattiprolu <sukadev@linux.ibm.com>   2021-09-15 06:52:52 +0300
committer David S. Miller <davem@davemloft.net>         2021-09-15 13:12:23 +0300
commit    0f2bf3188c438210dd1f8d92d448d6226f9792e0 (patch)
tree      073e233ef6f587a244861aa256dfebec28284d46 /drivers/net/ethernet/ibm
parent    38106b2c433e886344c33c90522343c37edfacea (diff)
download  linux-0f2bf3188c438210dd1f8d92d448d6226f9792e0.tar.xz
ibmvnic: Fix up some comments and messages
Add/update some comments/function headers and fix up some messages.

Reviewed-by: Rick Lindsley <ricklind@linux.vnet.ibm.com>
Reviewed-by: Dany Madden <drt@linux.ibm.com>
Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 44
1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e8b1231be485..cd04c3eb6c41 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -243,14 +243,13 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
if (rc) {
- dev_err(dev,
- "Long term map request aborted or timed out,rc = %d\n",
+ dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
rc);
goto out;
}
if (adapter->fw_done_rc) {
- dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
+ dev_err(dev, "Couldn't map LTB, rc = %d\n",
adapter->fw_done_rc);
rc = -1;
goto out;
@@ -281,7 +280,9 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
+
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+
ltb->buff = NULL;
ltb->map_id = 0;
}
@@ -574,6 +575,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
return 0;
}
+/**
+ * release_rx_pools() - Release any rx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
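The new kernel-doc's "safe to call multiple times" guarantee comes from the shape of the release path: a NULL guard up front, and clearing the pointer at the end. A minimal sketch of that pattern (simplified, not the driver's exact body; the freeing of per-pool members is elided):

	static void release_rx_pools(struct ibmvnic_adapter *adapter)
	{
		int i;

		if (!adapter->rx_pool)	/* no pools attached: nothing to do */
			return;

		for (i = 0; i < adapter->num_active_rx_pools; i++) {
			/* free the members of adapter->rx_pool[i] here;
			 * kfree() and free_long_term_buff() tolerate
			 * NULL/empty members, so partially initialized
			 * pools are handled too
			 */
		}

		kfree(adapter->rx_pool);
		adapter->rx_pool = NULL;	/* a repeat call exits at the guard */
	}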
@@ -628,6 +635,9 @@ static int init_rx_pools(struct net_device *netdev)
return -1;
}
+ /* Set num_active_rx_pools early. If we fail below after partial
+ * allocation, release_rx_pools() will know how many to look for.
+ */
adapter->num_active_rx_pools = rxadd_subcrqs;
for (i = 0; i < rxadd_subcrqs; i++) {
@@ -646,6 +656,7 @@ static int init_rx_pools(struct net_device *netdev)
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
GFP_KERNEL);
if (!rx_pool->free_map) {
+ dev_err(dev, "Couldn't alloc free_map %d\n", i);
release_rx_pools(adapter);
return -1;
}
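Setting num_active_rx_pools before the allocation loop is what makes the error path above work: if the kcalloc() of a free_map fails partway through, release_rx_pools() already knows how many pool slots to walk, and the not-yet-allocated members are simply NULL. Condensed from the diff:

	adapter->num_active_rx_pools = rxadd_subcrqs;	/* before any per-pool allocation */

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			/* frees pools 0..i-1; kfree(NULL) is a no-op for
			 * the untouched members of pools i..rxadd_subcrqs-1
			 */
			release_rx_pools(adapter);
			return -1;
		}
	}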
@@ -739,10 +750,19 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
free_long_term_buff(adapter, &tx_pool->long_term_buff);
}
+/**
+ * release_tx_pools() - Release any tx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
int i;
+ /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
+ * both NULL or both non-NULL. So we only need to check one.
+ */
if (!adapter->tx_pool)
return;
@@ -793,6 +813,7 @@ static int init_one_tx_pool(struct net_device *netdev,
static int init_tx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
int tx_subcrqs;
u64 buff_size;
int i, rc;
@@ -805,17 +826,27 @@ static int init_tx_pools(struct net_device *netdev)
adapter->tso_pool = kcalloc(tx_subcrqs,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+ /* To simplify release_tx_pools() ensure that ->tx_pool and
+ * ->tso_pool are either both NULL or both non-NULL.
+ */
if (!adapter->tso_pool) {
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
return -1;
}
+ /* Set num_active_tx_pools early. If we fail below after partial
+ * allocation, release_tx_pools() will know how many to look for.
+ */
adapter->num_active_tx_pools = tx_subcrqs;
for (i = 0; i < tx_subcrqs; i++) {
buff_size = adapter->req_mtu + VLAN_HLEN;
buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
+
+ dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
+ i, adapter->req_tx_entries_per_subcrq, buff_size);
+
rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
adapter->req_tx_entries_per_subcrq,
buff_size);
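The invariant called out twice above — ->tx_pool and ->tso_pool are either both NULL or both non-NULL — is established at allocation time so that release_tx_pools() only has to test one pointer. Condensed from this hunk and the release path:

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool) {
		/* undo the earlier tx_pool allocation so the two
		 * pointers go NULL together
		 */
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -1;
	}

	/* ... and in release_tx_pools(), one check covers both: */
	if (!adapter->tx_pool)
		return;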
@@ -4774,9 +4805,10 @@ static void handle_query_map_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
return;
}
- netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
- crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
- crq->query_map_rsp.free_pages);
+ netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
+ crq->query_map_rsp.page_size,
+ __be32_to_cpu(crq->query_map_rsp.tot_pages),
+ __be32_to_cpu(crq->query_map_rsp.free_pages));
}
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
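The handle_query_map_rsp() change is more than message polish: CRQ response fields arrive in big-endian byte order, so tot_pages and free_pages need __be32_to_cpu() before being printed, and %u matches the unsigned result (page_size is left untouched, presumably because it is a single-byte field). A hypothetical value shows why the conversion matters on a little-endian host:

	__be32 wire = cpu_to_be32(1024);	/* 1024 pages on the wire: bytes 00 00 04 00 */
	u32 pages = __be32_to_cpu(wire);	/* correct: 1024 */
	u32 raw   = *(u32 *)&wire;		/* reinterpreted without conversion:
						 * 0x00040000 = 262144 on little-endian
						 */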