Diffstat (limited to 'drivers/net/ethernet/amazon/ena/ena_netdev.c')
-rw-r--r-- | drivers/net/ethernet/amazon/ena/ena_netdev.c | 64
1 file changed, 35 insertions, 29 deletions
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index d0121aaafa38..cab83a9de651 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -112,7 +112,7 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
 	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
 	if (!ret) {
-		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
+		netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
 		update_rx_ring_mtu(adapter, new_mtu);
 		dev->mtu = new_mtu;
 	} else {
@@ -151,7 +151,7 @@ static int ena_xmit_common(struct net_device *dev,
 	 */
 	if (unlikely(rc)) {
 		netif_err(adapter, tx_queued, dev,
-			  "failed to prepare tx bufs\n");
+			  "Failed to prepare tx bufs\n");
 		u64_stats_update_begin(&ring->syncp);
 		ring->tx_stats.prepare_ctx_err++;
 		u64_stats_update_end(&ring->syncp);
@@ -265,7 +265,7 @@ error_report_dma_error:
 	u64_stats_update_begin(&xdp_ring->syncp);
 	xdp_ring->tx_stats.dma_mapping_err++;
 	u64_stats_update_end(&xdp_ring->syncp);
-	netif_warn(adapter, tx_queued, adapter->netdev, "failed to map xdp buff\n");
+	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
 
 	xdp_return_frame_rx_napi(tx_info->xdpf);
 	tx_info->xdpf = NULL;
@@ -537,7 +537,7 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
 		if (!old_bpf_prog)
 			netif_info(adapter, drv, adapter->netdev,
-				   "xdp program set, changing the max_mtu from %d to %d",
+				   "XDP program is set, changing the max_mtu from %d to %d",
 				   prev_mtu, netdev->max_mtu);
 
 	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
@@ -956,7 +956,7 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 		return -EIO;
 	}
 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
-		  "alloc page %p, rx_info %p\n", page, rx_info);
+		  "Allocate page %p, rx_info %p\n", page, rx_info);
 
 	rx_info->page = page;
 	rx_info->page_offset = 0;
@@ -1006,7 +1006,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 				       GFP_ATOMIC | __GFP_COMP);
 		if (unlikely(rc < 0)) {
 			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
-				   "failed to alloc buffer for rx queue %d\n",
+				   "Failed to allocate buffer for rx queue %d\n",
 				   rx_ring->qid);
 			break;
 		}
@@ -1015,7 +1015,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 					   req_id);
 		if (unlikely(rc)) {
 			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
-				   "failed to add buffer for rx queue %d\n",
+				   "Failed to add buffer for rx queue %d\n",
 				   rx_ring->qid);
 			break;
 		}
@@ -1028,7 +1028,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 		rx_ring->rx_stats.refil_partial++;
 		u64_stats_update_end(&rx_ring->syncp);
 		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
-			   "refilled rx qid %d with only %d buffers (from %d)\n",
+			   "Refilled rx qid %d with only %d buffers (from %d)\n",
 			   rx_ring->qid, i, num);
 	}
 
@@ -1070,7 +1070,7 @@ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
 		if (unlikely(rc != bufs_num))
 			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
-				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
+				   "Refilling Queue %d failed. allocated %d buffers from: %d\n",
 				   i, rc, bufs_num);
 	}
 }
@@ -1129,12 +1129,12 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
 		if (print_once) {
 			netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
-				     "free uncompleted tx skb qid %d idx 0x%x\n",
+				     "Free uncompleted tx skb qid %d idx 0x%x\n",
 				     tx_ring->qid, i);
 			print_once = false;
 		} else {
 			netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
-				  "free uncompleted tx skb qid %d idx 0x%x\n",
+				  "Free uncompleted tx skb qid %d idx 0x%x\n",
 				  tx_ring->qid, i);
 		}
 
@@ -1387,7 +1387,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 			return NULL;
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
-			  "rx allocated small packet. len %d. data_len %d\n",
+			  "RX allocated small packet. len %d. data_len %d\n",
 			  skb->len, skb->data_len);
 
 		/* sync this buffer for CPU use */
@@ -1424,7 +1424,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 		rx_info->page_offset = 0;
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
-			  "rx skb updated. len %d. data_len %d\n",
+			  "RX skb updated. len %d. data_len %d\n",
 			  skb->len, skb->data_len);
 
 		rx_info->page = NULL;
@@ -1631,6 +1631,11 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 				 &next_to_clean);
 
 		if (unlikely(!skb)) {
+			/* The page might not actually be freed here since the
+			 * page reference count is incremented in
+			 * ena_xdp_xmit_buff(), and it will be decreased only
+			 * when send completion was received from the device
+			 */
 			if (xdp_verdict == XDP_TX)
 				ena_free_rx_page(rx_ring,
 						 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
@@ -1758,6 +1763,7 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
 	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->tx_stats.unmask_interrupt++;
 	u64_stats_update_end(&tx_ring->syncp);
+
 	/* It is a shared MSI-X.
 	 * Tx and Rx CQ have pointer to it.
 	 * So we use one of them to reach the intr reg
@@ -1975,7 +1981,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
 	/* Reserved the max msix vectors we might need */
 	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
 	netif_dbg(adapter, probe, adapter->netdev,
-		  "trying to enable MSI-X, vectors %d\n", msix_vecs);
+		  "Trying to enable MSI-X, vectors %d\n", msix_vecs);
 
 	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
 					msix_vecs, PCI_IRQ_MSIX);
@@ -1988,7 +1994,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
 	if (irq_cnt != msix_vecs) {
 		netif_notice(adapter, probe, adapter->netdev,
-			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
+			     "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
 			     irq_cnt, msix_vecs);
 		adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
 	}
@@ -2058,12 +2064,12 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
 			 irq->data);
 	if (rc) {
 		netif_err(adapter, probe, adapter->netdev,
-			  "failed to request admin irq\n");
+			  "Failed to request admin irq\n");
 		return rc;
 	}
 
 	netif_dbg(adapter, probe, adapter->netdev,
-		  "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
+		  "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
 		  irq->affinity_hint_mask.bits[0], irq->vector);
 
 	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
@@ -2096,7 +2102,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
 		}
 
 		netif_dbg(adapter, ifup, adapter->netdev,
-			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
+			  "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
 			  i, irq->affinity_hint_mask.bits[0], irq->vector);
 
 		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
@@ -2943,7 +2949,7 @@ error_report_dma_error:
 	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->tx_stats.dma_mapping_err++;
 	u64_stats_update_end(&tx_ring->syncp);
-	netif_warn(adapter, tx_queued, adapter->netdev, "failed to map skb\n");
+	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
 
 	tx_info->skb = NULL;
@@ -3353,7 +3359,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 	if (rc) {
-		dev_err(dev, "failed to init mmio read less\n");
+		dev_err(dev, "Failed to init mmio read less\n");
 		return rc;
 	}
@@ -3371,7 +3377,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 	rc = ena_com_validate_version(ena_dev);
 	if (rc) {
-		dev_err(dev, "device version is too low\n");
+		dev_err(dev, "Device version is too low\n");
 		goto err_mmio_read_less;
 	}
@@ -3440,7 +3446,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
 					     &llq_config);
 	if (rc) {
-		dev_err(&pdev->dev, "ena device init failed\n");
+		dev_err(dev, "ENA device init failed\n");
 		goto err_admin_init;
 	}
@@ -3781,7 +3787,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
 			u64_stats_update_end(&rx_ring->syncp);
 
 			netif_err(adapter, drv, adapter->netdev,
-				  "trigger refill for ring %d\n", i);
+				  "Trigger refill for ring %d\n", i);
 
 			napi_schedule(rx_ring->napi);
 			rx_ring->empty_rx_queue = 0;
@@ -4182,7 +4188,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 					pci_resource_start(pdev, ENA_REG_BAR),
 					pci_resource_len(pdev, ENA_REG_BAR));
 	if (!ena_dev->reg_bar) {
-		dev_err(&pdev->dev, "failed to remap regs bar\n");
+		dev_err(&pdev->dev, "Failed to remap regs bar\n");
 		rc = -EFAULT;
 		goto err_free_region;
 	}
@@ -4193,7 +4199,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
 	if (rc) {
-		dev_err(&pdev->dev, "ena device init failed\n");
+		dev_err(&pdev->dev, "ENA device init failed\n");
 		if (rc == -ETIME)
 			rc = -EPROBE_DEFER;
 		goto err_free_region;
 	}
@@ -4201,7 +4207,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
 	if (rc) {
-		dev_err(&pdev->dev, "ena llq bar mapping failed\n");
+		dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
 		goto err_free_ena_dev;
 	}
@@ -4466,7 +4472,7 @@ static int __maybe_unused ena_suspend(struct device *dev_d)
 	rtnl_lock();
 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
 		dev_err(&pdev->dev,
-			"ignoring device reset request as the device is being suspended\n");
+			"Ignoring device reset request as the device is being suspended\n");
 		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 	}
 	ena_destroy_device(adapter, true);
@@ -4585,7 +4591,7 @@ static void ena_notification(void *adapter_data,
 		  aenq_e->aenq_common_desc.group,
 		  ENA_ADMIN_NOTIFICATION);
 
-	switch (aenq_e->aenq_common_desc.syndrom) {
+	switch (aenq_e->aenq_common_desc.syndrome) {
 	case ENA_ADMIN_UPDATE_HINTS:
 		hints = (struct ena_admin_ena_hw_hints *)
 			(&aenq_e->inline_data_w4);
@@ -4594,7 +4600,7 @@ static void ena_notification(void *adapter_data,
 	default:
 		netif_err(adapter, drv, adapter->netdev,
 			  "Invalid aenq notification link state %d\n",
-			  aenq_e->aenq_common_desc.syndrom);
+			  aenq_e->aenq_common_desc.syndrome);
 	}
 }