author	Robert Hancock <robert.hancock@calian.com>	2022-05-12 20:18:52 +0300
committer	David S. Miller <davem@davemloft.net>	2022-05-13 14:22:11 +0300
commit	f0cf4000f5867ec4325d19d32bd83cf583065667 (patch)
tree	c2c09a341e6e456612642c389edc734114d81eda /drivers/net/ethernet/xilinx
parent	4915d50e300e96929d2462041d6f6c6f061167fd (diff)
net: axienet: Be more careful about updating tx_bd_tail
The axienet_start_xmit function was updating the tx_bd_tail variable
multiple times, with potential rollbacks on error or invalid
intermediate positions, even though this variable is also used in the
TX completion path. Use READ_ONCE where this variable is read and
WRITE_ONCE where it is written to make this update more atomic, and
move the update before the MMIO write that starts the transfer, so it
is protected by that implicit write barrier.

Signed-off-by: Robert Hancock <robert.hancock@calian.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
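As a rough sketch of the single-writer pattern the patch adopts (the
xmit path advances a local copy of the tail index and publishes it
exactly once, before kicking the hardware), using hypothetical names
(struct demo_bd, struct demo_ring, demo_xmit, the doorbell field);
only READ_ONCE(), WRITE_ONCE() and writel() are real kernel
interfaces, and this is illustration, not the driver's actual code:

#include <linux/compiler.h>
#include <linux/io.h>

/* Hypothetical ring, mirroring the shape of the driver's state for
 * illustration only; the real fields live in struct axienet_local.
 */
struct demo_bd {
	unsigned int cntrl;	/* non-zero while hardware owns the BD */
};

struct demo_ring {
	struct demo_bd *bd;	/* descriptor array, num entries */
	unsigned int num;	/* ring size */
	unsigned int tail;	/* written only by the xmit path */
	void __iomem *doorbell;	/* hypothetical MMIO tail register */
};

static void demo_xmit(struct demo_ring *r, unsigned int nfrags)
{
	/* Work on a local copy; a concurrent reader can never observe
	 * an intermediate or rolled-back index, because nothing is
	 * stored back to r->tail until the final value is known.
	 */
	unsigned int tail = READ_ONCE(r->tail);

	tail = (tail + nfrags) % r->num;

	/* Publish the final value with a single store... */
	WRITE_ONCE(r->tail, tail);

	/* ...and only then kick the hardware: writel() implies a write
	 * barrier before the MMIO access, so the tail store above
	 * cannot be reordered after the doorbell write.
	 */
	writel(tail, r->doorbell);
}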
Diffstat (limited to 'drivers/net/ethernet/xilinx')
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c |  26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d6fc3f7acdf0..1e0f70e53991 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -746,7 +746,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 
 	/* Ensure we see all descriptor updates from device or TX IRQ path */
 	rmb();
-	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
+			     lp->tx_bd_num];
 	if (cur_p->cntrl)
 		return NETDEV_TX_BUSY;
 	return 0;
@@ -807,12 +808,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 csum_index_off;
 	skb_frag_t *frag;
 	dma_addr_t tail_p, phys;
+	u32 orig_tail_ptr, new_tail_ptr;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct axidma_bd *cur_p;
-	u32 orig_tail_ptr = lp->tx_bd_tail;
+
+	orig_tail_ptr = lp->tx_bd_tail;
+	new_tail_ptr = orig_tail_ptr;
 
 	num_frag = skb_shinfo(skb)->nr_frags;
-	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+	cur_p = &lp->tx_bd_v[orig_tail_ptr];
 
 	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
 		/* Should not happen as last start_xmit call should have
@@ -852,9 +856,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 
 	for (ii = 0; ii < num_frag; ii++) {
-		if (++lp->tx_bd_tail >= lp->tx_bd_num)
-			lp->tx_bd_tail = 0;
-		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+		if (++new_tail_ptr >= lp->tx_bd_num)
+			new_tail_ptr = 0;
+		cur_p = &lp->tx_bd_v[new_tail_ptr];
 		frag = &skb_shinfo(skb)->frags[ii];
 		phys = dma_map_single(lp->dev,
 				      skb_frag_address(frag),
@@ -866,8 +870,6 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			ndev->stats.tx_dropped++;
 			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
 					      NULL);
-			lp->tx_bd_tail = orig_tail_ptr;
-
 			return NETDEV_TX_OK;
 		}
 		desc_set_phys_addr(lp, phys, cur_p);
@@ -877,11 +879,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
 	cur_p->skb = skb;
 
-	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
+	if (++new_tail_ptr >= lp->tx_bd_num)
+		new_tail_ptr = 0;
+	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+
 	/* Start the transfer */
 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
-	if (++lp->tx_bd_tail >= lp->tx_bd_num)
-		lp->tx_bd_tail = 0;
 
 	/* Stop queue if next transmit may not have space */
 	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
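A matching sketch of the reader side of this pairing, in the role that
axienet_check_tx_bd_space plays above (reusing the hypothetical
struct demo_ring and includes from the earlier sketch; rmb() is the
real kernel barrier, from <asm/barrier.h>): because the xmit path now
publishes the tail with a single WRITE_ONCE, the space-check path can
take one READ_ONCE snapshot and index the ring only through that copy.

static bool demo_ring_full(struct demo_ring *r, unsigned int need)
{
	unsigned int tail;

	/* Ensure we see the device's and IRQ path's descriptor
	 * updates before sampling the published tail.
	 */
	rmb();
	tail = READ_ONCE(r->tail);

	/* A control word still set 'need' slots ahead means the
	 * hardware has not released that descriptor yet: no room.
	 */
	return r->bd[(tail + need) % r->num].cntrl != 0;
}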