author    Dwight Engen <dwight.engen@oracle.com>  2014-09-19 17:43:02 +0400
committer David S. Miller <davem@davemloft.net>   2014-10-01 01:37:35 +0400
commit    d0aedcd4f14a22e23b313f42b7e6e6ebfc0fbc31 (patch)
tree      fda0cda8ed2b6e255146ef35e19f8b205d2a6b11 /drivers/net/ethernet/sun
parent    5eed69ffd248c9f68f56c710caf07db134aef28b (diff)
download  linux-d0aedcd4f14a22e23b313f42b7e6e6ebfc0fbc31.tar.xz
vio: fix reuse of vio_dring slot
vio_dring_avail() will allow use of every dring entry, but when the last entry is allocated then dr->prod == dr->cons, which is indistinguishable from the ring empty condition. This causes the next allocation to reuse an entry. When this happens in sunvdc, the server side vds driver begins nack'ing the messages and ends up resetting the ldc channel. This problem does not affect sunvnet since it checks for < 2.

The fix here is to just never allocate the very last dring slot so that full and empty are not the same condition. The request start path was changed to check for the ring being full a bit earlier, and to stop the blk_queue if there is no space left. The blk_queue will be restarted once the ring is only half full again. The number of ring entries was increased to 512, which matches the sunvnet and Solaris vdc drivers, and greatly reduces the frequency of hitting the ring full condition and the associated blk_queue stop/starting. The checks in sunvnet were adjusted to account for vio_dring_avail() returning 1 less.

Orabug: 19441666
OraBZ: 14983

Signed-off-by: Dwight Engen <dwight.engen@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
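The full/empty ambiguity is easiest to see in a standalone model. Below is a minimal C sketch of the bug and the fix; struct ring, RING_SIZE, and the ring_avail_*() helpers are illustrative names, not the kernel's vio structures or the actual vio_dring_avail() implementation.

#include <stdio.h>

#define RING_SIZE 512 /* power of two, as in the drivers */

struct ring {
	unsigned int prod; /* next slot the producer will fill */
	unsigned int cons; /* next slot the consumer will drain */
};

/* Broken accounting: every slot may be handed out, so a completely
 * full ring ends with prod == cons -- the same state as empty. */
static unsigned int ring_avail_broken(const struct ring *r)
{
	return RING_SIZE - ((r->prod - r->cons) & (RING_SIZE - 1));
}

/* Fixed accounting: report one slot fewer, so prod can never wrap
 * all the way around to cons; prod == cons now always means empty. */
static unsigned int ring_avail_fixed(const struct ring *r)
{
	return (RING_SIZE - 1) - ((r->prod - r->cons) & (RING_SIZE - 1));
}

int main(void)
{
	struct ring r = { 0, 0 };
	unsigned int i;

	/* Allocate every slot the broken accounting allows. */
	for (i = 0; i < RING_SIZE; i++)
		r.prod = (r.prod + 1) & (RING_SIZE - 1);

	/* prod has wrapped back onto cons: the full ring now reports
	 * RING_SIZE free slots, and the next allocation reuses slot 0. */
	printf("broken: prod=%u cons=%u avail=%u\n",
	       r.prod, r.cons, ring_avail_broken(&r));

	/* With the fix, allocation stops one slot short of wrapping. */
	r.prod = r.cons = 0;
	while (ring_avail_fixed(&r) > 0)
		r.prod = (r.prod + 1) & (RING_SIZE - 1);
	printf("fixed:  prod=%u cons=%u avail=%u\n",
	       r.prod, r.cons, ring_avail_fixed(&r));
	return 0;
}

Reserving one slot trades a single ring entry for an unambiguous invariant, which is why the sunvnet checks below can drop from < 2 to < 1: the helper itself now reports one less.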
Diffstat (limited to 'drivers/net/ethernet/sun')
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 23c89ab5a6ad..b7cca71fbc90 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -690,7 +690,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&port->vio.lock, flags);
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
@@ -746,7 +746,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
- if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
netif_stop_queue(dev);
if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
netif_wake_queue(dev);
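The second hunk preserves a stop-then-recheck idiom: stop the queue when the ring is full, then immediately re-test availability in case the consumer drained entries between the check and the stop. A hedged C sketch of that pattern follows, reusing the toy ring from above; queue_stop(), queue_wake(), and wakeup_thresh() are hypothetical stand-ins for netif_stop_queue(), netif_wake_queue(), and VNET_TX_WAKEUP_THRESH(), whose real definitions live in the kernel sources.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 512

struct ring {
	unsigned int prod;
	unsigned int cons;
};

/* Fixed accounting from the sketch above: one slot stays reserved. */
static unsigned int ring_avail(const struct ring *r)
{
	return (RING_SIZE - 1) - ((r->prod - r->cons) & (RING_SIZE - 1));
}

static bool queue_stopped;

static void queue_stop(void) { queue_stopped = true; }
static void queue_wake(void) { queue_stopped = false; }

/* Stand-in for VNET_TX_WAKEUP_THRESH(); this fraction is only for
 * illustration, not the driver's actual threshold. */
static unsigned int wakeup_thresh(void)
{
	return RING_SIZE / 4;
}

/* Mirrors the second hunk: after producing an entry, stop the queue
 * if the ring is now full, then re-check in case the consumer drained
 * entries in the meantime, so the queue is not left stopped forever. */
static void produce_one(struct ring *r)
{
	r->prod = (r->prod + 1) & (RING_SIZE - 1);

	if (ring_avail(r) < 1) {
		queue_stop();
		/* In the driver a concurrent consumer may have freed
		 * slots by now; in this single-threaded model the
		 * re-check never fires, but the ordering is the point. */
		if (ring_avail(r) > wakeup_thresh())
			queue_wake();
	}
}

int main(void)
{
	struct ring r = { 0, 0 };

	while (!queue_stopped)
		produce_one(&r);
	printf("stopped with avail=%u\n", ring_avail(&r));
	return 0;
}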