author     David Vrabel <david.vrabel@citrix.com>  2015-02-24 14:17:59 +0300
committer  David S. Miller <davem@davemloft.net>   2015-02-25 00:24:22 +0300
commit     7fbb9d8415d4a51cf542e87cf3a717a9f7e6aedc (patch)
tree       de9e7b9db0b88366ca381c2c405d32ba75a7a454 /drivers
parent     41a50d621a321b4c15273cc1b5ed41437f4acdfb (diff)
download   linux-7fbb9d8415d4a51cf542e87cf3a717a9f7e6aedc.tar.xz
xen-netback: release pending index before pushing Tx responses
If the pending indexes are released /after/ pushing the Tx response then a stale pending index may be used if a new Tx request is immediately pushed by the frontend. This may cause various WARNINGs or BUGs if the stale pending index is actually still in use.

Fix this by releasing the pending index before pushing the Tx response.

The full barrier for the pending ring update is not required since the Tx response push already has a suitable write barrier.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
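For illustration, below is a minimal, self-contained user-space C model of the ordering the patch establishes: the pending slot is returned to the ring first, and only then is the response made visible, so the slot is already usable by the time the frontend can react and push a new request. The names model_queue and idx_release_then_push are hypothetical stand-ins, not the Xen ring API; the release-ordered store merely plays the role of the write barrier that RING_PUSH_RESPONSES_AND_CHECK_NOTIFY already provides, which is why the patch can drop the explicit mb().

/*
 * Minimal user-space model of the ordering fixed by this patch.
 * The struct and helper names mirror the netback code loosely but are
 * simplified stand-ins, not the Xen shared-ring API.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256

struct model_queue {
	uint16_t pending_ring[RING_SIZE];
	atomic_uint pending_prod;   /* backend returns free slots here      */
	atomic_uint rsp_prod;       /* frontend observes responses via this */
};

/* Release a pending slot, then publish the response.  Because the slot is
 * written back to pending_ring before rsp_prod is advanced with release
 * ordering, any frontend that sees the new rsp_prod (and immediately pushes
 * a fresh request) is guaranteed to find the slot already released. */
static void idx_release_then_push(struct model_queue *q, uint16_t pending_idx)
{
	unsigned int i = atomic_load_explicit(&q->pending_prod,
					      memory_order_relaxed);

	q->pending_ring[i % RING_SIZE] = pending_idx;
	atomic_store_explicit(&q->pending_prod, i + 1, memory_order_relaxed);

	/* Publish the response; the release store orders the pending ring
	 * update before the frontend-visible producer index. */
	atomic_fetch_add_explicit(&q->rsp_prod, 1, memory_order_release);
}

int main(void)
{
	struct model_queue q = { 0 };

	idx_release_then_push(&q, 42);
	printf("pending_prod=%u rsp_prod=%u\n",
	       atomic_load(&q.pending_prod), atomic_load(&q.rsp_prod));
	return 0;
}

Built as ordinary C11 (e.g. cc -std=c11), the sketch only demonstrates the ordering argument from the commit message; the response lock and event-channel notification of the real driver are omitted.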
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/net/xen-netback/netback.c | 29
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..c4d68d768408 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
unsigned long flags;
do {
+ int notify;
+
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
spin_unlock_irqrestore(&queue->response_lock, flags);
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
+
if (cons == end)
break;
txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
+ int notify;
unsigned long flags;
pending_tx_info = &queue->pending_tx_info[pending_idx];
+
spin_lock_irqsave(&queue->response_lock, flags);
+
make_tx_response(queue, &pending_tx_info->req, status);
- index = pending_index(queue->pending_prod);
+
+ /* Release the pending index before pushing the Tx response so
+ * it's available before a new Tx request is pushed by the
+ * frontend.
+ */
+ index = pending_index(queue->pending_prod++);
queue->pending_ring[index] = pending_idx;
- /* TX shouldn't use the index before we give it back here */
- mb();
- queue->pending_prod++;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+
spin_unlock_irqrestore(&queue->response_lock, flags);
+
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
}
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
{
RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
- int notify;
resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
- if (notify)
- notify_remote_via_irq(queue->tx_irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,