author    Mitch Williams <mitch.a.williams@intel.com>    2019-05-14 20:37:09 +0300
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>     2019-06-18 01:39:26 +0300
commit    efa14c3985828da3163f5372137cb64d992b0f79 (patch)
tree      3b2fa40bc5029a871cc45ebaf97b772f08edb7b6
parent    68dfe6348f06945f900bfea803e1e030e7e158e6 (diff)
iavf: allow null RX descriptors
In some circumstances, the hardware can hand us a null receive
descriptor, with no data attached but otherwise valid. Unfortunately,
the driver was ill-equipped to handle such an event, and would stop
processing packets at that point.

To fix this, use the Descriptor Done bit instead of the size to
determine whether or not a descriptor is ready to be processed. Add
some checks to allow for unused buffers.

Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
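For reference, the Descriptor Done (DD) test introduced below reduces to
checking one status bit in the descriptor's write-back qword. A minimal
sketch, assuming the usual Intel staterr layout; iavf_desc_done() is a
hypothetical name used here for illustration, while the patch itself
relies on the driver's existing iavf_test_staterr() helper:

	/* Hypothetical illustration only; the driver uses iavf_test_staterr(). */
	static inline bool iavf_desc_done(union iavf_rx_desc *rx_desc)
	{
		/* qword1 carries the status/error/length fields the hardware
		 * writes back; DD is set once the descriptor is valid, even
		 * for a null descriptor whose length field is zero.
		 */
		u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		return !!(qword & BIT(IAVF_RX_DESC_STATUS_DD_SHIFT));
	}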
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index d28b57937245..1cde1601bc32 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1236,6 +1236,9 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
 	unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
 #endif
 
+	if (!size)
+		return;
+
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
 			rx_buffer->page_offset, size, truesize);
 
@@ -1260,6 +1263,9 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
 {
 	struct iavf_rx_buffer *rx_buffer;
 
+	if (!size)
+		return NULL;
+
 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
 	prefetchw(rx_buffer->page);
 
@@ -1299,6 +1305,8 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
 	unsigned int headlen;
 	struct sk_buff *skb;
 
+	if (!rx_buffer)
+		return NULL;
 	/* prefetch first cache line of first page */
 	prefetch(va);
 #if L1_CACHE_BYTES < 128
@@ -1363,6 +1371,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 #endif
 	struct sk_buff *skb;
 
+	if (!rx_buffer)
+		return NULL;
 	/* prefetch first cache line of first page */
 	prefetch(va);
 #if L1_CACHE_BYTES < 128
@@ -1398,6 +1408,9 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
 			       struct iavf_rx_buffer *rx_buffer)
 {
+	if (!rx_buffer)
+		return;
+
 	if (iavf_can_reuse_rx_page(rx_buffer)) {
 		/* hand second half of page back to the ring */
 		iavf_reuse_rx_page(rx_ring, rx_buffer);
@@ -1496,11 +1509,12 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		 * verified the descriptor has been written back.
 		 */
 		dma_rmb();
+#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
+		if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+			break;
 
 		size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
 		       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
-		if (!size)
-			break;
 
 		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
 		rx_buffer = iavf_get_rx_buffer(rx_ring, size);
@@ -1516,7 +1530,8 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
 			rx_ring->rx_stats.alloc_buff_failed++;
-			rx_buffer->pagecnt_bias++;
+			if (rx_buffer)
+				rx_buffer->pagecnt_bias++;
 			break;
 		}
 
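Taken together: the cleanup loop now stops only when DD is clear, so a
descriptor with DD set but a zero packet-buffer length is consumed
instead of stalling the ring, and the helper changes above let the
resulting NULL buffer flow through safely. A standalone sketch of the
old versus new stop condition, using hypothetical stand-in macros with
assumed bit positions (DD at bit 0, buffer length at bit 38, per the
usual i40e/iavf descriptor layout):

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical stand-ins; the real definitions live in iavf_type.h. */
	#define DD_BIT    (1ULL << 0)        /* assumed IAVF_RX_DESC_STATUS_DD_SHIFT == 0 */
	#define LEN_SHIFT 38                 /* assumed IAVF_RXD_QW1_LENGTH_PBUF_SHIFT */
	#define LEN_MASK  (0x3FFFULL << LEN_SHIFT)

	/* Old criterion: a null descriptor (DD set, length 0) looks
	 * "not ready", so the loop breaks and never advances past it.
	 */
	static bool ready_old(uint64_t qword1)
	{
		return ((qword1 & LEN_MASK) >> LEN_SHIFT) != 0;
	}

	/* New criterion: ready whenever hardware has written the
	 * descriptor back, regardless of length.
	 */
	static bool ready_new(uint64_t qword1)
	{
		return (qword1 & DD_BIT) != 0;
	}

For a null descriptor, qword1 == DD_BIT: ready_old() returns false and
the ring stalls on it forever, while ready_new() returns true and the
descriptor is cleaned like any other.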