Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c | 272
1 file changed, 162 insertions(+), 110 deletions(-)
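The bulk of this patch converts virtio_net's per-queue counters from plain u64 fields to u64_stats_t and switches the open-coded dev->stats increments to the DEV_STATS_INC()/DEV_STATS_READ() helpers. Below is a minimal sketch, not part of the patch, of the u64_stats_t accessor pattern the conversion adopts; the struct and function names (example_stats, example_update, example_read) are made up for illustration, and the syncp is assumed to have been set up with u64_stats_init() before first use.

/*
 * Illustrative only: writer bumps counters inside an update section,
 * reader retries until it observes a consistent snapshot.
 */
#include <linux/u64_stats_sync.h>

struct example_stats {
	struct u64_stats_sync syncp;	/* init with u64_stats_init() */
	u64_stats_t packets;
	u64_stats_t bytes;
};

/* Writer side (e.g. NAPI poll / xmit completion). */
static void example_update(struct example_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->packets);
	u64_stats_add(&stats->bytes, len);
	u64_stats_update_end(&stats->syncp);
}

/* Reader side (e.g. ndo_get_stats64 / ethtool stats). */
static void example_read(struct example_stats *stats, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = u64_stats_read(&stats->packets);
		*bytes = u64_stats_read(&stats->bytes);
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}

On 32-bit kernels the u64_stats_* helpers provide the seqcount protection that plain u64 fields lack; on 64-bit kernels they compile down to ordinary loads and stores, so the conversion in the diff below is essentially free there.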
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fe7f314d65c9..d16f592c2061 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -81,24 +81,24 @@ struct virtnet_stat_desc {
struct virtnet_sq_stats {
struct u64_stats_sync syncp;
- u64 packets;
- u64 bytes;
- u64 xdp_tx;
- u64 xdp_tx_drops;
- u64 kicks;
- u64 tx_timeouts;
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t xdp_tx;
+ u64_stats_t xdp_tx_drops;
+ u64_stats_t kicks;
+ u64_stats_t tx_timeouts;
};
struct virtnet_rq_stats {
struct u64_stats_sync syncp;
- u64 packets;
- u64 bytes;
- u64 drops;
- u64 xdp_packets;
- u64 xdp_tx;
- u64 xdp_redirects;
- u64 xdp_drops;
- u64 kicks;
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t drops;
+ u64_stats_t xdp_packets;
+ u64_stats_t xdp_tx;
+ u64_stats_t xdp_redirects;
+ u64_stats_t xdp_drops;
+ u64_stats_t kicks;
};
#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
@@ -607,16 +607,16 @@ static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
--dma->ref;
- if (dma->ref) {
- if (dma->need_sync && len) {
- offset = buf - (head + sizeof(*dma));
+ if (dma->need_sync && len) {
+ offset = buf - (head + sizeof(*dma));
- virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
- len, DMA_FROM_DEVICE);
- }
+ virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
+ offset, len,
+ DMA_FROM_DEVICE);
+ }
+ if (dma->ref)
return;
- }
virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
@@ -775,8 +775,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
return;
u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.bytes += bytes;
- sq->stats.packets += packets;
+ u64_stats_add(&sq->stats.bytes, bytes);
+ u64_stats_add(&sq->stats.packets, packets);
u64_stats_update_end(&sq->stats.syncp);
}
@@ -975,11 +975,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
out:
u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.bytes += bytes;
- sq->stats.packets += packets;
- sq->stats.xdp_tx += n;
- sq->stats.xdp_tx_drops += n - nxmit;
- sq->stats.kicks += kicks;
+ u64_stats_add(&sq->stats.bytes, bytes);
+ u64_stats_add(&sq->stats.packets, packets);
+ u64_stats_add(&sq->stats.xdp_tx, n);
+ u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
+ u64_stats_add(&sq->stats.kicks, kicks);
u64_stats_update_end(&sq->stats.syncp);
virtnet_xdp_put_sq(vi, sq);
@@ -1011,14 +1011,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
- stats->xdp_packets++;
+ u64_stats_inc(&stats->xdp_packets);
switch (act) {
case XDP_PASS:
return act;
case XDP_TX:
- stats->xdp_tx++;
+ u64_stats_inc(&stats->xdp_tx);
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
netdev_dbg(dev, "convert buff to frame failed for xdp\n");
@@ -1036,7 +1036,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
return act;
case XDP_REDIRECT:
- stats->xdp_redirects++;
+ u64_stats_inc(&stats->xdp_redirects);
err = xdp_do_redirect(dev, xdp, xdp_prog);
if (err)
return XDP_DROP;
@@ -1232,9 +1232,9 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
return skb;
err_xdp:
- stats->xdp_drops++;
+ u64_stats_inc(&stats->xdp_drops);
err:
- stats->drops++;
+ u64_stats_inc(&stats->drops);
put_page(page);
xdp_xmit:
return NULL;
@@ -1253,12 +1253,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct sk_buff *skb;
len -= vi->hdr_len;
- stats->bytes += len;
+ u64_stats_add(&stats->bytes, len);
if (unlikely(len > GOOD_PACKET_LEN)) {
pr_debug("%s: rx error: len %u exceeds max size %d\n",
dev->name, len, GOOD_PACKET_LEN);
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
@@ -1282,7 +1282,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
return skb;
err:
- stats->drops++;
+ u64_stats_inc(&stats->drops);
put_page(page);
return NULL;
}
@@ -1298,14 +1298,14 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct sk_buff *skb =
page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
- stats->bytes += len - vi->hdr_len;
+ u64_stats_add(&stats->bytes, len - vi->hdr_len);
if (unlikely(!skb))
goto err;
return skb;
err:
- stats->drops++;
+ u64_stats_inc(&stats->drops);
give_pages(rq, page);
return NULL;
}
@@ -1323,10 +1323,10 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
break;
}
- stats->bytes += len;
+ u64_stats_add(&stats->bytes, len);
page = virt_to_head_page(buf);
put_page(page);
}
@@ -1432,11 +1432,11 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, *num_buf,
virtio16_to_cpu(vi->vdev, hdr->num_buffers));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
- stats->bytes += len;
+ u64_stats_add(&stats->bytes, len);
page = virt_to_head_page(buf);
offset = buf - page_address(page);
@@ -1451,7 +1451,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
put_page(page);
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)(truesize - room));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
@@ -1600,8 +1600,8 @@ err_xdp:
put_page(page);
mergeable_buf_free(rq, num_buf, dev, stats);
- stats->xdp_drops++;
- stats->drops++;
+ u64_stats_inc(&stats->xdp_drops);
+ u64_stats_inc(&stats->drops);
return NULL;
}
@@ -1625,12 +1625,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
head_skb = NULL;
- stats->bytes += len - vi->hdr_len;
+ u64_stats_add(&stats->bytes, len - vi->hdr_len);
if (unlikely(len > truesize - room)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)(truesize - room));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err_skb;
}
@@ -1662,11 +1662,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
dev->name, num_buf,
virtio16_to_cpu(vi->vdev,
hdr->num_buffers));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err_buf;
}
- stats->bytes += len;
+ u64_stats_add(&stats->bytes, len);
page = virt_to_head_page(buf);
truesize = mergeable_ctx_to_truesize(ctx);
@@ -1676,7 +1676,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(len > truesize - room)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)(truesize - room));
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
goto err_skb;
}
@@ -1718,7 +1718,7 @@ err_skb:
mergeable_buf_free(rq, num_buf, dev, stats);
err_buf:
- stats->drops++;
+ u64_stats_inc(&stats->drops);
dev_kfree_skb(head_skb);
return NULL;
}
@@ -1763,7 +1763,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
- dev->stats.rx_length_errors++;
+ DEV_STATS_INC(dev, rx_length_errors);
virtnet_rq_free_unused_buf(rq->vq, buf);
return;
}
@@ -1803,7 +1803,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
frame_err:
- dev->stats.rx_frame_errors++;
+ DEV_STATS_INC(dev, rx_frame_errors);
dev_kfree_skb(skb);
}
@@ -1985,7 +1985,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
unsigned long flags;
flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
- rq->stats.kicks++;
+ u64_stats_inc(&rq->stats.kicks);
u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
}
@@ -2065,22 +2065,23 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
struct virtnet_info *vi = rq->vq->vdev->priv;
struct virtnet_rq_stats stats = {};
unsigned int len;
+ int packets = 0;
void *buf;
int i;
if (!vi->big_packets || vi->mergeable_rx_bufs) {
void *ctx;
- while (stats.packets < budget &&
+ while (packets < budget &&
(buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
- stats.packets++;
+ packets++;
}
} else {
- while (stats.packets < budget &&
+ while (packets < budget &&
(buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
- stats.packets++;
+ packets++;
}
}
@@ -2093,17 +2094,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
}
+ u64_stats_set(&stats.packets, packets);
u64_stats_update_begin(&rq->stats.syncp);
for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
size_t offset = virtnet_rq_stats_desc[i].offset;
- u64 *item;
+ u64_stats_t *item, *src;
- item = (u64 *)((u8 *)&rq->stats + offset);
- *item += *(u64 *)((u8 *)&stats + offset);
+ item = (u64_stats_t *)((u8 *)&rq->stats + offset);
+ src = (u64_stats_t *)((u8 *)&stats + offset);
+ u64_stats_add(item, u64_stats_read(src));
}
u64_stats_update_end(&rq->stats.syncp);
- return stats.packets;
+ return packets;
}
static void virtnet_poll_cleantx(struct receive_queue *rq)
@@ -2158,7 +2161,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
sq = virtnet_xdp_get_sq(vi);
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.kicks++;
+ u64_stats_inc(&sq->stats.kicks);
u64_stats_update_end(&sq->stats.syncp);
}
virtnet_xdp_put_sq(vi, sq);
@@ -2349,12 +2352,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This should not happen! */
if (unlikely(err)) {
- dev->stats.tx_fifo_errors++;
+ DEV_STATS_INC(dev, tx_fifo_errors);
if (net_ratelimit())
dev_warn(&dev->dev,
"Unexpected TXQ (%d) queue failure: %d\n",
qnum, err);
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2370,7 +2373,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (kick || netif_xmit_stopped(txq)) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.kicks++;
+ u64_stats_inc(&sq->stats.kicks);
u64_stats_update_end(&sq->stats.syncp);
}
}
@@ -2553,16 +2556,16 @@ static void virtnet_stats(struct net_device *dev,
do {
start = u64_stats_fetch_begin(&sq->stats.syncp);
- tpackets = sq->stats.packets;
- tbytes = sq->stats.bytes;
- terrors = sq->stats.tx_timeouts;
+ tpackets = u64_stats_read(&sq->stats.packets);
+ tbytes = u64_stats_read(&sq->stats.bytes);
+ terrors = u64_stats_read(&sq->stats.tx_timeouts);
} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
do {
start = u64_stats_fetch_begin(&rq->stats.syncp);
- rpackets = rq->stats.packets;
- rbytes = rq->stats.bytes;
- rdrops = rq->stats.drops;
+ rpackets = u64_stats_read(&rq->stats.packets);
+ rbytes = u64_stats_read(&rq->stats.bytes);
+ rdrops = u64_stats_read(&rq->stats.drops);
} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
tot->rx_packets += rpackets;
@@ -2573,10 +2576,10 @@ static void virtnet_stats(struct net_device *dev,
tot->tx_errors += terrors;
}
- tot->tx_dropped = dev->stats.tx_dropped;
- tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
- tot->rx_length_errors = dev->stats.rx_length_errors;
- tot->rx_frame_errors = dev->stats.rx_frame_errors;
+ tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
+ tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
+ tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
+ tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
}
static void virtnet_ack_link_announce(struct virtnet_info *vi)
@@ -2855,6 +2858,9 @@ static void virtnet_get_ringparam(struct net_device *dev,
ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
}
+static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+ u16 vqn, u32 max_usecs, u32 max_packets);
+
static int virtnet_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -2890,12 +2896,36 @@ static int virtnet_set_ringparam(struct net_device *dev,
err = virtnet_tx_resize(vi, sq, ring->tx_pending);
if (err)
return err;
+
+ /* Upon disabling and re-enabling a transmit virtqueue, the device must
+ * set the coalescing parameters of the virtqueue to those configured
+ * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
+ * did not set any TX coalescing parameters, to 0.
+ */
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
+ vi->intr_coal_tx.max_usecs,
+ vi->intr_coal_tx.max_packets);
+ if (err)
+ return err;
+
+ vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
+ vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
}
if (ring->rx_pending != rx_pending) {
err = virtnet_rx_resize(vi, rq, ring->rx_pending);
if (err)
return err;
+
+ /* The reason is same as the transmit virtqueue reset */
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
+ vi->intr_coal_rx.max_usecs,
+ vi->intr_coal_rx.max_packets);
+ if (err)
+ return err;
+
+ vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
+ vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
}
}
@@ -3164,17 +3194,19 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
unsigned int idx = 0, start, i, j;
const u8 *stats_base;
+ const u64_stats_t *p;
size_t offset;
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
- stats_base = (u8 *)&rq->stats;
+ stats_base = (const u8 *)&rq->stats;
do {
start = u64_stats_fetch_begin(&rq->stats.syncp);
for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
offset = virtnet_rq_stats_desc[j].offset;
- data[idx + j] = *(u64 *)(stats_base + offset);
+ p = (const u64_stats_t *)(stats_base + offset);
+ data[idx + j] = u64_stats_read(p);
}
} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
idx += VIRTNET_RQ_STATS_LEN;
@@ -3183,12 +3215,13 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct send_queue *sq = &vi->sq[i];
- stats_base = (u8 *)&sq->stats;
+ stats_base = (const u8 *)&sq->stats;
do {
start = u64_stats_fetch_begin(&sq->stats.syncp);
for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
offset = virtnet_sq_stats_desc[j].offset;
- data[idx + j] = *(u64 *)(stats_base + offset);
+ p = (const u64_stats_t *)(stats_base + offset);
+ data[idx + j] = u64_stats_read(p);
}
} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
idx += VIRTNET_SQ_STATS_LEN;
@@ -3233,6 +3266,7 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
struct scatterlist sgs_tx, sgs_rx;
+ int i;
vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
@@ -3246,6 +3280,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
/* Save parameters */
vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
+ vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
+ }
vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
@@ -3259,6 +3297,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
/* Save parameters */
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+ vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+ }
return 0;
}
@@ -3287,27 +3329,23 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
{
int err;
- if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) {
- err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
- ec->rx_coalesce_usecs,
- ec->rx_max_coalesced_frames);
- if (err)
- return err;
- /* Save parameters */
- vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
- vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
- }
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
+ ec->rx_coalesce_usecs,
+ ec->rx_max_coalesced_frames);
+ if (err)
+ return err;
- if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) {
- err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
- ec->tx_coalesce_usecs,
- ec->tx_max_coalesced_frames);
- if (err)
- return err;
- /* Save parameters */
- vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
- vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
- }
+ vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+ vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
+ ec->tx_coalesce_usecs,
+ ec->tx_max_coalesced_frames);
+ if (err)
+ return err;
+
+ vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
+ vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
return 0;
}
@@ -3315,7 +3353,7 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
{
/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
- * feature is negotiated.
+ * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
*/
if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
return -EOPNOTSUPP;
@@ -3453,7 +3491,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
} else {
ec->rx_max_coalesced_frames = 1;
- if (vi->sq[0].napi.weight)
+ if (vi->sq[queue].napi.weight)
ec->tx_max_coalesced_frames = 1;
}
@@ -3866,7 +3904,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.tx_timeouts++;
+ u64_stats_inc(&sq->stats.tx_timeouts);
u64_stats_update_end(&sq->stats.syncp);
netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
@@ -4442,13 +4480,6 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
}
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
- vi->intr_coal_rx.max_usecs = 0;
- vi->intr_coal_tx.max_usecs = 0;
- vi->intr_coal_tx.max_packets = 0;
- vi->intr_coal_rx.max_packets = 0;
- }
-
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
@@ -4523,6 +4554,27 @@ static int virtnet_probe(struct virtio_device *vdev)
if (err)
goto free;
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ vi->intr_coal_rx.max_usecs = 0;
+ vi->intr_coal_tx.max_usecs = 0;
+ vi->intr_coal_rx.max_packets = 0;
+
+ /* Keep the default values of the coalescing parameters
+ * aligned with the default napi_tx state.
+ */
+ if (vi->sq[0].napi.weight)
+ vi->intr_coal_tx.max_packets = 1;
+ else
+ vi->intr_coal_tx.max_packets = 0;
+ }
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+ /* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ if (vi->sq[i].napi.weight)
+ vi->sq[i].intr_coal.max_packets = 1;
+ }
+
#ifdef CONFIG_SYSFS
if (vi->mergeable_rx_bufs)
dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;