author		David Vrabel <david.vrabel@citrix.com>	2015-09-08 16:25:14 +0300
committer	David S. Miller <davem@davemloft.net>	2015-09-09 22:34:35 +0300
commit		1d5d48523900a4b0f25d6b52f1a93c84bd671186 (patch)
tree		c95e0fefd5685977313ff4c0794d22906a163253 /drivers/net/xen-netback
parent		9b57ab8b58571ee57030210b74e2aabd1a18cff2 (diff)
download	linux-1d5d48523900a4b0f25d6b52f1a93c84bd671186.tar.xz
xen-netback: require fewer guest Rx slots when not using GSO
Commit f48da8b14d04ca87ffcffe68829afd45f926ec6a (xen-netback: fix unlimited guest Rx internal queue and carrier flapping) introduced a regression. The PV frontend in IPXE only places 4 requests on the guest Rx ring. Since netback required at least (MAX_SKB_FRAGS + 1) slots, IPXE could not receive any packets.

a) If GSO is not enabled on the VIF, fewer guest Rx slots are required for the largest possible packet. Calculate the required slots based on the maximum GSO size or the MTU.

This calculation of the number of required slots relies on 1650d5455bd2 (xen-netback: always fully coalesce guest Rx packets), which is present in 4.0-rc1 and later.

b) Reduce the Rx stall detection to checking for at least one available Rx request. This is fine since we're predominantly concerned with detecting interfaces which are down and thus have zero available Rx requests.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
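As an illustration of point (a), the per-packet slot requirement can be reproduced with a small standalone program. This is only a sketch of the arithmetic: PAGE_SIZE, the 64 KiB gso_max_size and the 1500-byte MTU are assumed example values, and rx_slots_needed() is a hypothetical helper mirroring the calculation added in netback.c.

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* GSO packets need one slot per page of the largest GSO payload plus
 * one slot for the GSO meta-data; without GSO the largest packet is
 * bounded by the MTU.
 */
static unsigned int rx_slots_needed(unsigned int gso_max_size,
                                    unsigned int mtu, int gso_enabled)
{
        if (gso_enabled)
                return DIV_ROUND_UP(gso_max_size, PAGE_SIZE) + 1;
        else
                return DIV_ROUND_UP(mtu, PAGE_SIZE);
}

int main(void)
{
        /* 65536 / 4096 = 16 data slots, plus 1 meta-data slot = 17 */
        printf("GSO enabled:  %u slots\n", rx_slots_needed(65536, 1500, 1));
        /* 1500 / 4096 rounds up to a single slot */
        printf("GSO disabled: %u slots\n", rx_slots_needed(65536, 1500, 0));
        return 0;
}

With GSO enabled the largest packet still needs 17 slots, but a non-GSO VIF needs only one, so a frontend such as IPXE that posts just 4 Rx requests can receive packets again.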
Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--	drivers/net/xen-netback/common.h	10
-rw-r--r--	drivers/net/xen-netback/netback.c	23
2 files changed, 16 insertions(+), 17 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 6dc76c1e807b..a7bf74727116 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -200,11 +200,6 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	struct xenvif_stats stats;
 };
 
-/* Maximum number of Rx slots a to-guest packet may use, including the
- * slot needed for GSO meta-data.
- */
-#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
-
 enum state_bit_shift {
 	/* This bit marks that the vif is connected */
 	VIF_STATUS_CONNECTED,
@@ -317,11 +312,6 @@ int xenvif_dealloc_kthread(void *data);
 
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
-/* Determine whether the needed number of slots (req) are available,
- * and set req_event if not.
- */
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
-
 void xenvif_carrier_on(struct xenvif *vif);
 
 /* Callback from stack when TX packet can be released */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 42569b994ea8..b588b1a08cd4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -149,9 +149,20 @@ static inline pending_ring_idx_t pending_index(unsigned i)
 	return i & (MAX_PENDING_REQS-1);
 }
 
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
+static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
+{
+	if (vif->gso_mask)
+		return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
+	else
+		return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+}
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
+	int needed;
+
+	needed = xenvif_rx_ring_slots_needed(queue->vif);
 
 	do {
 		prod = queue->rx.sring->req_prod;
@@ -513,7 +524,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 	skb_queue_head_init(&rxq);
 
-	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+	while (xenvif_rx_ring_slots_available(queue)
 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
 		queue->last_rx_time = jiffies;
@@ -1938,8 +1949,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
-	return !queue->stalled
-	       && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+	return !queue->stalled && prod - cons < 1
 	       && time_after(jiffies,
 			     queue->last_rx_time + queue->vif->stall_timeout);
 }
@@ -1951,14 +1961,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
-	return queue->stalled
-	       && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+	return queue->stalled && prod - cons >= 1;
 }
 
 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
 {
 	return (!skb_queue_empty(&queue->rx_queue)
-		&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+		&& xenvif_rx_ring_slots_available(queue))
 		|| (queue->vif->stall_timeout &&
 		    (xenvif_rx_queue_stalled(queue)
 		     || xenvif_rx_queue_ready(queue)))