author	David S. Miller <davem@davemloft.net>	2014-03-31 16:56:43 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-31 16:56:43 -0400
commit	0b70195e0c3206103be991e196c26fcf168d0334 (patch)
tree	61902c09bfa14034a82c151f46a4616a2f013653 /drivers/net/xen-netback
parent	ce22bb6122e849e0d3a6857d03eb69a25bfdc6e9 (diff)
parent	17e84a9253467552fb06f99c009bb0bc1d7bfd39 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/xen-netback/netback.c

A bug fix overlapped with changing how the netback SKB control block is implemented.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--	drivers/net/xen-netback/netback.c | 32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cb784fe5220..ae34f5fc7fb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -191,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* into multiple copies tend to give large frags their
* own buffers as before.
*/
- if ((offset + size > MAX_BUFFER_OFFSET) &&
- (size <= MAX_BUFFER_OFFSET) && offset && !head)
+ BUG_ON(size > MAX_BUFFER_OFFSET);
+ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
return true;

return false;
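For reference, this is roughly how start_new_rx_buffer() reads once the hunk above is applied; a sketch reconstructed from the diff context, not a verbatim copy of the post-merge file (in this driver MAX_BUFFER_OFFSET is defined as PAGE_SIZE):

static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* Callers may no longer pass a chunk larger than one buffer, so
	 * the old (size <= MAX_BUFFER_OFFSET) guard becomes an assertion.
	 */
	BUG_ON(size > MAX_BUFFER_OFFSET);

	/* Start a new receive buffer only when this chunk would spill
	 * past the end of the current one, we are not already at the
	 * start of a buffer, and this is not the head of the skb.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}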
@@ -511,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
RING_IDX max_slots_needed;
+ RING_IDX old_req_cons;
+ RING_IDX ring_slots_used;
int i;
/* We need a cheap worse case estimate for the number of
@@ -522,9 +524,28 @@ static void xenvif_rx_action(struct xenvif *vif)
PAGE_SIZE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
unsigned int size;
+ unsigned int offset;
+
size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+ offset = skb_shinfo(skb)->frags[i].page_offset;
+
+ /* For a worse-case estimate we need to factor in
+ * the fragment page offset as this will affect the
+ * number of times xenvif_gop_frag_copy() will
+ * call start_new_rx_buffer().
+ */
+ max_slots_needed += DIV_ROUND_UP(offset + size,
+ PAGE_SIZE);
}
+
+ /* To avoid the estimate becoming too pessimal for some
+ * frontends that limit posted rx requests, cap the estimate
+ * at MAX_SKB_FRAGS.
+ */
+ if (max_slots_needed > MAX_SKB_FRAGS)
+ max_slots_needed = MAX_SKB_FRAGS;
+
+ /* We may need one more slot for GSO metadata */
if (skb_is_gso(skb) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
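The arithmetic change above is the core of the fix; the cap at MAX_SKB_FRAGS then reins the larger estimate back in for frontends that limit how many rx requests they post. A standalone illustration (plain userspace C with a hypothetical fragment, not kernel code) of why DIV_ROUND_UP(size, PAGE_SIZE) undercounts when a frag does not start on a page boundary:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical frag: one page of data starting half a page into
	 * its backing page, so the data actually straddles two pages.
	 */
	unsigned long size = 4096, offset = 2048;

	/* Old estimate ignores the offset: reports 1 slot. */
	printf("old: %lu slot(s)\n", DIV_ROUND_UP(size, PAGE_SIZE));

	/* New estimate factors it in: reports 2 slots, matching the
	 * number of buffers xenvif_gop_frag_copy() can actually start.
	 */
	printf("new: %lu slot(s)\n", DIV_ROUND_UP(offset + size, PAGE_SIZE));
	return 0;
}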
@@ -539,8 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
} else
vif->rx_last_skb_slots = 0;
+ old_req_cons = vif->rx.req_cons;
XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
- BUG_ON(XENVIF_RX_CB(skb)->meta_slots_used > max_slots_needed);
+ ring_slots_used = vif->rx.req_cons - old_req_cons;
+
+ BUG_ON(ring_slots_used > max_slots_needed);
__skb_queue_tail(&rxq, skb);
}
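The final hunk changes what the BUG_ON measures: xenvif_gop_skb() can consume more ring requests than the meta slots it returns, so the delta in the request-consumer index, not meta_slots_used, is what must stay within the estimate. A minimal standalone sketch of that snapshot-and-compare pattern, with stand-in types (in the Xen ring headers RING_IDX is a free-running unsigned counter, so the unsigned subtraction below stays correct across wraparound):

#include <assert.h>

typedef unsigned int RING_IDX;	/* stand-in for the Xen ring index type */

struct rx_ring { RING_IDX req_cons; };

/* Hypothetical worker that consumes a variable number of ring slots. */
static void consume_slots(struct rx_ring *ring, RING_IDX n)
{
	ring->req_cons += n;
}

int main(void)
{
	struct rx_ring rx = { .req_cons = 4294967294u }; /* about to wrap */
	RING_IDX max_slots_needed = 3;

	RING_IDX old_req_cons = rx.req_cons;
	consume_slots(&rx, 3);	/* req_cons wraps past zero here */
	RING_IDX ring_slots_used = rx.req_cons - old_req_cons;

	/* Overrunning the estimate would overrun the shared ring, so the
	 * kernel treats it as fatal; a plain assert stands in for BUG_ON.
	 */
	assert(ring_slots_used <= max_slots_needed);
	return 0;
}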