From 44cc8ed17e203479827ef6b6d37404d2665da6fb Mon Sep 17 00:00:00 2001
From: Zoltan Kiss
Date: Tue, 28 Oct 2014 15:29:31 +0000
Subject: xen-netback: Remove __GFP_COLD

This flag is unnecessary, it came from some old code.

Suggested-by: Eric Dumazet
Signed-off-by: Zoltan Kiss
Signed-off-by: David Vrabel
Acked-by: Wei Liu
Signed-off-by: David S. Miller
---
 drivers/net/xen-netback/netback.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net/xen-netback/netback.c')

diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 08f65996534..85fec0fb4ec 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1494,7 +1494,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 		unsigned int len;

 		BUG_ON(i >= MAX_SKB_FRAGS);
-		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+		page = alloc_page(GFP_ATOMIC);
 		if (!page) {
 			int j;
 			skb->truesize += skb->data_len;
--
cgit v1.2.3-70-g09d2

From 7e5d7753956b374516530e156c5e8aa19652398d Mon Sep 17 00:00:00 2001
From: Malcolm Crossley
Date: Wed, 5 Nov 2014 10:50:22 +0000
Subject: xen-netback: remove unconditional __pskb_pull_tail() in guest Tx path

Unconditionally pulling 128 bytes into the linear area is not required for:

- security: Every protocol demux starts with pskb_may_pull() to pull frag
  data into the linear area, if necessary, before looking at headers.

- performance: Netback has already grant copied up to 128 bytes from the
  first slot of a packet into the linear area. The first slot normally
  contains all the IPv4/IPv6 and TCP/UDP headers.

The unconditional pull would often copy frag data unnecessarily. This is a
performance problem when running on a version of Xen where grant unmap
avoids TLB flushes for pages which are not accessed. TLB flushes can now be
avoided for > 99% of unmaps (it was 0% before).

Grant unmap TLB flush avoidance will be available in a future version of
Xen (probably 4.6).

Signed-off-by: Malcolm Crossley
Signed-off-by: David Vrabel
Acked-by: Ian Campbell
Signed-off-by: David S. Miller
---
 drivers/net/xen-netback/netback.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

(limited to 'drivers/net/xen-netback/netback.c')

diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 45755f9aa3f..4a509f715fe 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -82,6 +82,16 @@ MODULE_PARM_DESC(max_queues,
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);

+/* The amount to copy out of the first guest Tx slot into the skb's
+ * linear area. If the first slot has more data, it will be mapped
+ * and put into the first frag.
+ *
+ * This is sized to avoid pulling headers from the frags for most
+ * TCP/IP packets.
+ */
+#define XEN_NETBACK_TX_COPY_LEN 128
+
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 			       u8 status);

@@ -125,13 +135,6 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
 			    pending_tx_info[0]);
 }

-/* This is a miniumum size for the linear area to avoid lots of
- * calls to __pskb_pull_tail() as we set up checksum offsets. The
- * value 128 was chosen as it covers all IPv4 and most likely
- * IPv6 headers.
- */
-#define PKT_PROT_LEN 128
-
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
 	return (u16)frag->page_offset;
@@ -1446,9 +1449,9 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];

-		data_len = (txreq.size > PKT_PROT_LEN &&
+		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
 			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-			PKT_PROT_LEN : txreq.size;
+			XEN_NETBACK_TX_COPY_LEN : txreq.size;

 		skb = xenvif_alloc_skb(data_len);
 		if (unlikely(skb == NULL)) {
@@ -1653,11 +1656,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 			}
 		}

-		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
-			int target = min_t(int, skb->len, PKT_PROT_LEN);
-			__pskb_pull_tail(skb, target - skb_headlen(skb));
-		}
-
 		skb->dev = queue->vif->dev;
 		skb->protocol = eth_type_trans(skb, skb->dev);
 		skb_reset_network_header(skb);
--
cgit v1.2.3-70-g09d2

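For context on the "security" point in the patch above: every protocol
handler pulls the header bytes it needs into the linear area on demand, so
netback's unconditional 128-byte __pskb_pull_tail() added no protection.
Below is a minimal sketch of that pattern, assuming a hypothetical handler
example_rx_demux() (the name and error handling are illustrative, not part
of the patch or of any real protocol):

#include <linux/skbuff.h>
#include <linux/ip.h>

/* Hypothetical demux step: make sure the IPv4 header is in the linear
 * area before dereferencing it.  pskb_may_pull() is a no-op when the
 * requested bytes are already linear and only copies from the frags
 * when needed, so callers further down the stack do not have to
 * pre-pull a fixed amount.
 */
static int example_rx_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;		/* runt packet */

	/* Re-read skb->data: pskb_may_pull() may reallocate the head. */
	iph = (const struct iphdr *)skb->data;
	if (iph->ihl < 5)
		return -EINVAL;		/* malformed header length */

	if (!pskb_may_pull(skb, iph->ihl * 4))
		return -EINVAL;		/* truncated IP options */

	return 0;			/* header is now safely addressable */
}

In the Tx path changed above, the up-to-128-byte grant copy into the linear
area already covers the IPv4/IPv6 and TCP/UDP headers of typical packets,
so such on-demand pulls are almost always no-ops.
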
From 26c0e102585d5a4d311f5d6eb7f524d288e7f6b7 Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Thu, 18 Dec 2014 11:13:06 +0000
Subject: xen-netback: support frontends without feature-rx-notify again

Commit bc96f648df1bbc2729abbb84513cf4f64273a1f1 (xen-netback: make
feature-rx-notify mandatory) incorrectly assumed that there were no
frontends in use that did not support this feature. But the frontend driver
in MiniOS does not and since this is used by (qemu) stubdoms, these stopped
working.

Netback sort of works as-is in this mode except:

- If there are no Rx requests and the internal Rx queue fills, only the
  drain timeout will wake the thread. The default drain timeout of 10 s
  would give unacceptable pauses.

- If an Rx stall was detected and the internal Rx queue is drained, then
  the Rx thread would never wake.

Handle these two cases (when feature-rx-notify is disabled) by:

- Reducing the drain timeout to 30 ms.

- Disabling Rx stall detection.

Reported-by: John
Tested-by: John
Signed-off-by: David Vrabel
Reviewed-by: Wei Liu
Signed-off-by: David S. Miller
---
 drivers/net/xen-netback/common.h    |  4 +++-
 drivers/net/xen-netback/interface.c |  4 +++-
 drivers/net/xen-netback/netback.c   | 27 ++++++++++++++-------------
 drivers/net/xen-netback/xenbus.c    | 12 +++++++++---
 4 files changed, 29 insertions(+), 18 deletions(-)

(limited to 'drivers/net/xen-netback/netback.c')

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 083ecc93fe5..5f1fda44882 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -230,6 +230,8 @@ struct xenvif {
 	 */
 	bool disabled;
 	unsigned long status;
+	unsigned long drain_timeout;
+	unsigned long stall_timeout;

 	/* Queues */
 	struct xenvif_queue *queues;
@@ -328,7 +330,7 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id);
 extern bool separate_tx_rx_irq;

 extern unsigned int rx_drain_timeout_msecs;
-extern unsigned int rx_drain_timeout_jiffies;
+extern unsigned int rx_stall_timeout_msecs;
 extern unsigned int xenvif_max_queues;

 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index a6a32d337bb..9259a732e8a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -166,7 +166,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;

 	cb = XENVIF_RX_CB(skb);
-	cb->expires = jiffies + rx_drain_timeout_jiffies;
+	cb->expires = jiffies + vif->drain_timeout;

 	xenvif_rx_queue_tail(queue, skb);
 	xenvif_kick_thread(queue);
@@ -414,6 +414,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->ip_csum = 1;
 	vif->dev = dev;
 	vif->disabled = false;
+	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
+	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

 	/* Start out with no queues. */
 	vif->queues = NULL;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4a509f715fe..908e65e9b82 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -60,14 +60,12 @@ module_param(separate_tx_rx_irq, bool, 0644);
  */
 unsigned int rx_drain_timeout_msecs = 10000;
 module_param(rx_drain_timeout_msecs, uint, 0444);
-unsigned int rx_drain_timeout_jiffies;

 /* The length of time before the frontend is considered unresponsive
  * because it isn't providing Rx slots.
  */
-static unsigned int rx_stall_timeout_msecs = 60000;
+unsigned int rx_stall_timeout_msecs = 60000;
 module_param(rx_stall_timeout_msecs, uint, 0444);
-static unsigned int rx_stall_timeout_jiffies;

 unsigned int xenvif_max_queues;
 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
@@ -2020,7 +2018,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
 	return !queue->stalled
 		&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
 		&& time_after(jiffies,
-			      queue->last_rx_time + rx_stall_timeout_jiffies);
+			      queue->last_rx_time + queue->vif->stall_timeout);
 }

 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
@@ -2038,8 +2036,9 @@ static bool xenvif_have_rx_work(struct xenvif_queue *queue)
 {
 	return (!skb_queue_empty(&queue->rx_queue)
 		&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
-		|| xenvif_rx_queue_stalled(queue)
-		|| xenvif_rx_queue_ready(queue)
+		|| (queue->vif->stall_timeout &&
+		    (xenvif_rx_queue_stalled(queue)
+		     || xenvif_rx_queue_ready(queue)))
 		|| kthread_should_stop()
 		|| queue->vif->disabled;
 }
@@ -2092,6 +2091,9 @@ int xenvif_kthread_guest_rx(void *data)
 	struct xenvif_queue *queue = data;
 	struct xenvif *vif = queue->vif;

+	if (!vif->stall_timeout)
+		xenvif_queue_carrier_on(queue);
+
 	for (;;) {
 		xenvif_wait_for_rx_work(queue);

@@ -2118,10 +2120,12 @@ int xenvif_kthread_guest_rx(void *data)
 		 * while it's probably not responsive, drop the
 		 * carrier so packets are dropped earlier.
 		 */
-		if (xenvif_rx_queue_stalled(queue))
-			xenvif_queue_carrier_off(queue);
-		else if (xenvif_rx_queue_ready(queue))
-			xenvif_queue_carrier_on(queue);
+		if (vif->stall_timeout) {
+			if (xenvif_rx_queue_stalled(queue))
+				xenvif_queue_carrier_off(queue);
+			else if (xenvif_rx_queue_ready(queue))
+				xenvif_queue_carrier_on(queue);
+		}

 		/* Queued packets may have foreign pages from other
 		 * domains.  These cannot be queued indefinitely as
@@ -2192,9 +2196,6 @@ static int __init netback_init(void)
 	if (rc)
 		goto failed_init;

-	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
-	rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
-
 #ifdef CONFIG_DEBUG_FS
 	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
 	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d44cd19169b..efbaf2ae199 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -887,9 +887,15 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 		return -EOPNOTSUPP;

 	if (xenbus_scanf(XBT_NIL, dev->otherend,
-			 "feature-rx-notify", "%d", &val) < 0 || val == 0) {
-		xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
-		return -EINVAL;
+			 "feature-rx-notify", "%d", &val) < 0)
+		val = 0;
+	if (!val) {
+		/* - Reduce drain timeout to poll more frequently for
+		 *   Rx requests.
+		 * - Disable Rx stall detection.
+		 */
+		be->vif->drain_timeout = msecs_to_jiffies(30);
+		be->vif->stall_timeout = 0;
 	}

 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
--
cgit v1.2.3-70-g09d2
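
The drain timeout that this last patch makes per-vif works on an expiry
stamped at enqueue time: xenvif_start_xmit() above sets
cb->expires = jiffies + vif->drain_timeout, and the Rx thread discards
anything older, so an unresponsive frontend cannot pin queued pages
indefinitely. A minimal sketch of that expiry check, under the assumption
of a simplified queue with the driver's locking omitted (example_rx_cb and
example_drop_expired are illustrative names, not netback functions):

#include <linux/jiffies.h>
#include <linux/skbuff.h>

/* Illustrative stand-in for the control block netback keeps in skb->cb
 * (the real driver accesses it through XENVIF_RX_CB(skb)).
 */
struct example_rx_cb {
	unsigned long expires;	/* jiffies deadline set at enqueue time */
};

/* Drop queued guest-Rx packets whose drain timeout has passed.  The queue
 * is in arrival order, so once the head is still fresh the rest are too.
 */
static void example_drop_expired(struct sk_buff_head *rx_queue)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rx_queue)) != NULL) {
		struct example_rx_cb *cb = (struct example_rx_cb *)skb->cb;

		if (!time_after(jiffies, cb->expires))
			break;
		__skb_unlink(skb, rx_queue);
		kfree_skb(skb);
	}
}

With feature-rx-notify absent there is no event to wake the Rx thread when
the frontend finally posts Rx requests, so the 30 ms drain timeout set in
the xenbus.c hunk above effectively becomes a polling interval, and stall
detection is switched off by setting stall_timeout to 0.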