From 076bb0c82a44fbe46fe2c8527a5b5b64b69f679d Mon Sep 17 00:00:00 2001
From: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Date: Wed, 10 Jul 2013 17:13:17 +0300
Subject: net: rename include/net/ll_poll.h to include/net/busy_poll.h

Rename the file and correct all the places where it is included.

Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/busy_poll.h | 183 ++++++++++++++++++++++++++++++++++++++++++++++++
 include/net/ll_poll.h   | 183 ------------------------------------------------
 2 files changed, 183 insertions(+), 183 deletions(-)
 create mode 100644 include/net/busy_poll.h
 delete mode 100644 include/net/ll_poll.h

diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
new file mode 100644
index 00000000000..76f03408774
--- /dev/null
+++ b/include/net/busy_poll.h
@@ -0,0 +1,183 @@
+/*
+ * Low Latency Sockets
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Author: Eliezer Tamir
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ */
+
+#ifndef _LINUX_NET_LL_POLL_H
+#define _LINUX_NET_LL_POLL_H
+
+#include <linux/netdevice.h>
+#include <net/ip.h>
+
+#ifdef CONFIG_NET_LL_RX_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_ll_read __read_mostly;
+extern unsigned int sysctl_net_ll_poll __read_mostly;
+
+/* return values from ndo_ll_poll */
+#define LL_FLUSH_FAILED		-1
+#define LL_FLUSH_BUSY		-2
+
+static inline bool net_busy_loop_on(void)
+{
+	return sysctl_net_ll_poll;
+}
+
+/* a wrapper to make debug_smp_processor_id() happy
+ * we can use sched_clock() because we don't care much about precision
+ * we only care that the average is bounded
+ */
+#ifdef CONFIG_DEBUG_PREEMPT
+static inline u64 busy_loop_us_clock(void)
+{
+	u64 rc;
+
+	preempt_disable_notrace();
+	rc = sched_clock();
+	preempt_enable_no_resched_notrace();
+
+	return rc >> 10;
+}
+#else /* CONFIG_DEBUG_PREEMPT */
+static inline u64 busy_loop_us_clock(void)
+{
+	return sched_clock() >> 10;
+}
+#endif /* CONFIG_DEBUG_PREEMPT */
+
+static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
+{
+	return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
+}
+
+/* in poll/select we use the global sysctl_net_ll_poll value */
+static inline unsigned long busy_loop_end_time(void)
+{
+	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
+}
+
+static inline bool sk_can_busy_loop(struct sock *sk)
+{
+	return sk->sk_ll_usec && sk->sk_napi_id &&
+	       !need_resched() && !signal_pending(current);
+}
+
+
+static inline bool busy_loop_timeout(unsigned long end_time)
+{
+	unsigned long now = busy_loop_us_clock();
+
+	return time_after(now, end_time);
+}
+
+/* when used in sock_poll() nonblock is known at compile time to be true
+ * so the loop and end_time will be optimized out
+ */
+static inline bool sk_busy_loop(struct sock *sk, int nonblock)
+{
+	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
+	const struct net_device_ops *ops;
+	struct napi_struct *napi;
+	int rc = false;
+
+	/*
+	 * rcu read lock for napi hash
+	 * bh so we don't race with net_rx_action
+	 */
+	rcu_read_lock_bh();
+
+	napi = napi_by_id(sk->sk_napi_id);
+	if (!napi)
+		goto out;
+
+	ops = napi->dev->netdev_ops;
+	if (!ops->ndo_ll_poll)
+		goto out;
+
+	do {
+		rc = ops->ndo_ll_poll(napi);
+
+		if (rc == LL_FLUSH_FAILED)
+			break; /* permanent failure */
+
+		if (rc > 0)
+			/* local bh are disabled so it is ok to use _BH */
+			NET_ADD_STATS_BH(sock_net(sk),
+					 LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+
+	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
+		 !need_resched() && !busy_loop_timeout(end_time));
+
+	rc = !skb_queue_empty(&sk->sk_receive_queue);
+out:
+	rcu_read_unlock_bh();
+	return rc;
+}
+
+/* used in the NIC receive handler to mark the skb */
+static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
+{
+	skb->napi_id = napi->napi_id;
+}
+
+/* used in the protocol hanlder to propagate the napi_id to the socket */
+static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
+{
+	sk->sk_napi_id = skb->napi_id;
+}
+
+#else /* CONFIG_NET_LL_RX_POLL */
+static inline unsigned long net_busy_loop_on(void)
+{
+	return 0;
+}
+
+static inline unsigned long busy_loop_end_time(void)
+{
+	return 0;
+}
+
+static inline bool sk_can_busy_loop(struct sock *sk)
+{
+	return false;
+}
+
+static inline bool sk_busy_poll(struct sock *sk, int nonblock)
+{
+	return false;
+}
+
+static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
+{
+}
+
+static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
+{
+}
+
+static inline bool busy_loop_timeout(unsigned long end_time)
+{
+	return true;
+}
+
+#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* _LINUX_NET_LL_POLL_H */
diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h
deleted file mode 100644
index 76f03408774..00000000000
--- a/include/net/ll_poll.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Low Latency Sockets
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Author: Eliezer Tamir
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- */
-
-#ifndef _LINUX_NET_LL_POLL_H
-#define _LINUX_NET_LL_POLL_H
-
-#include <linux/netdevice.h>
-#include <net/ip.h>
-
-#ifdef CONFIG_NET_LL_RX_POLL
-
-struct napi_struct;
-extern unsigned int sysctl_net_ll_read __read_mostly;
-extern unsigned int sysctl_net_ll_poll __read_mostly;
-
-/* return values from ndo_ll_poll */
-#define LL_FLUSH_FAILED		-1
-#define LL_FLUSH_BUSY		-2
-
-static inline bool net_busy_loop_on(void)
-{
-	return sysctl_net_ll_poll;
-}
-
-/* a wrapper to make debug_smp_processor_id() happy
- * we can use sched_clock() because we don't care much about precision
- * we only care that the average is bounded
- */
-#ifdef CONFIG_DEBUG_PREEMPT
-static inline u64 busy_loop_us_clock(void)
-{
-	u64 rc;
-
-	preempt_disable_notrace();
-	rc = sched_clock();
-	preempt_enable_no_resched_notrace();
-
-	return rc >> 10;
-}
-#else /* CONFIG_DEBUG_PREEMPT */
-static inline u64 busy_loop_us_clock(void)
-{
-	return sched_clock() >> 10;
-}
-#endif /* CONFIG_DEBUG_PREEMPT */
-
-static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
-{
-	return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
-}
-
-/* in poll/select we use the global sysctl_net_ll_poll value */
-static inline unsigned long busy_loop_end_time(void)
-{
-	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
-}
-
-static inline bool sk_can_busy_loop(struct sock *sk)
-{
-	return sk->sk_ll_usec && sk->sk_napi_id &&
-	       !need_resched() && !signal_pending(current);
-}
-
-
-static inline bool busy_loop_timeout(unsigned long end_time)
-{
-	unsigned long now = busy_loop_us_clock();
-
-	return time_after(now, end_time);
-}
-
-/* when used in sock_poll() nonblock is known at compile time to be true
- * so the loop and end_time will be optimized out
- */
-static inline bool sk_busy_loop(struct sock *sk, int nonblock)
-{
-	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
-	const struct net_device_ops *ops;
-	struct napi_struct *napi;
-	int rc = false;
-
-	/*
-	 * rcu read lock for napi hash
-	 * bh so we don't race with net_rx_action
-	 */
-	rcu_read_lock_bh();
-
-	napi = napi_by_id(sk->sk_napi_id);
-	if (!napi)
-		goto out;
-
-	ops = napi->dev->netdev_ops;
-	if (!ops->ndo_ll_poll)
-		goto out;
-
-	do {
-		rc = ops->ndo_ll_poll(napi);
-
-		if (rc == LL_FLUSH_FAILED)
-			break; /* permanent failure */
-
-		if (rc > 0)
-			/* local bh are disabled so it is ok to use _BH */
-			NET_ADD_STATS_BH(sock_net(sk),
-					 LINUX_MIB_LOWLATENCYRXPACKETS, rc);
-
-	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
-		 !need_resched() && !busy_loop_timeout(end_time));
-
-	rc = !skb_queue_empty(&sk->sk_receive_queue);
-out:
-	rcu_read_unlock_bh();
-	return rc;
-}
-
-/* used in the NIC receive handler to mark the skb */
-static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
-{
-	skb->napi_id = napi->napi_id;
-}
-
-/* used in the protocol hanlder to propagate the napi_id to the socket */
-static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
-{
-	sk->sk_napi_id = skb->napi_id;
-}
-
-#else /* CONFIG_NET_LL_RX_POLL */
-static inline unsigned long net_busy_loop_on(void)
-{
-	return 0;
-}
-
-static inline unsigned long busy_loop_end_time(void)
-{
-	return 0;
-}
-
-static inline bool sk_can_busy_loop(struct sock *sk)
-{
-	return false;
-}
-
-static inline bool sk_busy_poll(struct sock *sk, int nonblock)
-{
-	return false;
-}
-
-static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
-{
-}
-
-static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
-{
-}
-
-static inline bool busy_loop_timeout(unsigned long end_time)
-{
-	return true;
-}
-
-#endif /* CONFIG_NET_LL_RX_POLL */
-#endif /* _LINUX_NET_LL_POLL_H */
-- 
cgit v1.2.3-70-g09d2
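
For reference, the header moved by this patch is the whole kernel-side busy-poll API: a protocol receive path checks sk_can_busy_loop() and then spins in sk_busy_loop() instead of going to sleep. The sketch below illustrates that call pattern only; my_proto_recvmsg() and its return values are hypothetical, while sk_can_busy_loop(), sk_busy_loop(), skb_queue_empty() and sk_receive_queue are the real interfaces shown above.

/* Hypothetical protocol receive path using the helpers from busy_poll.h.
 * Only sk_can_busy_loop(), sk_busy_loop() and skb_queue_empty() are real
 * kernel interfaces; my_proto_recvmsg() is an illustration.
 */
#include <net/busy_poll.h>
#include <net/sock.h>

static int my_proto_recvmsg(struct sock *sk, int nonblock)
{
	/* Nothing queued yet: busy-poll the NIC for up to sk->sk_ll_usec us
	 * (or until data arrives) before falling back to the sleep path.
	 */
	if (skb_queue_empty(&sk->sk_receive_queue) && sk_can_busy_loop(sk))
		sk_busy_loop(sk, nonblock);

	if (skb_queue_empty(&sk->sk_receive_queue))
		return -EAGAIN;		/* a real caller would sleep here */

	return 0;			/* data is ready to be dequeued */
}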


From 8b80cda536ea9bceec0364e897868a30ee13b992 Mon Sep 17 00:00:00 2001
From: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Date: Wed, 10 Jul 2013 17:13:26 +0300
Subject: net: rename ll methods to busy-poll

Rename ndo_ll_poll to ndo_busy_poll.
Rename sk_mark_ll to sk_mark_napi_id.
Rename skb_mark_ll to skb_mark_napi_id.
Correct all users of these functions.
Update comments and defines in include/net/busy_poll.h.

Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  |  2 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c |  2 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    |  4 ++--
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c   |  2 +-
 drivers/net/ethernet/mellanox/mlx4/en_rx.c       |  2 +-
 include/linux/netdevice.h                        |  2 +-
 include/net/busy_poll.h                          | 22 ++++++++++++----------
 net/ipv4/tcp_ipv4.c                              |  2 +-
 net/ipv4/udp.c                                   |  2 +-
 net/ipv6/tcp_ipv6.c                              |  2 +-
 net/ipv6/udp.c                                   |  2 +-
 11 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 05b6b4e8b07..3353efe7919 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -990,7 +990,7 @@ reuse_rx:
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 					       le16_to_cpu(cqe_fp->vlan_tag));
 
-		skb_mark_ll(skb, &fp->napi);
+		skb_mark_napi_id(skb, &fp->napi);
 
 		if (bnx2x_fp_ll_polling(fp))
 			netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 15a528bda87..e5da07858a2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12027,7 +12027,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #endif
 
 #ifdef CONFIG_NET_LL_RX_POLL
-	.ndo_ll_poll		= bnx2x_low_latency_recv,
+	.ndo_busy_poll		= bnx2x_low_latency_recv,
 #endif
 };
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 047ebaaf014..bad8f14b194 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1978,7 +1978,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 #endif /* IXGBE_FCOE */
-		skb_mark_ll(skb, &q_vector->napi);
+		skb_mark_napi_id(skb, &q_vector->napi);
 		ixgbe_rx_skb(q_vector, skb);
 
 		/* update budget accounting */
@@ -7228,7 +7228,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_poll_controller	= ixgbe_netpoll,
 #endif
 #ifdef CONFIG_NET_LL_RX_POLL
-	.ndo_ll_poll		= ixgbe_low_latency_recv,
+	.ndo_busy_poll		= ixgbe_low_latency_recv,
 #endif
 #ifdef IXGBE_FCOE
 	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0fb2438dc2c..5eac871399d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2141,7 +2141,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
 #endif
 #ifdef CONFIG_NET_LL_RX_POLL
-	.ndo_ll_poll		= mlx4_en_low_latency_recv,
+	.ndo_busy_poll		= mlx4_en_low_latency_recv,
 #endif
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 90746d37ac9..dec455c8f62 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -767,7 +767,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 					       timestamp);
 		}
 
-		skb_mark_ll(skb, &cq->napi);
+		skb_mark_napi_id(skb, &cq->napi);
 
 		/* Push it up the stack */
 		netif_receive_skb(skb);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bb82871b849..0741a1e919a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -974,7 +974,7 @@ struct net_device_ops {
 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
 #ifdef CONFIG_NET_LL_RX_POLL
-	int			(*ndo_ll_poll)(struct napi_struct *dev);
+	int			(*ndo_busy_poll)(struct napi_struct *dev);
 #endif
 	int			(*ndo_set_vf_mac)(struct net_device *dev,
 						  int queue, u8 *mac);
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 76f03408774..4ff71908fd4 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -1,5 +1,5 @@
 /*
- * Low Latency Sockets
+ * net busy poll support
  * Copyright(c) 2013 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -21,8 +21,8 @@
  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  */
 
-#ifndef _LINUX_NET_LL_POLL_H
-#define _LINUX_NET_LL_POLL_H
+#ifndef _LINUX_NET_BUSY_POLL_H
+#define _LINUX_NET_BUSY_POLL_H
 
 #include <linux/netdevice.h>
 #include <net/ip.h>
@@ -110,11 +110,11 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 		goto out;
 
 	ops = napi->dev->netdev_ops;
-	if (!ops->ndo_ll_poll)
+	if (!ops->ndo_busy_poll)
 		goto out;
 
 	do {
-		rc = ops->ndo_ll_poll(napi);
+		rc = ops->ndo_busy_poll(napi);
 
 		if (rc == LL_FLUSH_FAILED)
 			break; /* permanent failure */
@@ -134,13 +134,14 @@ out:
 }
 
 /* used in the NIC receive handler to mark the skb */
-static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+				    struct napi_struct *napi)
 {
 	skb->napi_id = napi->napi_id;
 }
 
 /* used in the protocol hanlder to propagate the napi_id to the socket */
-static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
+static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
 {
 	sk->sk_napi_id = skb->napi_id;
 }
@@ -166,11 +167,12 @@ static inline bool sk_busy_poll(struct sock *sk, int nonblock)
 	return false;
 }
 
-static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+				    struct napi_struct *napi)
 {
 }
 
-static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
+static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
 {
 }
 
@@ -180,4 +182,4 @@ static inline bool busy_loop_timeout(unsigned long end_time)
 }
 
 #endif /* CONFIG_NET_LL_RX_POLL */
-#endif /* _LINUX_NET_LL_POLL_H */
+#endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3a261b41a00..b299da5ff49 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1994,7 +1994,7 @@ process:
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
-	sk_mark_ll(sk, skb);
+	sk_mark_napi_id(sk, skb);
 	skb->dev = NULL;
 
 	bh_lock_sock_nested(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bcc0ff2c16d..a0d7151ffbd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1713,7 +1713,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (sk != NULL) {
 		int ret;
 
-		sk_mark_ll(sk, skb);
+		sk_mark_napi_id(sk, skb);
 		ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 345bd92d4dd..6e1649d5853 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1499,7 +1499,7 @@ process:
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
-	sk_mark_ll(sk, skb);
+	sk_mark_napi_id(sk, skb);
 	skb->dev = NULL;
 
 	bh_lock_sock_nested(sk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 40e72034da0..f4058150262 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -844,7 +844,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (sk != NULL) {
 		int ret;
 
-		sk_mark_ll(sk, skb);
+		sk_mark_napi_id(sk, skb);
 		ret = udpv6_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
-- 
cgit v1.2.3-70-g09d2
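
As a hedged illustration of the renamed driver-side hook (not part of this series): after this patch a NIC driver exposes its low-latency receive callback as .ndo_busy_poll and tags each received skb with skb_mark_napi_id(). In the sketch below the my_* names are made-up placeholders; ndo_busy_poll, skb_mark_napi_id(), LL_FLUSH_BUSY and the statistics behaviour are the interfaces actually touched above.

/* Sketch of a driver after this rename; my_ring, my_ring_trylock(),
 * my_ring_unlock(), my_clean_rx() and my_start_xmit() are hypothetical.
 */
#include <linux/netdevice.h>
#include <net/busy_poll.h>

#ifdef CONFIG_NET_LL_RX_POLL
/* Invoked from sk_busy_loop(): poll one RX ring without waiting for an IRQ. */
static int my_busy_poll(struct napi_struct *napi)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	int cleaned;

	if (!my_ring_trylock(ring))
		return LL_FLUSH_BUSY;		/* NAPI context owns the ring */

	cleaned = my_clean_rx(ring);		/* calls skb_mark_napi_id() per skb */
	my_ring_unlock(ring);

	return cleaned;				/* >0 is added to the LOWLATENCYRXPACKETS MIB */
}
#endif

static const struct net_device_ops my_netdev_ops = {
	.ndo_start_xmit	= my_start_xmit,
#ifdef CONFIG_NET_LL_RX_POLL
	.ndo_busy_poll	= my_busy_poll,
#endif
};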


From 64b0dc517ea1b35d02565a779e6cb77ae9045685 Mon Sep 17 00:00:00 2001
From: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Date: Wed, 10 Jul 2013 17:13:36 +0300
Subject: net: rename busy poll socket op and globals

Rename SO_LL to SO_BUSY_POLL.
Rename sysctl_net_ll_{read,poll} to sysctl_net_busy_{read,poll}.
Fix up users of these variables.
Fix documentation for sysctl.

A patch for the socket.7 man page will follow separately,
because of limitations of my mail setup.

Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 Documentation/sysctl/net.txt           | 17 +++++++++--------
 arch/alpha/include/uapi/asm/socket.h   |  2 +-
 arch/avr32/include/uapi/asm/socket.h   |  2 +-
 arch/cris/include/uapi/asm/socket.h    |  2 +-
 arch/frv/include/uapi/asm/socket.h     |  2 +-
 arch/h8300/include/uapi/asm/socket.h   |  2 +-
 arch/ia64/include/uapi/asm/socket.h    |  2 +-
 arch/m32r/include/uapi/asm/socket.h    |  2 +-
 arch/mips/include/uapi/asm/socket.h    |  2 +-
 arch/mn10300/include/uapi/asm/socket.h |  2 +-
 arch/parisc/include/uapi/asm/socket.h  |  2 +-
 arch/powerpc/include/uapi/asm/socket.h |  2 +-
 arch/s390/include/uapi/asm/socket.h    |  2 +-
 arch/sparc/include/uapi/asm/socket.h   |  2 +-
 arch/xtensa/include/uapi/asm/socket.h  |  2 +-
 include/net/busy_poll.h                |  8 ++++----
 include/uapi/asm-generic/socket.h      |  2 +-
 net/core/sock.c                        |  6 +++---
 net/core/sysctl_net_core.c             |  8 ++++----
 net/socket.c                           |  4 ++--
 20 files changed, 37 insertions(+), 36 deletions(-)

diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index d69e14c9002..1c15043aaee 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -50,26 +50,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt,
 it's a Per-CPU variable.
 Default: 64
 
-low_latency_read
+busy_read
 ----------------
 Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
 Approximate time in us to busy loop waiting for packets on the device queue.
-This sets the default value of the SO_LL socket option.
-Can be set or overridden per socket by setting socket option SO_LL, which is
-the preferred method of enabling.
-If you need to enable the feature globally via sysctl, a value of 50 is recommended.
+This sets the default value of the SO_BUSY_POLL socket option.
+Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
+which is the preferred method of enabling. If you need to enable the feature
+globally via sysctl, a value of 50 is recommended.
 Will increase power usage.
 Default: 0 (off)
 
-low_latency_poll
+busy_poll
 ----------------
 Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
 Approximate time in us to busy loop waiting for events.
 Recommended value depends on the number of sockets you poll on.
 For several sockets 50, for several hundreds 100.
 For more than that you probably want to use epoll.
-Note that only sockets with SO_LL set will be busy polled, so you want to either
-selectively set SO_LL on those sockets or set sysctl.net.low_latency_read globally.
+Note that only sockets with SO_BUSY_POLL set will be busy polled,
+so you want to either selectively set SO_BUSY_POLL on those sockets or set
+sysctl.net.busy_read globally.
 Will increase power usage.
 Default: 0 (off)
 
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 4885825e498..467de010ea7 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -81,6 +81,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL			46
 
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 79b61798ebf..11c4259c62f 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -74,6 +74,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* __ASM_AVR32_SOCKET_H */
diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
index 47b1ec55092..eb723e51554 100644
--- a/arch/cris/include/uapi/asm/socket.h
+++ b/arch/cris/include/uapi/asm/socket.h
@@ -76,7 +76,7 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* _ASM_SOCKET_H */
 
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index dbc08520f22..f0cb1c34116 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -74,7 +74,7 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* _ASM_SOCKET_H */
 
diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h
index a38d38a6520..9490758c5e2 100644
--- a/arch/h8300/include/uapi/asm/socket.h
+++ b/arch/h8300/include/uapi/asm/socket.h
@@ -74,6 +74,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index d3358b76068..556d0701a15 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -83,6 +83,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 44aaf4639a4..24be7c8da86 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -74,6 +74,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 6a07992ba6c..61c01f054d1 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -92,6 +92,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index db80fd3e398..e2a2b203eb0 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -74,6 +74,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index f866fff9a00..71700e636a8 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -73,7 +73,7 @@
 
 #define SO_SELECT_ERR_QUEUE	0x4026
 
-#define SO_LL			0x4027
+#define SO_BUSY_POLL		0x4027
 
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 405fb09bda9..a6d74467c9e 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -81,6 +81,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif	/* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 0c5105fbaaf..92494494692 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -80,6 +80,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index b46c3fa0b26..4e1d66c3ce7 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -70,7 +70,7 @@
 
 #define SO_SELECT_ERR_QUEUE	0x0029
 
-#define SO_LL			0x0030
+#define SO_BUSY_POLL		0x0030
 
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION		0x5001
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index b21ace4fc9b..c114483010c 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -85,6 +85,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif	/* _XTENSA_SOCKET_H */
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 4ff71908fd4..a14339c2985 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -30,8 +30,8 @@
 #ifdef CONFIG_NET_LL_RX_POLL
 
 struct napi_struct;
-extern unsigned int sysctl_net_ll_read __read_mostly;
-extern unsigned int sysctl_net_ll_poll __read_mostly;
+extern unsigned int sysctl_net_busy_read __read_mostly;
+extern unsigned int sysctl_net_busy_poll __read_mostly;
 
 /* return values from ndo_ll_poll */
 #define LL_FLUSH_FAILED		-1
@@ -39,7 +39,7 @@ extern unsigned int sysctl_net_ll_poll __read_mostly;
 
 static inline bool net_busy_loop_on(void)
 {
-	return sysctl_net_ll_poll;
+	return sysctl_net_busy_poll;
 }
 
 /* a wrapper to make debug_smp_processor_id() happy
@@ -72,7 +72,7 @@ static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
 /* in poll/select we use the global sysctl_net_ll_poll value */
 static inline unsigned long busy_loop_end_time(void)
 {
-	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
+	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
 }
 
 static inline bool sk_can_busy_loop(struct sock *sk)
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index ca3a20d772a..f04b69b6abf 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -76,6 +76,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
-#define SO_LL			46
+#define SO_BUSY_POLL		46
 
 #endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/net/core/sock.c b/net/core/sock.c
index 9bfe83f4d67..548d716c5f6 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -901,7 +901,7 @@ set_rcvbuf:
 		break;
 
 #ifdef CONFIG_NET_LL_RX_POLL
-	case SO_LL:
+	case SO_BUSY_POLL:
 		/* allow unprivileged users to decrease the value */
 		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
 			ret = -EPERM;
@@ -1171,7 +1171,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 #ifdef CONFIG_NET_LL_RX_POLL
-	case SO_LL:
+	case SO_BUSY_POLL:
 		v.val = sk->sk_ll_usec;
 		break;
 #endif
@@ -2294,7 +2294,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 #ifdef CONFIG_NET_LL_RX_POLL
 	sk->sk_napi_id		=	0;
-	sk->sk_ll_usec		=	sysctl_net_ll_read;
+	sk->sk_ll_usec		=	sysctl_net_busy_read;
 #endif
 
 	/*
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 1a298cb3dae..66096861663 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -300,15 +300,15 @@ static struct ctl_table net_core_table[] = {
 #endif /* CONFIG_NET_FLOW_LIMIT */
 #ifdef CONFIG_NET_LL_RX_POLL
 	{
-		.procname	= "low_latency_poll",
-		.data		= &sysctl_net_ll_poll,
+		.procname	= "busy_poll",
+		.data		= &sysctl_net_busy_poll,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
 	{
-		.procname	= "low_latency_read",
-		.data		= &sysctl_net_ll_read,
+		.procname	= "busy_read",
+		.data		= &sysctl_net_busy_read,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
diff --git a/net/socket.c b/net/socket.c
index 6a3e9a3f50a..829b460acb8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -107,8 +107,8 @@
 #include <net/busy_poll.h>
 
 #ifdef CONFIG_NET_LL_RX_POLL
-unsigned int sysctl_net_ll_read __read_mostly;
-unsigned int sysctl_net_ll_poll __read_mostly;
+unsigned int sysctl_net_busy_read __read_mostly;
+unsigned int sysctl_net_busy_poll __read_mostly;
 #endif
 
 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
-- 
cgit v1.2.3-70-g09d2
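
For completeness, a hedged user-space sketch of the renamed socket option (not part of the series): per the updated Documentation/sysctl/net.txt, setting SO_BUSY_POLL per socket is the preferred way to enable busy polling, and 50 us is the figure that document suggests. The function enable_busy_poll() below is purely illustrative; SO_BUSY_POLL and its asm-generic value 46 come from this patch.

/* User-space illustration of the renamed option; enable_busy_poll() is
 * hypothetical, SO_BUSY_POLL (46 in asm-generic) is defined by this series.
 */
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL	46	/* asm-generic value from this series */
#endif

int enable_busy_poll(int fd)
{
	int usec = 50;	/* suggested figure from Documentation/sysctl/net.txt */

	/* Raising the value above the socket's current setting (initially the
	 * busy_read sysctl default) requires CAP_NET_ADMIN.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec)) < 0) {
		perror("setsockopt(SO_BUSY_POLL)");
		return -1;
	}
	return 0;
}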