From 3cc7587b30032b7c4dd9610a55a77519e84da7db Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Fri, 17 May 2013 09:10:34 +0000 Subject: Documentation/sysctl/net.txt: fix (attribute removal). This patch removes the mention of the sysfs net_device weight attribute (class/net/<device>/weight) in Documentation/sysctl/net.txt, since the net sysfs weight attribute was removed by the following patch: [NET]: Make NAPI polling independent of struct net_device objects bea3348eef27e6044b6161fd04c3152215f96411 Signed-off-by: Rami Rosen Signed-off-by: David S. Miller --- Documentation/sysctl/net.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'Documentation/sysctl') diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 98335b7a533..c1f8640c2fc 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -93,8 +93,7 @@ netdev_budget Maximum number of packets taken from all interfaces in one polling cycle (NAPI poll). In one polling cycle interfaces which are registered to polling are -probed in a round-robin manner. The limit of packets in one such probe can be -set per-device via sysfs class/net/<device>/weight . +probed in a round-robin manner. netdev_max_backlog ------------------ -- cgit v1.2.3-70-g09d2 From 060212928670593fb89243640bf05cf89560b023 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Mon, 10 Jun 2013 11:39:50 +0300 Subject: net: add low latency socket poll Adds an ndo_ll_poll method and the code that supports it. This method can be used by low latency applications to busy-poll Ethernet device queues directly from the socket code. sysctl_net_ll_poll controls how many microseconds to poll. Default is zero (disabled). Individual protocol support will be added by subsequent patches. Signed-off-by: Alexander Duyck Signed-off-by: Jesse Brandeburg Signed-off-by: Eliezer Tamir Acked-by: Eric Dumazet Tested-by: Willem de Bruijn Signed-off-by: David S. Miller --- Documentation/sysctl/net.txt | 7 ++ include/linux/netdevice.h | 3 + include/linux/skbuff.h | 8 ++- include/net/ll_poll.h | 148 +++++++++++++++++++++++++++++++++++++++++++ include/net/sock.h | 4 ++ include/uapi/linux/snmp.h | 1 + net/Kconfig | 12 ++++ net/core/skbuff.c | 4 ++ net/core/sock.c | 6 ++ net/core/sysctl_net_core.c | 10 +++ net/ipv4/proc.c | 1 + net/socket.c | 6 ++ 12 files changed, 208 insertions(+), 2 deletions(-) create mode 100644 include/net/ll_poll.h (limited to 'Documentation/sysctl') diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index c1f8640c2fc..85ab72dcdc3 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -50,6 +50,13 @@ The maximum number of packets that kernel can handle on a NAPI interrupt, it's a Per-CPU variable. Default: 64 +low_latency_poll +---------------- +Low latency busy poll timeout. (needs CONFIG_NET_LL_RX_POLL) +Approximate time in us to spin waiting for packets on the device queue. +Recommended value is 50. May increase power usage.
+Default: 0 (off) + rmem_default ------------ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 39bbd462d68..2ecb96d9a1e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -971,6 +971,9 @@ struct net_device_ops { struct netpoll_info *info, gfp_t gfp); void (*ndo_netpoll_cleanup)(struct net_device *dev); +#endif +#ifdef CONFIG_NET_LL_RX_POLL + int (*ndo_ll_poll)(struct napi_struct *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, int queue, u8 *mac); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9995834d2cb..400d82ae2b0 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -386,6 +386,7 @@ typedef unsigned char *sk_buff_data_t; * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions + * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark * @dropcount: total number of sk_receive_queue overflows @@ -500,8 +501,11 @@ struct sk_buff { /* 7/9 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); -#ifdef CONFIG_NET_DMA - dma_cookie_t dma_cookie; +#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL + union { + unsigned int napi_id; + dma_cookie_t dma_cookie; + }; #endif #ifdef CONFIG_NETWORK_SECMARK __u32 secmark; diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h new file mode 100644 index 00000000000..bc262f88173 --- /dev/null +++ b/include/net/ll_poll.h @@ -0,0 +1,148 @@ +/* + * Low Latency Sockets + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Author: Eliezer Tamir + * + * Contact Information: + * e1000-devel Mailing List + */ + +/* + * For now this depends on CONFIG_X86_TSC + */ + +#ifndef _LINUX_NET_LL_POLL_H +#define _LINUX_NET_LL_POLL_H + +#include +#include + +#ifdef CONFIG_NET_LL_RX_POLL + +struct napi_struct; +extern unsigned long sysctl_net_ll_poll __read_mostly; + +/* return values from ndo_ll_poll */ +#define LL_FLUSH_FAILED -1 +#define LL_FLUSH_BUSY -2 + +/* we don't mind a ~2.5% imprecision */ +#define TSC_MHZ (tsc_khz >> 10) + +static inline cycles_t ll_end_time(void) +{ + return TSC_MHZ * ACCESS_ONCE(sysctl_net_ll_poll) + get_cycles(); +} + +static inline bool sk_valid_ll(struct sock *sk) +{ + return sysctl_net_ll_poll && sk->sk_napi_id && + !need_resched() && !signal_pending(current); +} + +static inline bool can_poll_ll(cycles_t end_time) +{ + return !time_after((unsigned long)get_cycles(), + (unsigned long)end_time); +} + +static inline bool sk_poll_ll(struct sock *sk, int nonblock) +{ + cycles_t end_time = ll_end_time(); + const struct net_device_ops *ops; + struct napi_struct *napi; + int rc = false; + + /* + * rcu read lock for napi hash + * bh so we don't race with net_rx_action + */ + rcu_read_lock_bh(); + + napi = napi_by_id(sk->sk_napi_id); + if (!napi) + goto out; + + ops = napi->dev->netdev_ops; + if (!ops->ndo_ll_poll) + goto out; + + do { + + rc = ops->ndo_ll_poll(napi); + + if (rc == LL_FLUSH_FAILED) + break; /* permanent failure */ + + if (rc > 0) + /* local bh are disabled so it is ok to use _BH */ + NET_ADD_STATS_BH(sock_net(sk), + LINUX_MIB_LOWLATENCYRXPACKETS, rc); + + } while (skb_queue_empty(&sk->sk_receive_queue) + && can_poll_ll(end_time) && !nonblock); + + rc = !skb_queue_empty(&sk->sk_receive_queue); +out: + rcu_read_unlock_bh(); + return rc; +} + +/* used in the NIC receive handler to mark the skb */ +static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) +{ + skb->napi_id = napi->napi_id; +} + +/* used in the protocol hanlder to propagate the napi_id to the socket */ +static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) +{ + sk->sk_napi_id = skb->napi_id; +} + +#else /* CONFIG_NET_LL_RX_POLL */ + +static inline cycles_t ll_end_time(void) +{ + return 0; +} + +static inline bool sk_valid_ll(struct sock *sk) +{ + return false; +} + +static inline bool sk_poll_ll(struct sock *sk, int nonblock) +{ + return false; +} + +static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) +{ +} + +static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) +{ +} + +static inline bool can_poll_ll(cycles_t end_time) +{ + return false; +} + +#endif /* CONFIG_NET_LL_RX_POLL */ +#endif /* _LINUX_NET_LL_POLL_H */ diff --git a/include/net/sock.h b/include/net/sock.h index 66772cf8c3c..ac8e1818380 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -229,6 +229,7 @@ struct cg_proto; * @sk_omem_alloc: "o" is "option" or "other" * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward + * @sk_napi_id: id of the last napi context to receive data for sk * @sk_allocation: allocation mode * @sk_sndbuf: size of send buffer in bytes * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, @@ -324,6 +325,9 @@ struct sock { int sk_forward_alloc; #ifdef CONFIG_RPS __u32 sk_rxhash; +#endif +#ifdef CONFIG_NET_LL_RX_POLL + unsigned int sk_napi_id; #endif atomic_t sk_drops; int sk_rcvbuf; diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index df2e8b4f9c0..26cbf76f805 100644 --- 
a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -253,6 +253,7 @@ enum LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ + LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ __LINUX_MIB_MAX }; diff --git a/net/Kconfig b/net/Kconfig index 523e43e6da1..d6a9ce6e180 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -243,6 +243,18 @@ config NETPRIO_CGROUP Cgroup subsystem for use in assigning processes to network priorities on a per-interface basis +config NET_LL_RX_POLL + bool "Low Latency Receive Poll" + depends on X86_TSC + default n + ---help--- + Support Low Latency Receive Queue Poll. + (For network card drivers which support this option.) + When waiting for data in read or poll call directly into the the device driver + to flush packets which may be pending on the device queues into the stack. + + If unsure, say N. + config BQL boolean depends on SYSFS diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 73f57a0e152..4a4181e16c1 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -733,6 +733,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->vlan_tci = old->vlan_tci; skb_copy_secmark(new, old); + +#ifdef CONFIG_NET_LL_RX_POLL + new->napi_id = old->napi_id; +#endif } /* diff --git a/net/core/sock.c b/net/core/sock.c index 88868a9d21d..788c0da5eed 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -139,6 +139,8 @@ #include #endif +#include + static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); @@ -2284,6 +2286,10 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_stamp = ktime_set(-1L, 0); +#ifdef CONFIG_NET_LL_RX_POLL + sk->sk_napi_id = 0; +#endif + /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 741db5fc780..4b48f39582b 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -19,6 +19,7 @@ #include #include #include +#include static int one = 1; @@ -284,6 +285,15 @@ static struct ctl_table net_core_table[] = { .proc_handler = flow_limit_table_len_sysctl }, #endif /* CONFIG_NET_FLOW_LIMIT */ +#ifdef CONFIG_NET_LL_RX_POLL + { + .procname = "low_latency_poll", + .data = &sysctl_net_ll_poll, + .maxlen = sizeof(unsigned long), + .mode = 0644, + .proc_handler = proc_doulongvec_minmax + }, +#endif #endif /* CONFIG_NET */ { .procname = "netdev_budget", diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 2a5bf86d241..6577a1149a4 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -273,6 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), + SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), SNMP_MIB_SENTINEL }; diff --git a/net/socket.c b/net/socket.c index 3ebdcb805c5..21fd29f63ed 100644 --- a/net/socket.c +++ b/net/socket.c @@ -104,6 +104,12 @@ #include #include #include +#include + +#ifdef CONFIG_NET_LL_RX_POLL +unsigned long sysctl_net_ll_poll __read_mostly; +EXPORT_SYMBOL_GPL(sysctl_net_ll_poll); +#endif static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t sock_aio_read(struct kiocb 
*iocb, const struct iovec *iov, -- cgit v1.2.3-70-g09d2 From cc79dd1ba9c1021c2ac6ae200a65ec38ee8db351 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Mon, 17 Jun 2013 10:54:37 -0400 Subject: tipc: change socket buffer overflow control to respect sk_rcvbuf As per feedback from the netdev community, we change the buffer overflow protection algorithm in receiving sockets so that it always respects the nominal upper limit set in sk_rcvbuf. Instead of scaling up from a small sk_rcvbuf value, which leads to violation of the configured sk_rcvbuf limit, we now calculate the weighted per-message limit by scaling down from a much bigger value, still in the same field, according to the importance priority of the received message. To allow for administrative tunability of the socket receive buffer size, we create a tipc_rmem sysctl variable to allow the user to configure an even bigger value via sysctl command. It is a size of three (min/default/max) to be consistent with things like tcp_rmem. By default, the value initialized in tipc_rmem[1] is equal to the receive socket size needed by a TIPC_CRITICAL_IMPORTANCE message. This value is also set as the default value of sk_rcvbuf. Originally-by: Jon Maloy Cc: Neil Horman Cc: Jon Maloy [Ying: added sysctl variation to Jon's original patch] Signed-off-by: Ying Xue [PG: don't compile sysctl.c if not config'd; add Documentation] Signed-off-by: Paul Gortmaker Signed-off-by: David S. Miller --- Documentation/sysctl/net.txt | 17 +++++++++++- net/tipc/Makefile | 1 + net/tipc/core.c | 12 +++++++-- net/tipc/core.h | 9 +++++++ net/tipc/port.h | 2 ++ net/tipc/socket.c | 19 ++++++------- net/tipc/sysctl.c | 64 ++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 112 insertions(+), 12 deletions(-) create mode 100644 net/tipc/sysctl.c (limited to 'Documentation/sysctl') diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 85ab72dcdc3..5369879eafe 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -26,7 +26,7 @@ Table : Subdirectories in /proc/sys/net ipv4 IP version 4 x25 X.25 protocol ipx IPX token-ring IBM token ring bridge Bridging decnet DEC net - ipv6 IP version 6 + ipv6 IP version 6 tipc TIPC .............................................................................. 1. /proc/sys/net/core - Network core options @@ -207,3 +207,18 @@ IPX. The /proc/net/ipx_route table holds a list of IPX routes. For each route it gives the destination network, the router node (or Directly) and the network address of the router (or Connected) for internal networks. + +6. TIPC +------------------------------------------------------- + +The TIPC protocol now has a tunable for the receive memory, similar to the +tcp_rmem - i.e. a vector of 3 INTEGERs: (min, default, max) + + # cat /proc/sys/net/tipc/tipc_rmem + 4252725 34021800 68043600 + # + +The max value is set to CONN_OVERLOAD_LIMIT, and the default and min values +are scaled (shifted) versions of that same value. Note that the min value +is not at this point in time used in any meaningful way, but the triplet is +preserved in order to be consistent with things like tcp_rmem. 
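
For reference, the documented tipc_rmem triplet can be reproduced from CONN_OVERLOAD_LIMIT with a small stand-alone sketch (illustration only, plain userspace C). The importance constants (0 and 3) are the TIPC uapi values, and the SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE) figure of 66384 bytes is inferred from the documented maximum of 68043600, so it may differ on other builds; the shift arithmetic mirrors the tipc_init() change later in this patch.

#include <stdio.h>

#define TIPC_FLOW_CONTROL_WIN		512
#define TIPC_MAX_MSG_TRUESIZE		66384	/* assumed SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE) */
#define CONN_OVERLOAD_LIMIT		((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
					 TIPC_MAX_MSG_TRUESIZE)
#define TIPC_LOW_IMPORTANCE		0
#define TIPC_CRITICAL_IMPORTANCE	3

int main(void)
{
	int rmem_min = CONN_OVERLOAD_LIMIT >> 4 << TIPC_LOW_IMPORTANCE;
	int rmem_def = CONN_OVERLOAD_LIMIT >> 4 << TIPC_CRITICAL_IMPORTANCE;
	int rmem_max = CONN_OVERLOAD_LIMIT;

	/* prints: 4252725 34021800 68043600 */
	printf("%d %d %d\n", rmem_min, rmem_def, rmem_max);
	return 0;
}

The middle value is the one that tipc_create() installs as the default sk->sk_rcvbuf, which is why it corresponds to the TIPC_CRITICAL_IMPORTANCE figure quoted in the commit message.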
diff --git a/net/tipc/Makefile b/net/tipc/Makefile index 4df8e02d900..02636d0a90c 100644 --- a/net/tipc/Makefile +++ b/net/tipc/Makefile @@ -11,3 +11,4 @@ tipc-y += addr.o bcast.o bearer.o config.o \ socket.o log.o eth_media.o tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o +tipc-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/tipc/core.c b/net/tipc/core.c index 7ec2c1eb94f..b0e42a08729 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -39,6 +39,7 @@ #include "name_table.h" #include "subscr.h" #include "config.h" +#include "port.h" #include @@ -50,7 +51,7 @@ u32 tipc_own_addr __read_mostly; int tipc_max_ports __read_mostly; int tipc_net_id __read_mostly; int tipc_remote_management __read_mostly; - +int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */ /** * tipc_buf_acquire - creates a TIPC message buffer @@ -118,6 +119,7 @@ static void tipc_core_stop(void) tipc_nametbl_stop(); tipc_ref_table_stop(); tipc_socket_stop(); + tipc_unregister_sysctl(); } /** @@ -142,13 +144,14 @@ static int tipc_core_start(void) res = tipc_netlink_start(); if (!res) res = tipc_socket_init(); + if (!res) + res = tipc_register_sysctl(); if (res) tipc_core_stop(); return res; } - static int __init tipc_init(void) { int res; @@ -160,6 +163,11 @@ static int __init tipc_init(void) tipc_max_ports = CONFIG_TIPC_PORTS; tipc_net_id = 4711; + sysctl_tipc_rmem[0] = CONN_OVERLOAD_LIMIT >> 4 << TIPC_LOW_IMPORTANCE; + sysctl_tipc_rmem[1] = CONN_OVERLOAD_LIMIT >> 4 << + TIPC_CRITICAL_IMPORTANCE; + sysctl_tipc_rmem[2] = CONN_OVERLOAD_LIMIT; + res = tipc_core_start(); if (res) pr_err("Unable to start in single node mode\n"); diff --git a/net/tipc/core.h b/net/tipc/core.h index 0207db04179..fe7f2b7c19f 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -80,6 +80,7 @@ extern u32 tipc_own_addr __read_mostly; extern int tipc_max_ports __read_mostly; extern int tipc_net_id __read_mostly; extern int tipc_remote_management __read_mostly; +extern int sysctl_tipc_rmem[3] __read_mostly; /* * Other global variables @@ -97,6 +98,14 @@ extern void tipc_netlink_stop(void); extern int tipc_socket_init(void); extern void tipc_socket_stop(void); +#ifdef CONFIG_SYSCTL +extern int tipc_register_sysctl(void); +extern void tipc_unregister_sysctl(void); +#else +#define tipc_register_sysctl() 0 +#define tipc_unregister_sysctl() +#endif + /* * TIPC timer and signal code */ diff --git a/net/tipc/port.h b/net/tipc/port.h index fb66e2e5f4d..2485649c408 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h @@ -43,6 +43,8 @@ #include "node_subscr.h" #define TIPC_FLOW_CONTROL_WIN 512 +#define CONN_OVERLOAD_LIMIT ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \ + SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) typedef void (*tipc_msg_err_event) (void *usr_handle, u32 portref, struct sk_buff **buf, unsigned char const *data, diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 515ce38e4f4..aba4255f297 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -43,8 +43,6 @@ #define SS_LISTENING -1 /* socket is listening */ #define SS_READY -2 /* socket is connectionless */ -#define CONN_OVERLOAD_LIMIT ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \ - SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ struct tipc_sock { @@ -203,6 +201,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, sock_init_data(sock, sk); sk->sk_backlog_rcv = backlog_rcv; + sk->sk_rcvbuf = sysctl_tipc_rmem[1]; sk->sk_data_ready = tipc_data_ready; sk->sk_write_space = tipc_write_space; tipc_sk(sk)->p = tp_ptr; @@ -1233,10 +1232,10 
@@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf) * For all connectionless messages, by default new queue limits are * as belows: * - * TIPC_LOW_IMPORTANCE (5MB) - * TIPC_MEDIUM_IMPORTANCE (10MB) - * TIPC_HIGH_IMPORTANCE (20MB) - * TIPC_CRITICAL_IMPORTANCE (40MB) + * TIPC_LOW_IMPORTANCE (4 MB) + * TIPC_MEDIUM_IMPORTANCE (8 MB) + * TIPC_HIGH_IMPORTANCE (16 MB) + * TIPC_CRITICAL_IMPORTANCE (32 MB) * * Returns overload limit according to corresponding message importance */ @@ -1246,9 +1245,10 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf) unsigned int limit; if (msg_connected(msg)) - limit = CONN_OVERLOAD_LIMIT; + limit = sysctl_tipc_rmem[2]; else - limit = sk->sk_rcvbuf << (msg_importance(msg) + 5); + limit = sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE << + msg_importance(msg); return limit; } @@ -1847,7 +1847,8 @@ static const struct net_proto_family tipc_family_ops = { static struct proto tipc_proto = { .name = "TIPC", .owner = THIS_MODULE, - .obj_size = sizeof(struct tipc_sock) + .obj_size = sizeof(struct tipc_sock), + .sysctl_rmem = sysctl_tipc_rmem }; /** diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c new file mode 100644 index 00000000000..f3fef93325a --- /dev/null +++ b/net/tipc/sysctl.c @@ -0,0 +1,64 @@ +/* + * net/tipc/sysctl.c: sysctl interface to TIPC subsystem + * + * Copyright (c) 2013, Wind River Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "core.h" + +#include + +static struct ctl_table_header *tipc_ctl_hdr; + +static struct ctl_table tipc_table[] = { + { + .procname = "tipc_rmem", + .data = &sysctl_tipc_rmem, + .maxlen = sizeof(sysctl_tipc_rmem), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + {} +}; + +int tipc_register_sysctl(void) +{ + tipc_ctl_hdr = register_net_sysctl(&init_net, "net/tipc", tipc_table); + if (tipc_ctl_hdr == NULL) + return -ENOMEM; + return 0; +} + +void tipc_unregister_sysctl(void) +{ + unregister_net_sysctl_table(tipc_ctl_hdr); +} -- cgit v1.2.3-70-g09d2 From 2d48d67fa8cd129ea85ea02d91b4a793286866f8 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Mon, 24 Jun 2013 10:28:03 +0300 Subject: net: poll/select low latency socket support select/poll busy-poll support. Split sysctl value into two separate ones, one for read and one for poll. updated Documentation/sysctl/net.txt Add a new poll flag POLL_LL. When this flag is set, sock_poll will call sk_poll_ll if possible. sock_poll sets this flag in its return value to indicate to select/poll when a socket that can busy poll is found. When poll/select have nothing to report, call the low-level sock_poll again until we are out of time or we find something. Once the system call finds something, it stops setting POLL_LL, so it can return the result to the user ASAP. Signed-off-by: Eliezer Tamir Signed-off-by: David S. Miller --- Documentation/sysctl/net.txt | 18 ++++++++++++++++-- fs/select.c | 34 +++++++++++++++++++++++++++++----- include/net/ll_poll.h | 35 ++++++++++++++++++++++------------- include/uapi/asm-generic/poll.h | 2 ++ net/core/sock.c | 2 +- net/core/sysctl_net_core.c | 8 ++++++++ net/socket.c | 14 +++++++++++++- 7 files changed, 91 insertions(+), 22 deletions(-) (limited to 'Documentation/sysctl') diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 5369879eafe..e658bbfb641 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -50,13 +50,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt, it's a Per-CPU variable. Default: 64 -low_latency_poll +low_latency_read ---------------- -Low latency busy poll timeout. (needs CONFIG_NET_LL_RX_POLL) +Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL) Approximate time in us to spin waiting for packets on the device queue. +This sets the default value of the SO_LL socket option. +Can be set or overridden per socket by setting socket option SO_LL. Recommended value is 50. May increase power usage. Default: 0 (off) +low_latency_poll +---------------- +Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL) +Approximate time in us to spin waiting for packets on the device queue. +Recommended value depends on the number of sockets you poll on. +For several sockets 50, for several hundreds 100. +For more than that you probably want to use epoll. +Note that only sockets with SO_LL set will be busy polled, so you want to either +selectively set SO_LL on those sockets or set sysctl.net.low_latency_read globally. +May increase power usage. 
+Default: 0 (off) + rmem_default ------------ diff --git a/fs/select.c b/fs/select.c index 8c1c96c2706..79b876eb91d 100644 --- a/fs/select.c +++ b/fs/select.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -384,9 +385,10 @@ get_max: #define POLLEX_SET (POLLPRI) static inline void wait_key_set(poll_table *wait, unsigned long in, - unsigned long out, unsigned long bit) + unsigned long out, unsigned long bit, + unsigned int ll_flag) { - wait->_key = POLLEX_SET; + wait->_key = POLLEX_SET | ll_flag; if (in & bit) wait->_key |= POLLIN_SET; if (out & bit) @@ -400,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) poll_table *wait; int retval, i, timed_out = 0; unsigned long slack = 0; + unsigned int ll_flag = POLL_LL; + u64 ll_time = ll_end_time(); rcu_read_lock(); retval = max_select_fd(n, fds); @@ -422,6 +426,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) retval = 0; for (;;) { unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp; + bool can_ll = false; inp = fds->in; outp = fds->out; exp = fds->ex; rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex; @@ -449,7 +454,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) f_op = f.file->f_op; mask = DEFAULT_POLLMASK; if (f_op && f_op->poll) { - wait_key_set(wait, in, out, bit); + wait_key_set(wait, in, out, + bit, ll_flag); mask = (*f_op->poll)(f.file, wait); } fdput(f); @@ -468,6 +474,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) retval++; wait->_qproc = NULL; } + if (mask & POLL_LL) + can_ll = true; + /* got something, stop busy polling */ + if (retval) + ll_flag = 0; } } if (res_in) @@ -486,6 +497,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) break; } + if (can_ll && can_poll_ll(ll_time)) + continue; + /* * If this is the first loop and we have a timeout * given, then we convert to ktime_t and set the to @@ -717,7 +731,8 @@ struct poll_list { * pwait poll_table will be used by the fd-provided poll handler for waiting, * if pwait->_qproc is non-NULL. */ -static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) +static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait, + bool *can_ll, unsigned int ll_flag) { unsigned int mask; int fd; @@ -731,7 +746,10 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) mask = DEFAULT_POLLMASK; if (f.file->f_op && f.file->f_op->poll) { pwait->_key = pollfd->events|POLLERR|POLLHUP; + pwait->_key |= ll_flag; mask = f.file->f_op->poll(f.file, pwait); + if (mask & POLL_LL) + *can_ll = true; } /* Mask out unneeded events. */ mask &= pollfd->events | POLLERR | POLLHUP; @@ -750,6 +768,8 @@ static int do_poll(unsigned int nfds, struct poll_list *list, ktime_t expire, *to = NULL; int timed_out = 0, count = 0; unsigned long slack = 0; + unsigned int ll_flag = POLL_LL; + u64 ll_time = ll_end_time(); /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { @@ -762,6 +782,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, for (;;) { struct poll_list *walk; + bool can_ll = false; for (walk = list; walk != NULL; walk = walk->next) { struct pollfd * pfd, * pfd_end; @@ -776,9 +797,10 @@ static int do_poll(unsigned int nfds, struct poll_list *list, * this. They'll get immediately deregistered * when we break out and return. 
*/ - if (do_pollfd(pfd, pt)) { + if (do_pollfd(pfd, pt, &can_ll, ll_flag)) { count++; pt->_qproc = NULL; + ll_flag = 0; } } } @@ -795,6 +817,8 @@ static int do_poll(unsigned int nfds, struct poll_list *list, if (count || timed_out) break; + if (can_ll && can_poll_ll(ll_time)) + continue; /* * If this is the first loop and we have a timeout * given, then we convert to ktime_t and set the to diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index fcc7c365cee..5bf2b3a6129 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -30,6 +30,7 @@ #ifdef CONFIG_NET_LL_RX_POLL struct napi_struct; +extern unsigned int sysctl_net_ll_read __read_mostly; extern unsigned int sysctl_net_ll_poll __read_mostly; /* return values from ndo_ll_poll */ @@ -38,17 +39,18 @@ extern unsigned int sysctl_net_ll_poll __read_mostly; /* we can use sched_clock() because we don't care much about precision * we only care that the average is bounded + * we don't mind a ~2.5% imprecision so <<10 instead of *1000 + * sk->sk_ll_usec is a u_int so this can't overflow */ -static inline u64 ll_end_time(struct sock *sk) +static inline u64 ll_sk_end_time(struct sock *sk) { - u64 end_time = ACCESS_ONCE(sk->sk_ll_usec); - - /* we don't mind a ~2.5% imprecision - * sk->sk_ll_usec is a u_int so this can't overflow - */ - end_time = (end_time << 10) + sched_clock(); + return ((u64)ACCESS_ONCE(sk->sk_ll_usec) << 10) + sched_clock(); +} - return end_time; +/* in poll/select we use the global sysctl_net_ll_poll value */ +static inline u64 ll_end_time(void) +{ + return ((u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10) + sched_clock(); } static inline bool sk_valid_ll(struct sock *sk) @@ -62,10 +64,13 @@ static inline bool can_poll_ll(u64 end_time) return !time_after64(sched_clock(), end_time); } +/* when used in sock_poll() nonblock is known at compile time to be true + * so the loop and end_time will be optimized out + */ static inline bool sk_poll_ll(struct sock *sk, int nonblock) { + u64 end_time = nonblock ? 
0 : ll_sk_end_time(sk); const struct net_device_ops *ops; - u64 end_time = ll_end_time(sk); struct napi_struct *napi; int rc = false; @@ -84,7 +89,6 @@ static inline bool sk_poll_ll(struct sock *sk, int nonblock) goto out; do { - rc = ops->ndo_ll_poll(napi); if (rc == LL_FLUSH_FAILED) @@ -95,8 +99,8 @@ static inline bool sk_poll_ll(struct sock *sk, int nonblock) NET_ADD_STATS_BH(sock_net(sk), LINUX_MIB_LOWLATENCYRXPACKETS, rc); - } while (skb_queue_empty(&sk->sk_receive_queue) - && can_poll_ll(end_time) && !nonblock); + } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && + can_poll_ll(end_time)); rc = !skb_queue_empty(&sk->sk_receive_queue); out: @@ -118,7 +122,12 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) #else /* CONFIG_NET_LL_RX_POLL */ -static inline u64 ll_end_time(struct sock *sk) +static inline u64 sk_ll_end_time(struct sock *sk) +{ + return 0; +} + +static inline u64 ll_end_time(void) { return 0; } diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h index 9ce7f44aebd..4aee586979c 100644 --- a/include/uapi/asm-generic/poll.h +++ b/include/uapi/asm-generic/poll.h @@ -30,6 +30,8 @@ #define POLLFREE 0x4000 /* currently only for epoll */ +#define POLL_LL 0x8000 + struct pollfd { int fd; short events; diff --git a/net/core/sock.c b/net/core/sock.c index 1e744b12fda..b6c619f4d47 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2307,7 +2307,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) #ifdef CONFIG_NET_LL_RX_POLL sk->sk_napi_id = 0; - sk->sk_ll_usec = sysctl_net_ll_poll; + sk->sk_ll_usec = sysctl_net_ll_read; #endif /* diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 62702c2053d..afc677eadd9 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -306,6 +306,14 @@ static struct ctl_table net_core_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "low_latency_read", + .data = &sysctl_net_ll_read, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec + }, +# #endif #endif /* CONFIG_NET */ { diff --git a/net/socket.c b/net/socket.c index 3eec3f76b49..4da14cbd49b 100644 --- a/net/socket.c +++ b/net/socket.c @@ -107,6 +107,7 @@ #include #ifdef CONFIG_NET_LL_RX_POLL +unsigned int sysctl_net_ll_read __read_mostly; unsigned int sysctl_net_ll_poll __read_mostly; #endif @@ -1147,13 +1148,24 @@ EXPORT_SYMBOL(sock_create_lite); /* No kernel lock held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) { + unsigned int ll_flag = 0; struct socket *sock; /* * We can't return errors to poll, so it's either yes or no. */ sock = file->private_data; - return sock->ops->poll(file, sock, wait); + + if (sk_valid_ll(sock->sk)) { + /* this socket can poll_ll so tell the system call */ + ll_flag = POLL_LL; + + /* once, only if requested by syscall */ + if (wait && (wait->_key & POLL_LL)) + sk_poll_ll(sock->sk, 1); + } + + return ll_flag | sock->ops->poll(file, sock, wait); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) -- cgit v1.2.3-70-g09d2 From cbf55001b2ddb814329735641be5d29b08c82b08 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Mon, 8 Jul 2013 16:20:34 +0300 Subject: net: rename low latency sockets functions to busy poll Rename functions in include/net/ll_poll.h to busy wait. Clarify documentation about expected power use increase. Rename POLL_LL to POLL_BUSY_LOOP. Add need_resched() testing to poll/select busy loops. 
Note, that in select and poll can_busy_poll is dynamic and is updated continuously to reflect the existence of supported sockets with valid queue information. Signed-off-by: Eliezer Tamir Signed-off-by: David S. Miller --- Documentation/sysctl/net.txt | 12 +++++---- fs/select.c | 60 +++++++++++++++++++++++++---------------- include/net/ll_poll.h | 46 ++++++++++++++++--------------- include/uapi/asm-generic/poll.h | 2 +- net/core/datagram.c | 3 ++- net/ipv4/tcp.c | 6 ++--- net/socket.c | 12 ++++----- 7 files changed, 80 insertions(+), 61 deletions(-) (limited to 'Documentation/sysctl') diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index e658bbfb641..7323b88e26b 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -53,22 +53,24 @@ Default: 64 low_latency_read ---------------- Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL) -Approximate time in us to spin waiting for packets on the device queue. +Approximate time in us to busy loop waiting for packets on the device queue. This sets the default value of the SO_LL socket option. -Can be set or overridden per socket by setting socket option SO_LL. -Recommended value is 50. May increase power usage. +Can be set or overridden per socket by setting socket option SO_LL, which is +the preferred method of enabling. +If you need to enable the feature globally via sysctl, a value of 50 is recommended. +Will increase power usage. Default: 0 (off) low_latency_poll ---------------- Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL) -Approximate time in us to spin waiting for packets on the device queue. +Approximate time in us to busy loop waiting for events. Recommended value depends on the number of sockets you poll on. For several sockets 50, for several hundreds 100. For more than that you probably want to use epoll. Note that only sockets with SO_LL set will be busy polled, so you want to either selectively set SO_LL on those sockets or set sysctl.net.low_latency_read globally. -May increase power usage. +Will increase power usage. Default: 0 (off) rmem_default diff --git a/fs/select.c b/fs/select.c index f28a5859272..25cac5faf6d 100644 --- a/fs/select.c +++ b/fs/select.c @@ -402,9 +402,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) poll_table *wait; int retval, i, timed_out = 0; unsigned long slack = 0; - unsigned int ll_flag = ll_get_flag(); - u64 ll_start = ll_start_time(ll_flag); - u64 ll_time = ll_run_time(); + unsigned int busy_flag = net_busy_loop_on() ? 
POLL_BUSY_LOOP : 0; + u64 busy_start = busy_loop_start_time(busy_flag); + u64 busy_end = busy_loop_end_time(); rcu_read_lock(); retval = max_select_fd(n, fds); @@ -427,7 +427,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) retval = 0; for (;;) { unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp; - bool can_ll = false; + bool can_busy_loop = false; inp = fds->in; outp = fds->out; exp = fds->ex; rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex; @@ -456,7 +456,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) mask = DEFAULT_POLLMASK; if (f_op && f_op->poll) { wait_key_set(wait, in, out, - bit, ll_flag); + bit, busy_flag); mask = (*f_op->poll)(f.file, wait); } fdput(f); @@ -475,11 +475,18 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) retval++; wait->_qproc = NULL; } - if (mask & POLL_LL) - can_ll = true; /* got something, stop busy polling */ - if (retval) - ll_flag = 0; + if (retval) { + can_busy_loop = false; + busy_flag = 0; + + /* + * only remember a returned + * POLL_BUSY_LOOP if we asked for it + */ + } else if (busy_flag & mask) + can_busy_loop = true; + } } if (res_in) @@ -498,8 +505,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) break; } - /* only if on, have sockets with POLL_LL and not out of time */ - if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time)) + /* only if found POLL_BUSY_LOOP sockets && not out of time */ + if (!need_resched() && can_busy_loop && + busy_loop_range(busy_start, busy_end)) continue; /* @@ -734,7 +742,8 @@ struct poll_list { * if pwait->_qproc is non-NULL. */ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait, - bool *can_ll, unsigned int ll_flag) + bool *can_busy_poll, + unsigned int busy_flag) { unsigned int mask; int fd; @@ -748,10 +757,10 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait, mask = DEFAULT_POLLMASK; if (f.file->f_op && f.file->f_op->poll) { pwait->_key = pollfd->events|POLLERR|POLLHUP; - pwait->_key |= ll_flag; + pwait->_key |= busy_flag; mask = f.file->f_op->poll(f.file, pwait); - if (mask & POLL_LL) - *can_ll = true; + if (mask & busy_flag) + *can_busy_poll = true; } /* Mask out unneeded events. */ mask &= pollfd->events | POLLERR | POLLHUP; @@ -770,9 +779,10 @@ static int do_poll(unsigned int nfds, struct poll_list *list, ktime_t expire, *to = NULL; int timed_out = 0, count = 0; unsigned long slack = 0; - unsigned int ll_flag = ll_get_flag(); - u64 ll_start = ll_start_time(ll_flag); - u64 ll_time = ll_run_time(); + unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; + u64 busy_start = busy_loop_start_time(busy_flag); + u64 busy_end = busy_loop_end_time(); + /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { @@ -785,7 +795,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, for (;;) { struct poll_list *walk; - bool can_ll = false; + bool can_busy_loop = false; for (walk = list; walk != NULL; walk = walk->next) { struct pollfd * pfd, * pfd_end; @@ -800,10 +810,13 @@ static int do_poll(unsigned int nfds, struct poll_list *list, * this. They'll get immediately deregistered * when we break out and return. 
*/ - if (do_pollfd(pfd, pt, &can_ll, ll_flag)) { + if (do_pollfd(pfd, pt, &can_busy_loop, + busy_flag)) { count++; pt->_qproc = NULL; - ll_flag = 0; + /* found something, stop busy polling */ + busy_flag = 0; + can_busy_loop = false; } } } @@ -820,8 +833,9 @@ static int do_poll(unsigned int nfds, struct poll_list *list, if (count || timed_out) break; - /* only if on, have sockets with POLL_LL and not out of time */ - if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time)) + /* only if found POLL_BUSY_LOOP sockets && not out of time */ + if (!need_resched() && can_busy_loop && + busy_loop_range(busy_start, busy_end)) continue; /* diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index 0d620ba19bc..f14dd88dafc 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -37,9 +37,9 @@ extern unsigned int sysctl_net_ll_poll __read_mostly; #define LL_FLUSH_FAILED -1 #define LL_FLUSH_BUSY -2 -static inline unsigned int ll_get_flag(void) +static inline bool net_busy_loop_on(void) { - return sysctl_net_ll_poll ? POLL_LL : 0; + return sysctl_net_ll_poll; } /* a wrapper to make debug_smp_processor_id() happy @@ -47,7 +47,7 @@ static inline unsigned int ll_get_flag(void) * we only care that the average is bounded */ #ifdef CONFIG_DEBUG_PREEMPT -static inline u64 ll_sched_clock(void) +static inline u64 busy_loop_sched_clock(void) { u64 rc; @@ -58,7 +58,7 @@ static inline u64 ll_sched_clock(void) return rc; } #else /* CONFIG_DEBUG_PREEMPT */ -static inline u64 ll_sched_clock(void) +static inline u64 busy_loop_sched_clock(void) { return sched_clock(); } @@ -67,7 +67,7 @@ static inline u64 ll_sched_clock(void) /* we don't mind a ~2.5% imprecision so <<10 instead of *1000 * sk->sk_ll_usec is a u_int so this can't overflow */ -static inline u64 ll_sk_run_time(struct sock *sk) +static inline u64 sk_busy_loop_end_time(struct sock *sk) { return (u64)ACCESS_ONCE(sk->sk_ll_usec) << 10; } @@ -75,27 +75,29 @@ static inline u64 ll_sk_run_time(struct sock *sk) /* in poll/select we use the global sysctl_net_ll_poll value * only call sched_clock() if enabled */ -static inline u64 ll_run_time(void) +static inline u64 busy_loop_end_time(void) { return (u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10; } -/* if flag is not set we don't need to know the time */ -static inline u64 ll_start_time(unsigned int flag) +/* if flag is not set we don't need to know the time + * so we want to avoid a potentially expensive sched_clock() + */ +static inline u64 busy_loop_start_time(unsigned int flag) { - return flag ? ll_sched_clock() : 0; + return flag ? busy_loop_sched_clock() : 0; } -static inline bool sk_valid_ll(struct sock *sk) +static inline bool sk_can_busy_loop(struct sock *sk) { return sk->sk_ll_usec && sk->sk_napi_id && !need_resched() && !signal_pending(current); } /* careful! 
time_in_range64 will evaluate now twice */ -static inline bool can_poll_ll(u64 start_time, u64 run_time) +static inline bool busy_loop_range(u64 start_time, u64 run_time) { - u64 now = ll_sched_clock(); + u64 now = busy_loop_sched_clock(); return time_in_range64(now, start_time, start_time + run_time); } @@ -103,10 +105,10 @@ static inline bool can_poll_ll(u64 start_time, u64 run_time) /* when used in sock_poll() nonblock is known at compile time to be true * so the loop and end_time will be optimized out */ -static inline bool sk_poll_ll(struct sock *sk, int nonblock) +static inline bool sk_busy_loop(struct sock *sk, int nonblock) { - u64 start_time = ll_start_time(!nonblock); - u64 run_time = ll_sk_run_time(sk); + u64 start_time = busy_loop_start_time(!nonblock); + u64 end_time = sk_busy_loop_end_time(sk); const struct net_device_ops *ops; struct napi_struct *napi; int rc = false; @@ -137,7 +139,7 @@ static inline bool sk_poll_ll(struct sock *sk, int nonblock) LINUX_MIB_LOWLATENCYRXPACKETS, rc); } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && - can_poll_ll(start_time, run_time)); + busy_loop_range(start_time, end_time)); rc = !skb_queue_empty(&sk->sk_receive_queue); out: @@ -158,27 +160,27 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) } #else /* CONFIG_NET_LL_RX_POLL */ -static inline unsigned long ll_get_flag(void) +static inline unsigned long net_busy_loop_on(void) { return 0; } -static inline u64 ll_start_time(unsigned int flag) +static inline u64 busy_loop_start_time(unsigned int flag) { return 0; } -static inline u64 ll_run_time(void) +static inline u64 busy_loop_end_time(void) { return 0; } -static inline bool sk_valid_ll(struct sock *sk) +static inline bool sk_can_busy_loop(struct sock *sk) { return false; } -static inline bool sk_poll_ll(struct sock *sk, int nonblock) +static inline bool sk_busy_poll(struct sock *sk, int nonblock) { return false; } @@ -191,7 +193,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) { } -static inline bool can_poll_ll(u64 start_time, u64 run_time) +static inline bool busy_loop_range(u64 start_time, u64 run_time) { return false; } diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h index 4aee586979c..a9694982689 100644 --- a/include/uapi/asm-generic/poll.h +++ b/include/uapi/asm-generic/poll.h @@ -30,7 +30,7 @@ #define POLLFREE 0x4000 /* currently only for epoll */ -#define POLL_LL 0x8000 +#define POLL_BUSY_LOOP 0x8000 struct pollfd { int fd; diff --git a/net/core/datagram.c b/net/core/datagram.c index 9cbaba98ce4..6e9ab31e457 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -208,7 +208,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, } spin_unlock_irqrestore(&queue->lock, cpu_flags); - if (sk_valid_ll(sk) && sk_poll_ll(sk, flags & MSG_DONTWAIT)) + if (sk_can_busy_loop(sk) && + sk_busy_loop(sk, flags & MSG_DONTWAIT)) continue; /* User doesn't want to wait */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 46ed9afd1f5..15cbfa94bd8 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1554,9 +1554,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct sk_buff *skb; u32 urg_hole = 0; - if (sk_valid_ll(sk) && skb_queue_empty(&sk->sk_receive_queue) - && (sk->sk_state == TCP_ESTABLISHED)) - sk_poll_ll(sk, nonblock); + if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && + (sk->sk_state == TCP_ESTABLISHED)) + sk_busy_loop(sk, nonblock); lock_sock(sk); diff --git a/net/socket.c 
b/net/socket.c index 4da14cbd49b..45afa648364 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1148,7 +1148,7 @@ EXPORT_SYMBOL(sock_create_lite); /* No kernel lock held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) { - unsigned int ll_flag = 0; + unsigned int busy_flag = 0; struct socket *sock; /* @@ -1156,16 +1156,16 @@ static unsigned int sock_poll(struct file *file, poll_table *wait) */ sock = file->private_data; - if (sk_valid_ll(sock->sk)) { + if (sk_can_busy_loop(sock->sk)) { /* this socket can poll_ll so tell the system call */ - ll_flag = POLL_LL; + busy_flag = POLL_BUSY_LOOP; /* once, only if requested by syscall */ - if (wait && (wait->_key & POLL_LL)) - sk_poll_ll(sock->sk, 1); + if (wait && (wait->_key & POLL_BUSY_LOOP)) + sk_busy_loop(sock->sk, 1); } - return ll_flag | sock->ops->poll(file, sock, wait); + return busy_flag | sock->ops->poll(file, sock, wait); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) -- cgit v1.2.3-70-g09d2
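
Taken together, the series above gives drivers a busy-poll hook (ndo_ll_poll) plus the skb/socket marking helpers from include/net/ll_poll.h. The sketch below shows one plausible way a NIC driver could wire them up; it is an illustration only, not taken from any driver in this series: every foo_* identifier is hypothetical, the trylock-based serialization against the regular NAPI path and the budget of 4 are arbitrary choices, and the napi instance is assumed to be discoverable via napi_by_id() (i.e. registered in the napi hash, added elsewhere in this series) so that sk->sk_napi_id resolves back to this queue.

/*
 * Hypothetical driver glue for the ndo_ll_poll/skb_mark_ll API.
 * foo_clean_rx_ring() stands in for the driver's normal rx-descriptor
 * cleanup routine and is assumed to be defined elsewhere in the driver.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <net/ll_poll.h>

struct foo_rx_ring {
	struct napi_struct napi;	/* must be visible via napi_by_id() */
	spinlock_t ll_lock;		/* busy poll vs. NAPI serialization */
	/* ... descriptor ring state ... */
};

/* driver's usual rx cleanup; returns packets delivered to the stack */
static int foo_clean_rx_ring(struct foo_rx_ring *ring, int budget);

#ifdef CONFIG_NET_LL_RX_POLL
/* invoked from sk_poll_ll() when a socket busy polls this queue */
static int foo_ndo_ll_poll(struct napi_struct *napi)
{
	struct foo_rx_ring *ring = container_of(napi, struct foo_rx_ring, napi);
	int cleaned;

	if (!spin_trylock(&ring->ll_lock))
		return LL_FLUSH_BUSY;	/* softirq owns the ring right now */

	cleaned = foo_clean_rx_ring(ring, 4);
	spin_unlock(&ring->ll_lock);

	return cleaned;		/* > 0 is accounted in LINUX_MIB_LOWLATENCYRXPACKETS */
}
#endif

static const struct net_device_ops foo_netdev_ops = {
	/* ... ndo_open, ndo_start_xmit, ... */
#ifdef CONFIG_NET_LL_RX_POLL
	.ndo_ll_poll	= foo_ndo_ll_poll,
#endif
};

/* rx path: tag each skb with the queue's napi_id before it enters the
 * stack, so sk_mark_ll() can later record it in sk->sk_napi_id
 */
static void foo_receive_skb(struct foo_rx_ring *ring, struct sk_buff *skb)
{
	skb_mark_ll(skb, &ring->napi);
	napi_gro_receive(&ring->napi, skb);
}

At runtime nothing busy polls until the feature is enabled: the kernel must be built with CONFIG_NET_LL_RX_POLL, and the timeouts described in Documentation/sysctl/net.txt must be non-zero, either per socket (SO_LL) or globally, e.g.

  # echo 50 > /proc/sys/net/core/low_latency_read
  # echo 50 > /proc/sys/net/core/low_latency_poll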