Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/dev.c | 170
-rw-r--r-- | net/core/ethtool.c | 29
-rw-r--r-- | net/core/link_watch.c | 6
-rw-r--r-- | net/core/neighbour.c | 1
-rw-r--r-- | net/core/net-sysfs.c | 1
-rw-r--r-- | net/core/netpoll.c | 36
-rw-r--r-- | net/core/rtnetlink.c | 3
-rw-r--r-- | net/core/skbuff.c | 187
-rw-r--r-- | net/core/sock.c | 12
-rw-r--r-- | net/core/sysctl_net_core.c | 1
10 files changed, 388 insertions, 58 deletions
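The bulk of this series is the generic segmentation offload (GSO) plumbing added to net/core/dev.c below: skb_gso_segment() asks the matching packet_type handler to split an oversized skb into a list of segments, dev_gso_segment() hangs that list off skb->next, and dev_hard_start_xmit() walks the list, handing one segment at a time to the driver and putting the remainder back if the queue stops. The following standalone C sketch models only that transmit loop; struct pkt, driver_xmit() and xmit_segments() are invented stand-ins used purely for illustration, not kernel APIs.

/*
 * Hedged sketch (illustration only, not kernel code): a userspace model of
 * the segment-list walk that the new dev_hard_start_xmit() performs once
 * dev_gso_segment() has attached software-built segments to skb->next.
 */
#include <stdio.h>
#include <stddef.h>

struct pkt {
	struct pkt *next;	/* next software segment, NULL on the last one */
	int id;
};

enum { TX_OK = 0, TX_BUSY = 1 };

/* Stand-in for dev->hard_start_xmit(): pretend the queue fills after 3 pkts */
static int driver_xmit(struct pkt *p)
{
	static int sent;

	if (sent >= 3)
		return TX_BUSY;
	printf("xmit segment %d\n", p->id);
	sent++;
	return TX_OK;
}

/* Model of the "gso:" loop: detach one segment, hand it to the driver,
 * and push it back on failure so the caller can requeue the remainder. */
static int xmit_segments(struct pkt *head)
{
	while (head->next) {
		struct pkt *seg = head->next;
		int rc;

		head->next = seg->next;
		seg->next = NULL;
		rc = driver_xmit(seg);
		if (rc) {
			seg->next = head->next;
			head->next = seg;
			return rc;
		}
	}
	return TX_OK;
}

int main(void)
{
	struct pkt segs[5];
	struct pkt head = { .next = &segs[0], .id = 0 };
	int i, rc;

	for (i = 0; i < 5; i++) {
		segs[i].id = i + 1;
		segs[i].next = (i < 4) ? &segs[i + 1] : NULL;
	}

	rc = xmit_segments(&head);
	printf("result: %s, first unsent: %d\n",
	       rc ? "requeue remainder" : "all sent",
	       head.next ? head.next->id : -1);
	return 0;
}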
diff --git a/net/core/dev.c b/net/core/dev.c index ab39fe17cb5..066a60a7528 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -76,7 +76,6 @@ #include <asm/system.h> #include <linux/bitops.h> #include <linux/capability.h> -#include <linux/config.h> #include <linux/cpu.h> #include <linux/types.h> #include <linux/kernel.h> @@ -116,6 +115,7 @@ #include <asm/current.h> #include <linux/audit.h> #include <linux/dmaengine.h> +#include <linux/err.h> /* * The list of packet types we will receive (as opposed to discard) @@ -229,7 +229,7 @@ extern void netdev_unregister_sysfs(struct net_device *); * For efficiency */ -int netdev_nit; +static int netdev_nit; /* * Add a protocol ID to the list. Now that the input handler is @@ -1048,7 +1048,7 @@ static inline void net_timestamp(struct sk_buff *skb) * taps currently in use. */ -void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) +static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) { struct packet_type *ptype; @@ -1186,6 +1186,45 @@ out: return ret; } +/** + * skb_gso_segment - Perform segmentation on skb. + * @skb: buffer to segment + * @features: features for the output path (see dev->features) + * + * This function segments the given skb and returns a list of segments. + * + * It may return NULL if the skb requires no segmentation. This is + * only possible when GSO is used for verifying header integrity. + */ +struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) +{ + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); + struct packet_type *ptype; + int type = skb->protocol; + + BUG_ON(skb_shinfo(skb)->frag_list); + BUG_ON(skb->ip_summed != CHECKSUM_HW); + + skb->mac.raw = skb->data; + skb->mac_len = skb->nh.raw - skb->data; + __skb_pull(skb, skb->mac_len); + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { + if (ptype->type == type && !ptype->dev && ptype->gso_segment) { + segs = ptype->gso_segment(skb, features); + break; + } + } + rcu_read_unlock(); + + __skb_push(skb, skb->data - skb->mac.raw); + + return segs; +} + +EXPORT_SYMBOL(skb_gso_segment); + /* Take action when hardware reception checksum errors are detected. */ #ifdef CONFIG_BUG void netdev_rx_csum_fault(struct net_device *dev) @@ -1199,7 +1238,6 @@ void netdev_rx_csum_fault(struct net_device *dev) EXPORT_SYMBOL(netdev_rx_csum_fault); #endif -#ifdef CONFIG_HIGHMEM /* Actually, we should eliminate this check as soon as we know, that: * 1. IOMMU is present and allows to map all the memory. * 2. No high memory really exists on this machine. @@ -1207,6 +1245,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault); static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) { +#ifdef CONFIG_HIGHMEM int i; if (dev->features & NETIF_F_HIGHDMA) @@ -1216,11 +1255,102 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) if (PageHighMem(skb_shinfo(skb)->frags[i].page)) return 1; +#endif + return 0; +} + +struct dev_gso_cb { + void (*destructor)(struct sk_buff *skb); +}; + +#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) + +static void dev_gso_skb_destructor(struct sk_buff *skb) +{ + struct dev_gso_cb *cb; + + do { + struct sk_buff *nskb = skb->next; + + skb->next = nskb->next; + nskb->next = NULL; + kfree_skb(nskb); + } while (skb->next); + + cb = DEV_GSO_CB(skb); + if (cb->destructor) + cb->destructor(skb); +} + +/** + * dev_gso_segment - Perform emulated hardware segmentation on skb. 
+ * @skb: buffer to segment + * + * This function segments the given skb and stores the list of segments + * in skb->next. + */ +static int dev_gso_segment(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + struct sk_buff *segs; + int features = dev->features & ~(illegal_highdma(dev, skb) ? + NETIF_F_SG : 0); + + segs = skb_gso_segment(skb, features); + + /* Verifying header integrity only. */ + if (!segs) + return 0; + + if (unlikely(IS_ERR(segs))) + return PTR_ERR(segs); + + skb->next = segs; + DEV_GSO_CB(skb)->destructor = skb->destructor; + skb->destructor = dev_gso_skb_destructor; + + return 0; +} + +int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + if (likely(!skb->next)) { + if (netdev_nit) + dev_queue_xmit_nit(skb, dev); + + if (netif_needs_gso(dev, skb)) { + if (unlikely(dev_gso_segment(skb))) + goto out_kfree_skb; + if (skb->next) + goto gso; + } + + return dev->hard_start_xmit(skb, dev); + } + +gso: + do { + struct sk_buff *nskb = skb->next; + int rc; + + skb->next = nskb->next; + nskb->next = NULL; + rc = dev->hard_start_xmit(nskb, dev); + if (unlikely(rc)) { + nskb->next = skb->next; + skb->next = nskb; + return rc; + } + if (unlikely(netif_queue_stopped(dev) && skb->next)) + return NETDEV_TX_BUSY; + } while (skb->next); + + skb->destructor = DEV_GSO_CB(skb)->destructor; + +out_kfree_skb: + kfree_skb(skb); return 0; } -#else -#define illegal_highdma(dev, skb) (0) -#endif #define HARD_TX_LOCK(dev, cpu) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ @@ -1266,6 +1396,10 @@ int dev_queue_xmit(struct sk_buff *skb) struct Qdisc *q; int rc = -ENOMEM; + /* GSO will handle the following emulations directly. */ + if (netif_needs_gso(dev, skb)) + goto gso; + if (skb_shinfo(skb)->frag_list && !(dev->features & NETIF_F_FRAGLIST) && __skb_linearize(skb)) @@ -1290,12 +1424,13 @@ int dev_queue_xmit(struct sk_buff *skb) if (skb_checksum_help(skb, 0)) goto out_kfree_skb; +gso: spin_lock_prefetch(&dev->queue_lock); /* Disable soft irqs for various locks below. Also * stops preemption for RCU. */ - local_bh_disable(); + rcu_read_lock_bh(); /* Updates of qdisc are serialized by queue_lock. * The struct Qdisc which is pointed to by qdisc is now a @@ -1346,11 +1481,8 @@ int dev_queue_xmit(struct sk_buff *skb) HARD_TX_LOCK(dev, cpu); if (!netif_queue_stopped(dev)) { - if (netdev_nit) - dev_queue_xmit_nit(skb, dev); - rc = 0; - if (!dev->hard_start_xmit(skb, dev)) { + if (!dev_hard_start_xmit(skb, dev)) { HARD_TX_UNLOCK(dev); goto out; } @@ -1369,13 +1501,13 @@ int dev_queue_xmit(struct sk_buff *skb) } rc = -ENETDOWN; - local_bh_enable(); + rcu_read_unlock_bh(); out_kfree_skb: kfree_skb(skb); return rc; out: - local_bh_enable(); + rcu_read_unlock_bh(); return rc; } @@ -2980,7 +3112,7 @@ static void netdev_wait_allrefs(struct net_device *dev) static DEFINE_MUTEX(net_todo_run_mutex); void netdev_run_todo(void) { - struct list_head list = LIST_HEAD_INIT(list); + struct list_head list; /* Need to guard against multiple cpu's getting out of order. 
*/ mutex_lock(&net_todo_run_mutex); @@ -2995,9 +3127,9 @@ void netdev_run_todo(void) /* Snapshot list, allow later requests */ spin_lock(&net_todo_list_lock); - list_splice_init(&net_todo_list, &list); + list_replace_init(&net_todo_list, &list); spin_unlock(&net_todo_list_lock); - + while (!list_empty(&list)) { struct net_device *dev = list_entry(list.next, struct net_device, todo_list); @@ -3301,8 +3433,8 @@ static void net_dma_rebalance(void) /** * netdev_dma_event - event callback for the net_dma_client * @client: should always be net_dma_client - * @chan: - * @event: + * @chan: DMA channel for the event + * @event: event type */ static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan, enum dma_event event) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 33ce7ed6afc..27ce1683caf 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -614,6 +614,29 @@ static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr) return dev->ethtool_ops->set_ufo(dev, edata.data); } +static int ethtool_get_gso(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GGSO }; + + edata.data = dev->features & NETIF_F_GSO; + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_gso(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_value edata; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + if (edata.data) + dev->features |= NETIF_F_GSO; + else + dev->features &= ~NETIF_F_GSO; + return 0; +} + static int ethtool_self_test(struct net_device *dev, char __user *useraddr) { struct ethtool_test test; @@ -905,6 +928,12 @@ int dev_ethtool(struct ifreq *ifr) case ETHTOOL_SUFO: rc = ethtool_set_ufo(dev, useraddr); break; + case ETHTOOL_GGSO: + rc = ethtool_get_gso(dev, useraddr); + break; + case ETHTOOL_SGSO: + rc = ethtool_set_gso(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 646937cc2d8..4b36114744c 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -11,7 +11,6 @@ * */ -#include <linux/config.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/if.h> @@ -91,11 +90,10 @@ static void rfc2863_policy(struct net_device *dev) /* Must be called with the rtnl semaphore held */ void linkwatch_run_queue(void) { - LIST_HEAD(head); - struct list_head *n, *next; + struct list_head head, *n, *next; spin_lock_irq(&lweventlist_lock); - list_splice_init(&lweventlist, &head); + list_replace_init(&lweventlist, &head); spin_unlock_irq(&lweventlist_lock); list_for_each_safe(n, next, &head) { diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 50a8c73caf9..7ad681f5e71 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -15,7 +15,6 @@ * Harald Welte Add neighbour cache statistics like rtstat */ -#include <linux/config.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 47a6fceb677..13472762b18 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -10,7 +10,6 @@ */ #include <linux/capability.h> -#include <linux/config.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/if_arp.h> diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 9cb78183038..471da451cd4 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -54,6 +54,7 @@ static atomic_t trapped; sizeof(struct iphdr) + sizeof(struct ethhdr)) 
static void zap_completion_queue(void); +static void arp_reply(struct sk_buff *skb); static void queue_process(void *p) { @@ -153,6 +154,22 @@ static void poll_napi(struct netpoll *np) } } +static void service_arp_queue(struct netpoll_info *npi) +{ + struct sk_buff *skb; + + if (unlikely(!npi)) + return; + + skb = skb_dequeue(&npi->arp_tx); + + while (skb != NULL) { + arp_reply(skb); + skb = skb_dequeue(&npi->arp_tx); + } + return; +} + void netpoll_poll(struct netpoll *np) { if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller) @@ -163,6 +180,8 @@ void netpoll_poll(struct netpoll *np) if (np->dev->poll) poll_napi(np); + service_arp_queue(np->dev->npinfo); + zap_completion_queue(); } @@ -279,14 +298,10 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) * network drivers do not expect to be called if the queue is * stopped. */ - if (netif_queue_stopped(np->dev)) { - netif_tx_unlock(np->dev); - netpoll_poll(np); - udelay(50); - continue; - } + status = NETDEV_TX_BUSY; + if (!netif_queue_stopped(np->dev)) + status = np->dev->hard_start_xmit(skb, np->dev); - status = np->dev->hard_start_xmit(skb, np->dev); netif_tx_unlock(np->dev); /* success */ @@ -446,7 +461,9 @@ int __netpoll_rx(struct sk_buff *skb) int proto, len, ulen; struct iphdr *iph; struct udphdr *uh; - struct netpoll *np = skb->dev->npinfo->rx_np; + struct netpoll_info *npi = skb->dev->npinfo; + struct netpoll *np = npi->rx_np; + if (!np) goto out; @@ -456,7 +473,7 @@ int __netpoll_rx(struct sk_buff *skb) /* check if netpoll clients need ARP */ if (skb->protocol == __constant_htons(ETH_P_ARP) && atomic_read(&trapped)) { - arp_reply(skb); + skb_queue_tail(&npi->arp_tx, skb); return 1; } @@ -651,6 +668,7 @@ int netpoll_setup(struct netpoll *np) npinfo->poll_owner = -1; npinfo->tries = MAX_RETRIES; spin_lock_init(&npinfo->rx_lock); + skb_queue_head_init(&npinfo->arp_tx); } else npinfo = ndev->npinfo; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 3fcfa9c59e1..20e5bb73f14 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -16,7 +16,6 @@ * Vitaly E. Lavrov RTA_OK arithmetics was wrong. 
*/ -#include <linux/config.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/types.h> @@ -663,7 +662,7 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) sz_idx = type>>2; kind = type&3; - if (kind != 2 && security_netlink_recv(skb)) { + if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) { *errp = -EPERM; return -1; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index bb7210f4005..7cfbdb215ba 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -38,7 +38,6 @@ * The functions in this file will not compile correctly with gcc 2.4.x */ -#include <linux/config.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> @@ -172,9 +171,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, shinfo = skb_shinfo(skb); atomic_set(&shinfo->dataref, 1); shinfo->nr_frags = 0; - shinfo->tso_size = 0; - shinfo->tso_segs = 0; - shinfo->ufo_size = 0; + shinfo->gso_size = 0; + shinfo->gso_segs = 0; + shinfo->gso_type = 0; shinfo->ip6_frag_id = 0; shinfo->frag_list = NULL; @@ -238,8 +237,9 @@ struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, atomic_set(&(skb_shinfo(skb)->dataref), 1); skb_shinfo(skb)->nr_frags = 0; - skb_shinfo(skb)->tso_size = 0; - skb_shinfo(skb)->tso_segs = 0; + skb_shinfo(skb)->gso_size = 0; + skb_shinfo(skb)->gso_segs = 0; + skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->frag_list = NULL; out: return skb; @@ -271,7 +271,7 @@ static void skb_clone_fraglist(struct sk_buff *skb) skb_get(list); } -void skb_release_data(struct sk_buff *skb) +static void skb_release_data(struct sk_buff *skb) { if (!skb->cloned || !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, @@ -528,8 +528,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) #endif skb_copy_secmark(new, old); atomic_set(&new->users, 1); - skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size; - skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs; + skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; + skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; + skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; } /** @@ -781,24 +782,40 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, * filled. Used by network drivers which may DMA or transfer data * beyond the buffer end onto the wire. * - * May return NULL in out of memory cases. + * May return error in out of memory cases. The skb is freed on error. */ -struct sk_buff *skb_pad(struct sk_buff *skb, int pad) +int skb_pad(struct sk_buff *skb, int pad) { - struct sk_buff *nskb; + int err; + int ntail; /* If the skbuff is non linear tailroom is always zero.. */ - if (skb_tailroom(skb) >= pad) { + if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { memset(skb->data+skb->len, 0, pad); - return skb; + return 0; } - - nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC); + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); + if (unlikely(err)) + goto free_skb; + } + + /* FIXME: The use of this function with non-linear skb's really needs + * to be audited. + */ + err = skb_linearize(skb); + if (unlikely(err)) + goto free_skb; + + memset(skb->data + skb->len, 0, pad); + return 0; + +free_skb: kfree_skb(skb); - if (nskb) - memset(nskb->data+nskb->len, 0, pad); - return nskb; + return err; } /* Trims skb to length len. It can change skb pointers. 
@@ -1721,12 +1738,15 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, unsigned int to, struct ts_config *config, struct ts_state *state) { + unsigned int ret; + config->get_next_block = skb_ts_get_next_block; config->finish = skb_ts_finish; skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); - return textsearch_find(config, state); + ret = textsearch_find(config, state); + return (ret <= to - from ? ret : UINT_MAX); } /** @@ -1824,6 +1844,133 @@ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) EXPORT_SYMBOL_GPL(skb_pull_rcsum); +/** + * skb_segment - Perform protocol segmentation on skb. + * @skb: buffer to segment + * @features: features for the output path (see dev->features) + * + * This function performs segmentation on the given skb. It returns + * the segment at the given position. It returns NULL if there are + * no more segments to generate, or when an error is encountered. + */ +struct sk_buff *skb_segment(struct sk_buff *skb, int features) +{ + struct sk_buff *segs = NULL; + struct sk_buff *tail = NULL; + unsigned int mss = skb_shinfo(skb)->gso_size; + unsigned int doffset = skb->data - skb->mac.raw; + unsigned int offset = doffset; + unsigned int headroom; + unsigned int len; + int sg = features & NETIF_F_SG; + int nfrags = skb_shinfo(skb)->nr_frags; + int err = -ENOMEM; + int i = 0; + int pos; + + __skb_push(skb, doffset); + headroom = skb_headroom(skb); + pos = skb_headlen(skb); + + do { + struct sk_buff *nskb; + skb_frag_t *frag; + int hsize, nsize; + int k; + int size; + + len = skb->len - offset; + if (len > mss) + len = mss; + + hsize = skb_headlen(skb) - offset; + if (hsize < 0) + hsize = 0; + nsize = hsize + doffset; + if (nsize > len + doffset || !sg) + nsize = len + doffset; + + nskb = alloc_skb(nsize + headroom, GFP_ATOMIC); + if (unlikely(!nskb)) + goto err; + + if (segs) + tail->next = nskb; + else + segs = nskb; + tail = nskb; + + nskb->dev = skb->dev; + nskb->priority = skb->priority; + nskb->protocol = skb->protocol; + nskb->dst = dst_clone(skb->dst); + memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); + nskb->pkt_type = skb->pkt_type; + nskb->mac_len = skb->mac_len; + + skb_reserve(nskb, headroom); + nskb->mac.raw = nskb->data; + nskb->nh.raw = nskb->data + skb->mac_len; + nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw); + memcpy(skb_put(nskb, doffset), skb->data, doffset); + + if (!sg) { + nskb->csum = skb_copy_and_csum_bits(skb, offset, + skb_put(nskb, len), + len, 0); + continue; + } + + frag = skb_shinfo(nskb)->frags; + k = 0; + + nskb->ip_summed = CHECKSUM_HW; + nskb->csum = skb->csum; + memcpy(skb_put(nskb, hsize), skb->data + offset, hsize); + + while (pos < offset + len) { + BUG_ON(i >= nfrags); + + *frag = skb_shinfo(skb)->frags[i]; + get_page(frag->page); + size = frag->size; + + if (pos < offset) { + frag->page_offset += offset - pos; + frag->size -= offset - pos; + } + + k++; + + if (pos + size <= offset + len) { + i++; + pos += size; + } else { + frag->size -= pos + size - (offset + len); + break; + } + + frag++; + } + + skb_shinfo(nskb)->nr_frags = k; + nskb->data_len = len - hsize; + nskb->len += nskb->data_len; + nskb->truesize += nskb->data_len; + } while ((offset += len) < skb->len); + + return segs; + +err: + while ((skb = segs)) { + segs = skb->next; + kfree(skb); + } + return ERR_PTR(err); +} + +EXPORT_SYMBOL_GPL(skb_segment); + void __init skb_init(void) { skbuff_head_cache = kmem_cache_create("skbuff_head_cache", diff --git a/net/core/sock.c b/net/core/sock.c index 5d820c37665..533b9317144 
100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -92,7 +92,6 @@ */ #include <linux/capability.h> -#include <linux/config.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> @@ -565,6 +564,13 @@ set_rcvbuf: ret = -ENONET; break; + case SO_PASSSEC: + if (valbool) + set_bit(SOCK_PASSSEC, &sock->flags); + else + clear_bit(SOCK_PASSSEC, &sock->flags); + break; + /* We implement the SO_SNDLOWAT etc to not be settable (1003.1g 5.3) */ default: @@ -723,6 +729,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_state == TCP_LISTEN; break; + case SO_PASSSEC: + v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0; + break; + case SO_PEERSEC: return security_socket_getpeersec_stream(sock, optval, optlen, len); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 71045365672..02534131d88 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -7,7 +7,6 @@ #include <linux/mm.h> #include <linux/sysctl.h> -#include <linux/config.h> #include <linux/module.h> #include <linux/socket.h> #include <net/sock.h>
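The ethtool.c hunk above wires up two new commands, ETHTOOL_GGSO and ETHTOOL_SGSO, so userspace can read and toggle NETIF_F_GSO per device. Below is a hedged userspace sketch of driving them through the SIOCETHTOOL ioctl; it assumes a kernel and <linux/ethtool.h> carrying this patch, and the program name and minimal error handling are illustrative only. Newer ethtool binaries expose the same toggle as "ethtool -K <dev> gso on|off".

/*
 * Hedged sketch (not part of the patch): query and optionally toggle GSO
 * on an interface via the new ETHTOOL_GGSO/ETHTOOL_SGSO commands.
 *
 * Build: cc -o gso-toggle gso-toggle.c
 * Usage: ./gso-toggle eth0 [on|off]
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_value eval;
	struct ifreq ifr;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <ifname> [on|off]\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&eval;

	if (argc > 2) {
		/* ETHTOOL_SGSO: set or clear NETIF_F_GSO in dev->features */
		eval.cmd = ETHTOOL_SGSO;
		eval.data = !strcmp(argv[2], "on");
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("ETHTOOL_SGSO");
	}

	/* ETHTOOL_GGSO: read back the current GSO flag */
	eval.cmd = ETHTOOL_GGSO;
	eval.data = 0;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GGSO");
		close(fd);
		return 1;
	}

	printf("%s: generic-segmentation-offload %s\n",
	       argv[1], eval.data ? "on" : "off");
	close(fd);
	return 0;
}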