Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c            | 16
-rw-r--r--	net/core/drop_monitor.c   |  2
-rw-r--r--	net/core/filter.c         |  9
-rw-r--r--	net/core/flow_dissector.c | 36
-rw-r--r--	net/core/skbuff.c         | 37
-rw-r--r--	net/core/sock.c           |  2
6 files changed, 61 insertions, 41 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 4699dcfdc4a..b793e3521a3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2153,7 +2153,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	sd = &__get_cpu_var(softnet_data);
+	sd = this_cpu_ptr(&softnet_data);
 	q->next_sched = NULL;
 	*sd->output_queue_tailp = q;
 	sd->output_queue_tailp = &q->next_sched;
@@ -2675,7 +2675,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	if (skb->encapsulation)
 		features &= dev->hw_enc_features;
 
-	if (netif_needs_gso(skb, features)) {
+	if (netif_needs_gso(dev, skb, features)) {
 		struct sk_buff *segs;
 
 		segs = skb_gso_segment(skb, features);
@@ -3233,7 +3233,7 @@ static void rps_trigger_softirq(void *data)
 static int rps_ipi_queued(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
 
 	if (sd != mysd) {
 		sd->rps_ipi_next = mysd->rps_ipi_list;
@@ -3260,7 +3260,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
 	if (qlen < (netdev_max_backlog >> 1))
 		return false;
 
-	sd = &__get_cpu_var(softnet_data);
+	sd = this_cpu_ptr(&softnet_data);
 
 	rcu_read_lock();
 	fl = rcu_dereference(sd->flow_limit);
@@ -3407,7 +3407,7 @@ EXPORT_SYMBOL(netif_rx_ni);
 
 static void net_tx_action(struct softirq_action *h)
 {
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
 	if (sd->completion_queue) {
 		struct sk_buff *clist;
@@ -3832,7 +3832,7 @@ EXPORT_SYMBOL(netif_receive_skb);
 static void flush_backlog(void *arg)
 {
 	struct net_device *dev = arg;
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	struct sk_buff *skb, *tmp;
 
 	rps_lock(sd);
@@ -4379,7 +4379,7 @@ void __napi_schedule(struct napi_struct *n)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	____napi_schedule(&__get_cpu_var(softnet_data), n);
+	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__napi_schedule);
@@ -4500,7 +4500,7 @@ EXPORT_SYMBOL(netif_napi_del);
 
 static void net_rx_action(struct softirq_action *h)
 {
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
 	void *have;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 50f9a9db579..252e155c837 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -146,7 +146,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	data = &__get_cpu_var(dm_cpu_data);
+	data = this_cpu_ptr(&dm_cpu_data);
 	spin_lock(&data->lock);
 	dskb = data->skb;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index fcd3f6742a6..647b12265e1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -51,9 +51,9 @@
  *	@skb: buffer to filter
  *
  * Run the filter code and then cut skb->data to correct size returned by
- * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
+ * SK_RUN_FILTER. If pkt_len is 0 we toss packet. If skb->len is smaller
  * than pkt_len we keep whole skb->data. This is the socket level
- * wrapper to sk_run_filter. It returns 0 if the packet should
+ * wrapper to SK_RUN_FILTER. It returns 0 if the packet should
  * be accepted or -EPERM if the packet should be tossed.
  *
  */
@@ -566,11 +566,8 @@ err:
 
 /* Security:
  *
- * A BPF program is able to use 16 cells of memory to store intermediate
- * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
- *
  * As we dont want to clear mem[] array for each packet going through
- * sk_run_filter(), we check that filter loaded by user never try to read
+ * __bpf_prog_run(), we check that filter loaded by user never try to read
  * a cell if not previously written, and we check all branches to be sure
  * a malicious user doesn't try to abuse us.
  */
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 8560dea5880..45084938c40 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -100,6 +100,13 @@ ip:
 		if (ip_is_fragment(iph))
 			ip_proto = 0;
 
+		/* skip the address processing if skb is NULL. The assumption
+		 * here is that if there is no skb we are not looking for flow
+		 * info but lengths and protocols.
+		 */
+		if (!skb)
+			break;
+
 		iph_to_flow_copy_addrs(flow, iph);
 		break;
 	}
@@ -114,17 +121,15 @@ ipv6:
 			return false;
 
 		ip_proto = iph->nexthdr;
-		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
-		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
 		nhoff += sizeof(struct ipv6hdr);
 
-		/* skip the flow label processing if skb is NULL. The
-		 * assumption here is that if there is no skb we are not
-		 * looking for flow info as much as we are length.
-		 */
+		/* see comment above in IPv4 section */
 		if (!skb)
 			break;
 
+		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
+		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
+
 		flow_label = ip6_flowlabel(iph);
 		if (flow_label) {
 			/* Awesome, IPv6 packet has a flow label so we can
@@ -231,9 +236,13 @@ ipv6:
 
 	flow->n_proto = proto;
 	flow->ip_proto = ip_proto;
-	flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen);
 	flow->thoff = (u16) nhoff;
 
+	/* unless skb is set we don't need to record port info */
+	if (skb)
+		flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
+						   data, hlen);
+
 	return true;
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
@@ -334,15 +343,16 @@ u32 __skb_get_poff(const struct sk_buff *skb, void *data,
 
 	switch (keys->ip_proto) {
 	case IPPROTO_TCP: {
-		const struct tcphdr *tcph;
-		struct tcphdr _tcph;
+		/* access doff as u8 to avoid unaligned access */
+		const u8 *doff;
+		u8 _doff;
 
-		tcph = __skb_header_pointer(skb, poff, sizeof(_tcph),
-					    data, hlen, &_tcph);
-		if (!tcph)
+		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
+					    data, hlen, &_doff);
+		if (!doff)
 			return poff;
 
-		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
+		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
 		break;
 	}
 	case IPPROTO_UDP:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7b3df0d518a..61059a05ec9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -345,7 +345,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	nc = &__get_cpu_var(netdev_alloc_cache);
+	nc = this_cpu_ptr(&netdev_alloc_cache);
 	if (unlikely(!nc->frag.page)) {
 refill:
 		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
@@ -360,18 +360,29 @@ refill:
 				goto end;
 		}
 		nc->frag.size = PAGE_SIZE << order;
-recycle:
-		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
+		/* Even if we own the page, we do not use atomic_set().
+		 * This would break get_page_unless_zero() users.
+		 */
+		atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
+			   &nc->frag.page->_count);
 		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
 		nc->frag.offset = 0;
 	}
 
 	if (nc->frag.offset + fragsz > nc->frag.size) {
-		/* avoid unnecessary locked operations if possible */
-		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
-		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
-			goto recycle;
-		goto refill;
+		if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
+			if (!atomic_sub_and_test(nc->pagecnt_bias,
+						 &nc->frag.page->_count))
+				goto refill;
+			/* OK, page count is 0, we can safely set it */
+			atomic_set(&nc->frag.page->_count,
+				   NETDEV_PAGECNT_MAX_BIAS);
+		} else {
+			atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
+				   &nc->frag.page->_count);
+		}
+		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
+		nc->frag.offset = 0;
 	}
 
 	data = page_address(nc->frag.page) + nc->frag.offset;
@@ -4126,11 +4137,11 @@ EXPORT_SYMBOL(skb_vlan_untag);
 /**
  * alloc_skb_with_frags - allocate skb with page frags
  *
- * header_len: size of linear part
- * data_len: needed length in frags
- * max_page_order: max page order desired.
- * errcode: pointer to error code if any
- * gfp_mask: allocation mask
+ * @header_len: size of linear part
+ * @data_len: needed length in frags
+ * @max_page_order: max page order desired.
+ * @errcode: pointer to error code if any
+ * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
diff --git a/net/core/sock.c b/net/core/sock.c
index b4f3ea2fce6..15e0c67b106 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1718,6 +1718,8 @@ EXPORT_SYMBOL(sock_kmalloc);
  */
 void sock_kfree_s(struct sock *sk, void *mem, int size)
 {
+	if (WARN_ON_ONCE(!mem))
+		return;
 	kfree(mem);
 	atomic_sub(size, &sk->sk_omem_alloc);
 }
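
For readers of the __skb_get_poff() hunk above, here is a minimal userspace sketch (not kernel code; the helper name and test values are made up for illustration) of the data-offset trick: rather than copying a whole struct tcphdr, only the single byte at offset 12 is read, whose upper nibble is the TCP data offset in 32-bit words, so no multi-byte, possibly unaligned load is needed.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical helper: return the TCP header length in bytes, given a
 * buffer that starts at the TCP header; 0 if the buffer is too short */
static unsigned int tcp_hdr_len(const uint8_t *th, size_t len)
{
	if (len < 13)
		return 0;
	/* doff is the high nibble of byte 12, counted in 32-bit words;
	 * (x & 0xF0) >> 2 therefore yields the length in bytes */
	return (th[12] & 0xF0) >> 2;
}

int main(void)
{
	uint8_t hdr[20] = { 0 };

	hdr[12] = 0x50;	/* doff = 5 -> 20-byte header, no options */
	printf("TCP header length: %u bytes\n", tcp_hdr_len(hdr, sizeof(hdr)));
	return 0;
}

The kernel code additionally clamps the result to at least sizeof(struct tcphdr) via max_t(), since a malformed packet could advertise a doff smaller than the minimum header size.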