Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/Kconfig        |  10
-rw-r--r--  net/sched/Makefile       |   1
-rw-r--r--  net/sched/act_csum.c     | 595
-rw-r--r--  net/sched/act_ipt.c      |  14
-rw-r--r--  net/sched/cls_cgroup.c   |   2
-rw-r--r--  net/sched/cls_flow.c     |  74
-rw-r--r--  net/sched/em_meta.c      |   6
-rw-r--r--  net/sched/sch_api.c      |  44
-rw-r--r--  net/sched/sch_atm.c      |   5
-rw-r--r--  net/sched/sch_cbq.c      |  12
-rw-r--r--  net/sched/sch_drr.c      |   4
-rw-r--r--  net/sched/sch_dsmark.c   |   6
-rw-r--r--  net/sched/sch_fifo.c     |   3
-rw-r--r--  net/sched/sch_generic.c  |  24
-rw-r--r--  net/sched/sch_hfsc.c     |   8
-rw-r--r--  net/sched/sch_htb.c      |  12
-rw-r--r--  net/sched/sch_mq.c       |   2
-rw-r--r--  net/sched/sch_multiq.c   |   3
-rw-r--r--  net/sched/sch_netem.c    |   3
-rw-r--r--  net/sched/sch_prio.c     |   2
-rw-r--r--  net/sched/sch_sfq.c      |  33
-rw-r--r--  net/sched/sch_teql.c     |   8
22 files changed, 753 insertions, 118 deletions
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2f691fb180d..a36270a994d 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -518,6 +518,16 @@ config NET_ACT_SKBEDIT
	  To compile this code as a module, choose M here: the
	  module will be called act_skbedit.

+config NET_ACT_CSUM
+	tristate "Checksum Updating"
+	depends on NET_CLS_ACT && INET
+	---help---
+	  Say Y here to update some common checksum after some direct
+	  packet alterations.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called act_csum.
+
 config NET_CLS_IND
	bool "Incoming device classification"
	depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index f14e71bfa58..960f5dba630 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_ACT_NAT)	+= act_nat.o
 obj-$(CONFIG_NET_ACT_PEDIT)	+= act_pedit.o
 obj-$(CONFIG_NET_ACT_SIMP)	+= act_simple.o
 obj-$(CONFIG_NET_ACT_SKBEDIT)	+= act_skbedit.o
+obj-$(CONFIG_NET_ACT_CSUM)	+= act_csum.o
 obj-$(CONFIG_NET_SCH_FIFO)	+= sch_fifo.o
 obj-$(CONFIG_NET_SCH_CBQ)	+= sch_cbq.o
 obj-$(CONFIG_NET_SCH_HTB)	+= sch_htb.o
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
new file mode 100644
index 00000000000..67dc7ce9b63
--- /dev/null
+++ b/net/sched/act_csum.c
@@ -0,0 +1,595 @@
+/*
+ * Checksum updating actions
+ *
+ * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include <linux/netlink.h>
+#include <net/netlink.h>
+#include <linux/rtnetlink.h>
+
+#include <linux/skbuff.h>
+
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/igmp.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
+
+#include <net/act_api.h>
+
+#include <linux/tc_act/tc_csum.h>
+#include <net/tc_act/tc_csum.h>
+
+#define CSUM_TAB_MASK 15
+static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
+static u32 csum_idx_gen;
+static DEFINE_RWLOCK(csum_lock);
+
+static struct tcf_hashinfo csum_hash_info = {
+	.htab	= tcf_csum_ht,
+	.hmask	= CSUM_TAB_MASK,
+	.lock	= &csum_lock,
+};
+
+static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
+	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
+};
+
+static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
+			 struct tc_action *a, int ovr, int bind)
+{
+	struct nlattr *tb[TCA_CSUM_MAX + 1];
+	struct tc_csum *parm;
+	struct tcf_common *pc;
+	struct tcf_csum *p;
+	int ret = 0, err;
+
+	if (nla == NULL)
+		return -EINVAL;
+
+	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
+	if (err < 0)
+		return err;
+
+	if (tb[TCA_CSUM_PARMS] == NULL)
+		return -EINVAL;
+	parm = nla_data(tb[TCA_CSUM_PARMS]);
+
+	pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info);
+	if (!pc) {
+		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
+				     &csum_idx_gen, &csum_hash_info);
+		if (IS_ERR(pc))
+			return PTR_ERR(pc);
+		p = to_tcf_csum(pc);
+		ret = ACT_P_CREATED;
+	} else {
+		p = to_tcf_csum(pc);
+		if (!ovr) {
+			tcf_hash_release(pc, bind, &csum_hash_info);
+			return -EEXIST;
+		}
+	}
+
+	spin_lock_bh(&p->tcf_lock);
+	p->tcf_action = parm->action;
+	p->update_flags = parm->update_flags;
+	spin_unlock_bh(&p->tcf_lock);
+
+	if (ret == ACT_P_CREATED)
+		tcf_hash_insert(pc, &csum_hash_info);
+
+	return ret;
+}
+
+static int tcf_csum_cleanup(struct tc_action *a, int bind)
+{
+	struct tcf_csum *p = a->priv;
+	return tcf_hash_release(&p->common, bind, &csum_hash_info);
+}
+
+/**
+ * tcf_csum_skb_nextlayer - Get next layer pointer
+ * @skb: sk_buff to use
+ * @ihl: previous summed headers length
+ * @ipl: complete packet length
+ * @jhl: next header length
+ *
+ * Check the expected next layer availability in the specified sk_buff.
+ * Return the next layer pointer if pass, NULL otherwise.
+ */
+static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
+				    unsigned int ihl, unsigned int ipl,
+				    unsigned int jhl)
+{
+	int ntkoff = skb_network_offset(skb);
+	int hl = ihl + jhl;
+
+	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
+	    (skb_cloned(skb) &&
+	     !skb_clone_writable(skb, hl + ntkoff) &&
+	     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+		return NULL;
+	else
+		return (void *)(skb_network_header(skb) + ihl);
+}
+
+static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
+			      unsigned int ihl, unsigned int ipl)
+{
+	struct icmphdr *icmph;
+
+	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
+	if (icmph == NULL)
+		return 0;
+
+	icmph->checksum = 0;
+	skb->csum = csum_partial(icmph, ipl - ihl, 0);
+	icmph->checksum = csum_fold(skb->csum);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return 1;
+}
+
+static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
+			      unsigned int ihl, unsigned int ipl)
+{
+	struct igmphdr *igmph;
+
+	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
+	if (igmph == NULL)
+		return 0;
+
+	igmph->csum = 0;
+	skb->csum = csum_partial(igmph, ipl - ihl, 0);
+	igmph->csum = csum_fold(skb->csum);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return 1;
+}
+
+static int tcf_csum_ipv6_icmp(struct sk_buff *skb, struct ipv6hdr *ip6h,
+			      unsigned int ihl, unsigned int ipl)
+{
+	struct icmp6hdr *icmp6h;
+
+	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
+	if (icmp6h == NULL)
+		return 0;
+
+	icmp6h->icmp6_cksum = 0;
+	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
+	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					      ipl - ihl, IPPROTO_ICMPV6,
+					      skb->csum);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return 1;
+}
+
+static int tcf_csum_ipv4_tcp(struct sk_buff *skb, struct iphdr *iph,
+			     unsigned int ihl, unsigned int ipl)
+{
+	struct tcphdr *tcph;
+
+	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
+	if (tcph == NULL)
+		return 0;
+
+	tcph->check = 0;
+	skb->csum = csum_partial(tcph, ipl - ihl, 0);
+	tcph->check = tcp_v4_check(ipl - ihl,
+				   iph->saddr, iph->daddr, skb->csum);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return 1;
+}
+
+static int tcf_csum_ipv6_tcp(struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int ihl, unsigned int ipl)
+{
+	struct tcphdr *tcph;
+
+	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
+	if (tcph == NULL)
+		return 0;
+
+	tcph->check = 0;
+	skb->csum = csum_partial(tcph, ipl - ihl, 0);
+	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+				      ipl - ihl, IPPROTO_TCP,
+				      skb->csum);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return 1;
+}
+
+static int tcf_csum_ipv4_udp(struct sk_buff *skb, struct iphdr *iph,
+			     unsigned int ihl, unsigned int ipl, int udplite)
+{
+	struct udphdr *udph;
+	u16 ul;
+
+	/*
+	 * Support both UDP and UDPLITE checksum algorithms, Don't use
+	 * udph->len to get the real length without any protocol check,
+	 * UDPLITE uses udph->len for another thing,
+	 * Use iph->tot_len, or just ipl.
+	 */
+
+	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
+	if (udph == NULL)
+		return 0;
+
+	ul = ntohs(udph->len);
+
+	if (udplite || udph->check) {
+
+		udph->check = 0;
+
+		if (udplite) {
+			if (ul == 0)
+				skb->csum = csum_partial(udph, ipl - ihl, 0);
+			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
+				skb->csum = csum_partial(udph, ul, 0);
+			else
+				goto ignore_obscure_skb;
+		} else {
+			if (ul != ipl - ihl)
+				goto ignore_obscure_skb;
+
+			skb->csum = csum_partial(udph, ul, 0);
+		}
+
+		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+						ul, iph->protocol,
+						skb->csum);
+
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	}
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+ignore_obscure_skb:
+	return 1;
+}
+
+static int tcf_csum_ipv6_udp(struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int ihl, unsigned int ipl, int udplite)
+{
+	struct udphdr *udph;
+	u16 ul;
+
+	/*
+	 * Support both UDP and UDPLITE checksum algorithms, Don't use
+	 * udph->len to get the real length without any protocol check,
+	 * UDPLITE uses udph->len for another thing,
+	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
+	 */
+
+	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
+	if (udph == NULL)
+		return 0;
+
+	ul = ntohs(udph->len);
+
+	udph->check = 0;
+
+	if (udplite) {
+		if (ul == 0)
+			skb->csum = csum_partial(udph, ipl - ihl, 0);
+
+		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
+			skb->csum = csum_partial(udph, ul, 0);
+
+		else
+			goto ignore_obscure_skb;
+	} else {
+		if (ul != ipl - ihl)
+			goto ignore_obscure_skb;
+
+		skb->csum = csum_partial(udph, ul, 0);
+	}
+
+	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
+				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
+				      skb->csum);
+
+	if (!udph->check)
+		udph->check = CSUM_MANGLED_0;
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+ignore_obscure_skb:
+	return 1;
+}
+
+static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
+{
+	struct iphdr *iph;
+	int ntkoff;
+
+	ntkoff = skb_network_offset(skb);
+
+	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
+		goto fail;
+
+	iph = ip_hdr(skb);
+
+	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
+	case IPPROTO_ICMP:
+		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
+			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
+						ntohs(iph->tot_len)))
+				goto fail;
+		break;
+	case IPPROTO_IGMP:
+		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
+			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
+						ntohs(iph->tot_len)))
+				goto fail;
+		break;
+	case IPPROTO_TCP:
+		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
+			if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4,
+					       ntohs(iph->tot_len)))
+				goto fail;
+		break;
+	case IPPROTO_UDP:
+		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
+			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
+					       ntohs(iph->tot_len), 0))
+				goto fail;
+		break;
+	case IPPROTO_UDPLITE:
+		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
+			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
+					       ntohs(iph->tot_len), 1))
+				goto fail;
+		break;
+	}
+
+	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
+		if (skb_cloned(skb) &&
+		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
+		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+			goto fail;
+
+		ip_send_check(iph);
+	}
+
+	return 1;
+
+fail:
+	return 0;
+}
+
+static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
+				 unsigned int ixhl, unsigned int *pl)
+{
+	int off, len, optlen;
+	unsigned char *xh = (void *)ip6xh;
+
+	off = sizeof(*ip6xh);
+	len = ixhl - off;
+
+	while (len > 1) {
+		switch (xh[off]) {
+		case IPV6_TLV_PAD0:
+			optlen = 1;
+			break;
+		case IPV6_TLV_JUMBO:
+			optlen = xh[off + 1] + 2;
+			if (optlen != 6 || len < 6 || (off & 3) != 2)
+				/* wrong jumbo option length/alignment */
+				return 0;
+			*pl = ntohl(*(__be32 *)(xh + off + 2));
+			goto done;
+		default:
+			optlen = xh[off + 1] + 2;
+			if (optlen > len)
+				/* ignore obscure options */
+				goto done;
+			break;
+		}
+		off += optlen;
+		len -= optlen;
+	}
+
+done:
+	return 1;
+}
+
+static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
+{
+	struct ipv6hdr *ip6h;
+	struct ipv6_opt_hdr *ip6xh;
+	unsigned int hl, ixhl;
+	unsigned int pl;
+	int ntkoff;
+	u8 nexthdr;
+
+	ntkoff = skb_network_offset(skb);
+
+	hl = sizeof(*ip6h);
+
+	if (!pskb_may_pull(skb, hl + ntkoff))
+		goto fail;
+
+	ip6h = ipv6_hdr(skb);
+
+	pl = ntohs(ip6h->payload_len);
+	nexthdr = ip6h->nexthdr;
+
+	do {
+		switch (nexthdr) {
+		case NEXTHDR_FRAGMENT:
+			goto ignore_skb;
+		case NEXTHDR_ROUTING:
+		case NEXTHDR_HOP:
+		case NEXTHDR_DEST:
+			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
+				goto fail;
+			ip6xh = (void *)(skb_network_header(skb) + hl);
+			ixhl = ipv6_optlen(ip6xh);
+			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
+				goto fail;
+			if ((nexthdr == NEXTHDR_HOP) &&
+			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
+				goto fail;
+			nexthdr = ip6xh->nexthdr;
+			hl += ixhl;
+			break;
+		case IPPROTO_ICMPV6:
+			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
+				if (!tcf_csum_ipv6_icmp(skb, ip6h,
+							hl, pl + sizeof(*ip6h)))
+					goto fail;
+			goto done;
+		case IPPROTO_TCP:
+			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
+				if (!tcf_csum_ipv6_tcp(skb, ip6h,
+						       hl, pl + sizeof(*ip6h)))
+					goto fail;
+			goto done;
+		case IPPROTO_UDP:
+			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
+				if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
+						       pl + sizeof(*ip6h), 0))
+					goto fail;
+			goto done;
+		case IPPROTO_UDPLITE:
+			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
+				if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
+						       pl + sizeof(*ip6h), 1))
+					goto fail;
+			goto done;
+		default:
+			goto ignore_skb;
+		}
+	} while (pskb_may_pull(skb, hl + 1 + ntkoff));
+
+done:
+ignore_skb:
+	return 1;
+
+fail:
+	return 0;
+}
+
+static int tcf_csum(struct sk_buff *skb,
+		    struct tc_action *a, struct tcf_result *res)
+{
+	struct tcf_csum *p = a->priv;
+	int action;
+	u32 update_flags;
+
+	spin_lock(&p->tcf_lock);
+	p->tcf_tm.lastuse = jiffies;
+	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
+	p->tcf_bstats.packets++;
+	action = p->tcf_action;
+	update_flags = p->update_flags;
+	spin_unlock(&p->tcf_lock);
+
+	if (unlikely(action == TC_ACT_SHOT))
+		goto drop;
+
+	switch (skb->protocol) {
+	case cpu_to_be16(ETH_P_IP):
+		if (!tcf_csum_ipv4(skb, update_flags))
+			goto drop;
+		break;
+	case cpu_to_be16(ETH_P_IPV6):
+		if (!tcf_csum_ipv6(skb, update_flags))
+			goto drop;
+		break;
+	}
+
+	return action;
+
+drop:
+	spin_lock(&p->tcf_lock);
+	p->tcf_qstats.drops++;
+	spin_unlock(&p->tcf_lock);
+	return TC_ACT_SHOT;
+}
+
+static int tcf_csum_dump(struct sk_buff *skb,
+			 struct tc_action *a, int bind, int ref)
+{
+	unsigned char *b = skb_tail_pointer(skb);
+	struct tcf_csum *p = a->priv;
+	struct tc_csum opt = {
+		.update_flags = p->update_flags,
+		.index   = p->tcf_index,
+		.action  = p->tcf_action,
+		.refcnt  = p->tcf_refcnt - ref,
+		.bindcnt = p->tcf_bindcnt - bind,
+	};
+	struct tcf_t t;
+
+	NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
+	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
+	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
+	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
+	NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);
+
+	return skb->len;
+
+nla_put_failure:
+	nlmsg_trim(skb, b);
+	return -1;
+}
+
+static struct tc_action_ops act_csum_ops = {
+	.kind		= "csum",
+	.hinfo		= &csum_hash_info,
+	.type		= TCA_ACT_CSUM,
+	.capab		= TCA_CAP_NONE,
+	.owner		= THIS_MODULE,
+	.act		= tcf_csum,
+	.dump		= tcf_csum_dump,
+	.cleanup	= tcf_csum_cleanup,
+	.lookup		= tcf_hash_search,
+	.init		= tcf_csum_init,
+	.walk		= tcf_generic_walker
+};
+
+MODULE_DESCRIPTION("Checksum updating actions");
+MODULE_LICENSE("GPL");
+
+static int __init csum_init_module(void)
+{
+	return tcf_register_action(&act_csum_ops);
+}
+
+static void __exit csum_cleanup_module(void)
+{
+	tcf_unregister_action(&act_csum_ops);
+}
+
+module_init(csum_init_module);
+module_exit(csum_cleanup_module);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index c7e59e6ec34..8daef963225 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -39,7 +39,7 @@ static struct tcf_hashinfo ipt_hash_info = {
	.lock	=	&ipt_lock,
 };

-static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
+static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook)
 {
	struct xt_tgchk_param par;
	struct xt_target *target;
@@ -66,7 +66,7 @@ static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int
	return 0;
 }

-static void ipt_destroy_target(struct ipt_entry_target *t)
+static void ipt_destroy_target(struct xt_entry_target *t)
 {
	struct xt_tgdtor_param par = {
		.target = t->u.kernel.target,
@@ -99,7 +99,7 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
-	[TCA_IPT_TARG]	= { .len = sizeof(struct ipt_entry_target) },
+	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
 };

 static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
@@ -108,7 +108,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct tcf_common *pc;
-	struct ipt_entry_target *td, *t;
+	struct xt_entry_target *td, *t;
	char *tname;
	int ret = 0, err;
	u32 hook = 0;
@@ -126,7 +126,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
	if (tb[TCA_IPT_TARG] == NULL)
		return -EINVAL;

-	td = (struct ipt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
+	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
		return -EINVAL;

@@ -230,7 +230,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
-	case IPT_CONTINUE:
+	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
@@ -249,7 +249,7 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
 {
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = a->priv;
-	struct ipt_entry_target *t;
+	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 78ef2c5e130..37dff78e9cb 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -123,7 +123,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disables bh.
	 */
-	if (softirq_count() != SOFTIRQ_OFFSET) {
+	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e17096e3913..5b271a18bc3 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -111,44 +111,41 @@ static u32 flow_get_proto(struct sk_buff *skb)
	}
 }

-static int has_ports(u8 protocol)
-{
-	switch (protocol) {
-	case IPPROTO_TCP:
-	case IPPROTO_UDP:
-	case IPPROTO_UDPLITE:
-	case IPPROTO_SCTP:
-	case IPPROTO_DCCP:
-	case IPPROTO_ESP:
-		return 1;
-	default:
-		return 0;
-	}
-}
-
 static u32 flow_get_proto_src(struct sk_buff *skb)
 {
	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph;
+		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ip_hdr(skb);
-		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
-		    has_ports(iph->protocol) &&
-		    pskb_network_may_pull(skb, iph->ihl * 4 + 2))
-			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
+		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+			break;
+		poff = proto_ports_offset(iph->protocol);
+		if (poff >= 0 &&
+		    pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
+			iph = ip_hdr(skb);
+			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
+						 poff));
+		}
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph;
+		int poff;

-		if (!pskb_network_may_pull(skb, sizeof(*iph) + 2))
+		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ipv6_hdr(skb);
-		if (has_ports(iph->nexthdr))
-			return ntohs(*(__be16 *)&iph[1]);
+		poff = proto_ports_offset(iph->nexthdr);
+		if (poff >= 0 &&
+		    pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
+			iph = ipv6_hdr(skb);
+			return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
+						 poff));
+		}
		break;
	}
	}
@@ -161,24 +158,36 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph;
+		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ip_hdr(skb);
-		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
-		    has_ports(iph->protocol) &&
-		    pskb_network_may_pull(skb, iph->ihl * 4 + 4))
-			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
+		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+			break;
+		poff = proto_ports_offset(iph->protocol);
+		if (poff >= 0 &&
+		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
+			iph = ip_hdr(skb);
+			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
+						 2 + poff));
+		}
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph;
+		int poff;

-		if (!pskb_network_may_pull(skb, sizeof(*iph) + 4))
+		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ipv6_hdr(skb);
-		if (has_ports(iph->nexthdr))
-			return ntohs(*(__be16 *)((void *)&iph[1] + 2));
+		poff = proto_ports_offset(iph->nexthdr);
+		if (poff >= 0 &&
+		    pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
+			iph = ipv6_hdr(skb);
+			return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
+						 poff + 2));
+		}
		break;
	}
	}
@@ -297,6 +306,11 @@ static u32 flow_get_vlan_tag(const struct sk_buff *skb)
	return tag & VLAN_VID_MASK;
 }

+static u32 flow_get_rxhash(struct sk_buff *skb)
+{
+	return skb_get_rxhash(skb);
+}
+
 static u32 flow_key_get(struct sk_buff *skb, int key)
 {
	switch (key) {
@@ -334,6 +348,8 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
+	case FLOW_KEY_RXHASH:
+		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 3bcac8aa333..34da5e29ea1 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -223,6 +223,11 @@ META_COLLECTOR(int_maclen)
	dst->value = skb->mac_len;
 }

+META_COLLECTOR(int_rxhash)
+{
+	dst->value = skb_get_rxhash(skb);
+}
+
 /**************************************************************************
  * Netfilter
  **************************************************************************/
@@ -541,6 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
		[META_ID(SK_SENDMSG_OFF)]	= META_FUNC(int_sk_sendmsg_off),
		[META_ID(SK_WRITE_PENDING)]	= META_FUNC(int_sk_write_pend),
		[META_ID(VLAN_TAG)]		= META_FUNC(int_vlan_tag),
+		[META_ID(RXHASH)]		= META_FUNC(int_rxhash),
	}
 };
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 408eea7086a..b22ca2d1ceb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -240,7 +240,10 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
	if (q)
		goto out;

-	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+	if (dev_ingress_queue(dev))
+		q = qdisc_match_from_root(
+			dev_ingress_queue(dev)->qdisc_sleeping,
+			handle);
 out:
	return q;
 }
@@ -360,7 +363,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

-	if (!s || tsize != s->tsize || (!tab && tsize > 0))
+	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);
@@ -690,6 +693,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
	    (new && new->flags & TCQ_F_INGRESS)) {
		num_q = 1;
		ingress = 1;
+		if (!dev_ingress_queue(dev))
+			return -ENOENT;
	}

	if (dev->flags & IFF_UP)
@@ -701,7 +706,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
	}

	for (i = 0; i < num_q; i++) {
-		struct netdev_queue *dev_queue = &dev->rx_queue;
+		struct netdev_queue *dev_queue = dev_ingress_queue(dev);

		if (!ingress)
			dev_queue = netdev_get_tx_queue(dev, i);
@@ -979,7 +984,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
				return -ENOENT;
			q = qdisc_leaf(p, clid);
		} else { /* ingress */
-			q = dev->rx_queue.qdisc_sleeping;
+			if (dev_ingress_queue(dev))
+				q = dev_ingress_queue(dev)->qdisc_sleeping;
		}
	} else {
		q = dev->qdisc;
@@ -1043,8 +1049,9 @@ replay:
			if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
				return -ENOENT;
			q = qdisc_leaf(p, clid);
-		} else { /*ingress */
-			q = dev->rx_queue.qdisc_sleeping;
+		} else { /* ingress */
+			if (dev_ingress_queue_create(dev))
+				q = dev_ingress_queue(dev)->qdisc_sleeping;
		}
	} else {
		q = dev->qdisc;
@@ -1123,11 +1130,14 @@ replay:
 create_n_graft:
	if (!(n->nlmsg_flags&NLM_F_CREATE))
		return -ENOENT;
-	if (clid == TC_H_INGRESS)
-		q = qdisc_create(dev, &dev->rx_queue, p,
-				 tcm->tcm_parent, tcm->tcm_parent,
-				 tca, &err);
-	else {
+	if (clid == TC_H_INGRESS) {
+		if (dev_ingress_queue(dev))
+			q = qdisc_create(dev, dev_ingress_queue(dev), p,
+					 tcm->tcm_parent, tcm->tcm_parent,
+					 tca, &err);
+		else
+			err = -ENOENT;
+	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
@@ -1304,8 +1314,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

-		dev_queue = &dev->rx_queue;
-		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
+		dev_queue = dev_ingress_queue(dev);
+		if (dev_queue &&
+		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
+				       &q_idx, s_q_idx) < 0)
			goto done;

 cont:
@@ -1595,8 +1607,10 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

-	dev_queue = &dev->rx_queue;
-	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
+	dev_queue = dev_ingress_queue(dev);
+	if (dev_queue &&
+	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
+				&t, s_t) < 0)
		goto done;

 done:
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 6318e1136b8..282540778aa 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -275,8 +275,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
		goto err_out;
	}
	flow->filter_list = NULL;
-	flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-				    &pfifo_qdisc_ops, classid);
+	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!flow->q)
		flow->q = &noop_qdisc;
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
@@ -543,7 +542,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
	INIT_LIST_HEAD(&p->flows);
	INIT_LIST_HEAD(&p->link.list);
	list_add(&p->link.list, &p->flows);
-	p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+	p->link.q = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, sch->handle);
	if (!p->link.q)
		p->link.q = &noop_qdisc;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 28c01ef5abc..eb763159086 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1379,9 +1379,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
-	if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					    &pfifo_qdisc_ops,
-					    sch->handle)))
+	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+				      sch->handle);
+	if (!q->link.q)
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO-1;
@@ -1623,7 +1623,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
	struct cbq_class *cl = (struct cbq_class*)arg;

	if (new == NULL) {
-		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
@@ -1874,8 +1874,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
-	if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					&pfifo_qdisc_ops, classid)))
+	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+	if (!cl->q)
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index b74046a9539..aa8b5313f8c 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -110,7 +110,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
	cl->refcnt	   = 1;
	cl->common.classid = classid;
	cl->quantum	   = quantum;
-	cl->qdisc	   = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
@@ -218,7 +218,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
-		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 63d41f86679..1d295d62bb5 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -61,8 +61,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
		sch, p, new, old);

	if (new == NULL) {
-		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					&pfifo_qdisc_ops,
+		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle);
		if (new == NULL)
			new = &noop_qdisc;
@@ -384,8 +383,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

-	p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-				 &pfifo_qdisc_ops, sch->handle);
+	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
	if (p->q == NULL)
		p->q = &noop_qdisc;
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 5948bafa8ce..4dfecb0cba3 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -172,8 +172,7 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
	struct Qdisc *q;
	int err = -ENOMEM;

-	q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-			      ops, TC_H_MAKE(sch->handle, 1));
+	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
	if (q) {
		err = fifo_set_limit(q, limit);
		if (err < 0) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2aeb3a4386a..5dbb3cd96e5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -383,6 +383,7 @@ struct Qdisc noop_qdisc = {
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
+	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 };
 EXPORT_SYMBOL(noop_qdisc);

@@ -409,6 +410,7 @@ static struct Qdisc noqueue_qdisc = {
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	=	&noqueue_netdev_queue,
+	.busylock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
 };

@@ -574,10 +576,8 @@ errout:
	return ERR_PTR(err);
 }

-struct Qdisc * qdisc_create_dflt(struct net_device *dev,
-				 struct netdev_queue *dev_queue,
-				 struct Qdisc_ops *ops,
-				 unsigned int parentid)
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+				struct Qdisc_ops *ops, unsigned int parentid)
 {
	struct Qdisc *sch;

@@ -682,7 +682,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
	struct Qdisc *qdisc;

	if (dev->tx_queue_len) {
-		qdisc = qdisc_create_dflt(dev, dev_queue,
+		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			printk(KERN_INFO "%s: activation failed\n", dev->name);
@@ -709,7 +709,7 @@ static void attach_default_qdiscs(struct net_device *dev)
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
-		qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
+		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			qdisc->ops->attach(qdisc);
			dev->qdisc = qdisc;
@@ -753,7 +753,8 @@ void dev_activate(struct net_device *dev)

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
-	transition_one_qdisc(dev, &dev->rx_queue, NULL);
+	if (dev_ingress_queue(dev))
+		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
@@ -812,7 +813,8 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 void dev_deactivate(struct net_device *dev)
 {
	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	dev_watchdog_down(dev);

@@ -838,7 +840,8 @@ void dev_init_scheduler(struct net_device *dev)
 {
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
-	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
@@ -861,7 +864,8 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
-	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 47496098d35..069c62b7bb3 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1088,7 +1088,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
-	cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
@@ -1209,8 +1209,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
-		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					&pfifo_qdisc_ops,
+		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
@@ -1452,8 +1451,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
	q->root.cl_common.classid = sch->handle;
	q->root.refcnt  = 1;
	q->root.sched   = q;
-	q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					  &pfifo_qdisc_ops,
+	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 4be8d04b262..01b519d6c52 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1121,8 +1121,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
-	    (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-				     &pfifo_qdisc_ops,
+	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid)) == NULL)
		return -ENOBUFS;

@@ -1247,8 +1246,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
-		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					  &pfifo_qdisc_ops,
+		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}
@@ -1302,14 +1300,14 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
-	struct nlattr *tb[TCA_HTB_RTAB + 1];
+	struct nlattr *tb[__TCA_HTB_MAX];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

-	err = nla_parse_nested(tb, TCA_HTB_RTAB, opt, htb_policy);
+	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		goto failure;

@@ -1377,7 +1375,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		   so that can't be used inside of sch_tree_lock
		   -- thanks to Karlis Peisenieks */
-		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+		new_q = qdisc_create_dflt(sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index fe91e50f9d9..ecc302f4d2a 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -56,7 +56,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
-		qdisc = qdisc_create_dflt(dev, dev_queue, &pfifo_fast_ops,
+		qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)));
		if (qdisc == NULL)
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 6ae251279fc..32690deab5d 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -227,8 +227,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
-			child = qdisc_create_dflt(qdisc_dev(sch),
-						  sch->dev_queue,
+			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 4714ff162bb..e5593c083a7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -538,8 +538,7 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)

	qdisc_watchdog_init(&q->watchdog, sch);

-	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-				     &tfifo_qdisc_ops,
+	q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 0748fb1e3a4..b1c95bce33c 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -200,7 +200,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
	for (i=0; i<q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
-			child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 201cbac2b32..3cf478d012d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -123,40 +123,39 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
+		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
-		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
-		    (iph->protocol == IPPROTO_TCP ||
-		     iph->protocol == IPPROTO_UDP ||
-		     iph->protocol == IPPROTO_UDPLITE ||
-		     iph->protocol == IPPROTO_SCTP ||
-		     iph->protocol == IPPROTO_DCCP ||
-		     iph->protocol == IPPROTO_ESP) &&
-		    pskb_network_may_pull(skb, iph->ihl * 4 + 4))
-			h2 ^= *(((u32*)iph) + iph->ihl);
+		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+			break;
+		poff = proto_ports_offset(iph->protocol);
+		if (poff >= 0 &&
+		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
+			iph = ip_hdr(skb);
+			h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph;
+		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
-		if ((iph->nexthdr == IPPROTO_TCP ||
-		     iph->nexthdr == IPPROTO_UDP ||
-		     iph->nexthdr == IPPROTO_UDPLITE ||
-		     iph->nexthdr == IPPROTO_SCTP ||
-		     iph->nexthdr == IPPROTO_DCCP ||
-		     iph->nexthdr == IPPROTO_ESP) &&
-		    pskb_network_may_pull(skb, sizeof(*iph) + 4))
-			h2 ^= *(u32*)&iph[1];
+		poff = proto_ports_offset(iph->nexthdr);
+		if (poff >= 0 &&
+		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
+			iph = ipv6_hdr(skb);
+			h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+		}
		break;
	}
	default:
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index feaabc103ce..401af959670 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -241,11 +241,11 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
	}
	if (neigh_event_send(n, skb_res) == 0) {
		int err;
+		char haddr[MAX_ADDR_LEN];

-		read_lock(&n->lock);
-		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
-				      n->ha, NULL, skb->len);
-		read_unlock(&n->lock);
+		neigh_ha_snapshot(haddr, n, dev);
+		err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
+				      NULL, skb->len);

		if (err < 0) {
			neigh_release(n);
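For context, every act_csum helper above follows the same pattern: zero the checksum field, csum_partial() over the headers and payload, then fold the result with csum_fold() or mix in the pseudo-header via csum_tcpudp_magic()/csum_ipv6_magic(). What follows is a minimal userspace sketch of that underlying RFC 1071 ones' complement sum; it is not part of the patch set, the pseudo-header words are omitted (they could be seeded through the sum argument), and all names and values in it are local to the example.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* RFC 1071 checksum: add up 16-bit big-endian words, fold the carries
 * back in, and take the ones' complement of the result. This models
 * what csum_partial() + csum_fold() compute in the kernel. */
static uint16_t rfc1071_csum(const uint8_t *data, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)data[i] << 8) | data[i + 1];
	if (len & 1)		/* odd trailing byte, zero-padded */
		sum += (uint32_t)data[len - 1] << 8;

	while (sum >> 16)	/* fold 32-bit accumulator into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}

int main(void)
{
	/* Hypothetical 8-byte UDP header with its checksum field
	 * (bytes 6-7) already zeroed, as tcf_csum_ipv4_udp() does
	 * before re-summing. */
	uint8_t udph[8] = { 0x04, 0x00, 0x00, 0x35, 0x00, 0x08, 0x00, 0x00 };
	uint16_t check = rfc1071_csum(udph, sizeof(udph), 0);

	/* A computed checksum of 0 is transmitted as 0xffff, because a
	 * zero UDP checksum on the wire means "no checksum" -- this is
	 * the CSUM_MANGLED_0 substitution seen in the patch. */
	if (check == 0)
		check = 0xffff;

	printf("check = 0x%04x\n", check);
	return 0;
}

Seeding the sum argument with the pseudo-header words (addresses, protocol, length) before summing the transport header would model what csum_tcpudp_magic() and csum_ipv6_magic() add for TCP and UDP.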