author	James Morris <jmorris@namei.org>	2008-08-28 10:47:34 +1000
committer	James Morris <jmorris@namei.org>	2008-08-28 10:47:34 +1000
commit	86d688984deefa3ae5a802880c11f2b408b5d6cf (patch)
tree	7ea5e8189b0a774626d3ed7c3c87df2495a4c4a0 /net/core
parent	93c06cbbf9fea5d5be1778febb7fa9ab1a74e5f5 (diff)
parent	4c246edd2550304df5b766cc841584b2bb058843 (diff)
Merge branch 'master' into next
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/datagram.c	87
-rw-r--r--	net/core/dev.c	116
-rw-r--r--	net/core/neighbour.c	13
-rw-r--r--	net/core/pktgen.c	76
-rw-r--r--	net/core/skbuff.c	12
5 files changed, 231 insertions(+), 73 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index dd61dcad601..52f577a0f54 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -339,6 +339,93 @@ fault:
 	return -EFAULT;
 }
 
+/**
+ * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying to
+ * @from: io vector to copy to
+ * @len: amount of data to copy to buffer from iovec
+ *
+ * Returns 0 or -EFAULT.
+ * Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+				 struct iovec *from, int len)
+{
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
+
+	/* Copy header. */
+	if (copy > 0) {
+		if (copy > len)
+			copy = len;
+		if (memcpy_fromiovec(skb->data + offset, from, copy))
+			goto fault;
+		if ((len -= copy) == 0)
+			return 0;
+		offset += copy;
+	}
+
+	/* Copy paged appendix. Hmm... why does this look so complicated? */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
+		if ((copy = end - offset) > 0) {
+			int err;
+			u8 *vaddr;
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			struct page *page = frag->page;
+
+			if (copy > len)
+				copy = len;
+			vaddr = kmap(page);
+			err = memcpy_fromiovec(vaddr + frag->page_offset +
+					       offset - start, from, copy);
+			kunmap(page);
+			if (err)
+				goto fault;
+
+			if (!(len -= copy))
+				return 0;
+			offset += copy;
+		}
+		start = end;
+	}
+
+	if (skb_shinfo(skb)->frag_list) {
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+		for (; list; list = list->next) {
+			int end;
+
+			WARN_ON(start > offset + len);
+
+			end = start + list->len;
+			if ((copy = end - offset) > 0) {
+				if (copy > len)
+					copy = len;
+				if (skb_copy_datagram_from_iovec(list,
+								 offset - start,
+								 from, copy))
+					goto fault;
+				if ((len -= copy) == 0)
+					return 0;
+				offset += copy;
+			}
+			start = end;
+		}
+	}
+	if (!len)
+		return 0;
+
+fault:
+	return -EFAULT;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+
 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      u8 __user *to, int len,
 				      __wsum *csump)
diff --git a/net/core/dev.c b/net/core/dev.c
index 69320a56a08..60c51f76588 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
 {
-	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
-		struct softnet_data *sd;
-		unsigned long flags;
+	struct softnet_data *sd;
+	unsigned long flags;
 
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		q->next_sched = sd->output_queue;
-		sd->output_queue = q;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
+	local_irq_save(flags);
+	sd = &__get_cpu_var(softnet_data);
+	q->next_sched = sd->output_queue;
+	sd->output_queue = q;
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+	local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+		__netif_reschedule(q);
 }
 EXPORT_SYMBOL(__netif_schedule);
 
@@ -1796,16 +1800,19 @@ gso:
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
 	if (q->enqueue) {
-		spinlock_t *root_lock = qdisc_root_lock(q);
+		spinlock_t *root_lock = qdisc_lock(q);
 
 		spin_lock(root_lock);
 
-		rc = qdisc_enqueue_root(skb, q);
-		qdisc_run(q);
-
+		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+			kfree_skb(skb);
+			rc = NET_XMIT_DROP;
+		} else {
+			rc = qdisc_enqueue_root(skb, q);
+			qdisc_run(q);
+		}
 		spin_unlock(root_lock);
 
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 		goto out;
 	}
 
@@ -1909,7 +1916,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
 
@@ -1941,22 +1947,6 @@ int netif_rx_ni(struct sk_buff *skb)
 
 EXPORT_SYMBOL(netif_rx_ni);
 
-static inline struct net_device *skb_bond(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-
-	if (dev->master) {
-		if (skb_bond_should_drop(skb)) {
-			kfree_skb(skb);
-			return NULL;
-		}
-		skb->dev = dev->master;
-	}
-
-	return dev;
-}
-
-
 static void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 
@@ -1992,15 +1982,17 @@ static void net_tx_action(struct softirq_action *h)
 
 			head = head->next_sched;
 
-			smp_mb__before_clear_bit();
-			clear_bit(__QDISC_STATE_SCHED, &q->state);
-
-			root_lock = qdisc_root_lock(q);
+			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
+				smp_mb__before_clear_bit();
+				clear_bit(__QDISC_STATE_SCHED,
+					  &q->state);
 				qdisc_run(q);
 				spin_unlock(root_lock);
 			} else {
-				__netif_schedule(q);
+				if (!test_bit(__QDISC_STATE_DEACTIVATED,
+					      &q->state))
+					__netif_reschedule(q);
 			}
 		}
 	}
 
@@ -2102,7 +2094,8 @@ static int ing_filter(struct sk_buff *skb)
 	q = rxq->qdisc;
 	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
-		result = qdisc_enqueue_root(skb, q);
+		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+			result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
 	}
 
@@ -2183,6 +2176,7 @@ int netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
+	struct net_device *null_or_orig;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2196,10 +2190,14 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (!skb->iif)
 		skb->iif = skb->dev->ifindex;
 
-	orig_dev = skb_bond(skb);
-
-	if (!orig_dev)
-		return NET_RX_DROP;
+	null_or_orig = NULL;
+	orig_dev = skb->dev;
+	if (orig_dev->master) {
+		if (skb_bond_should_drop(skb))
+			null_or_orig = orig_dev; /* deliver only exact match */
+		else
+			skb->dev = orig_dev->master;
+	}
 
 	__get_cpu_var(netdev_rx_stat).total++;
 
@@ -2223,7 +2221,8 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
 
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
-		if (!ptype->dev || ptype->dev == skb->dev) {
+		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		    ptype->dev == orig_dev) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
 
@@ -2248,7 +2247,8 @@ ncls:
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type &&
-		    (!ptype->dev || ptype->dev == skb->dev)) {
+		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		     ptype->dev == orig_dev)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
 
@@ -2270,6 +2270,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
 
@@ -2279,7 +2293,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
 
@@ -2288,14 +2301,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
 
@@ -3988,6 +3996,10 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
+	/* Enable software GSO if SG is supported. */
+	if (dev->features & NETIF_F_SG)
+		dev->features |= NETIF_F_GSO;
+
 	netdev_initialize_kobject(dev);
 	ret = netdev_register_kobject(dev);
 	if (ret)
 
@@ -4165,6 +4177,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
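[Editor's note] The skb_copy_datagram_from_iovec() helper added above is the write-side counterpart of skb_copy_datagram_iovec(). As a rough illustration of the intended call pattern — a hypothetical caller for this page, not code from the merge — a transmit path running in process context might use it like this. Note that, as the kerneldoc says, the helper consumes (modifies) the iovec as it copies:

#include <linux/skbuff.h>
#include <linux/uio.h>

/* Hypothetical sketch: build an skb carrying 'len' bytes of user data
 * described by 'iov'. Returns NULL if allocation fails or the copy
 * faults on a bad user pointer. */
static struct sk_buff *build_skb_from_iovec(struct iovec *iov, int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

	if (!skb)
		return NULL;

	skb_put(skb, len);		/* make room for the payload */
	if (skb_copy_datagram_from_iovec(skb, 0, iov, len)) {
		kfree_skb(skb);		/* copy faulted; drop the buffer */
		return NULL;
	}
	return skb;
}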
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f62c8af85d3..9d92e41826e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2281,6 +2281,7 @@ static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
 	struct neighbour *n = neigh_get_first(seq);
 
 	if (n) {
+		--(*pos);
 		while (*pos) {
 			n = neigh_get_next(seq, n, pos);
 			if (!n)
 
@@ -2341,6 +2342,7 @@ static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
 	struct pneigh_entry *pn = pneigh_get_first(seq);
 
 	if (pn) {
+		--(*pos);
 		while (*pos) {
 			pn = pneigh_get_next(seq, pn, pos);
 			if (!pn)
 
@@ -2354,10 +2356,11 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 {
 	struct neigh_seq_state *state = seq->private;
 	void *rc;
+	loff_t idxpos = *pos;
 
-	rc = neigh_get_idx(seq, pos);
+	rc = neigh_get_idx(seq, &idxpos);
 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
-		rc = pneigh_get_idx(seq, pos);
+		rc = pneigh_get_idx(seq, &idxpos);
 
 	return rc;
 }
 
@@ -2366,7 +2369,6 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 	__acquires(tbl->lock)
 {
 	struct neigh_seq_state *state = seq->private;
-	loff_t pos_minus_one;
 
 	state->tbl = tbl;
 	state->bucket = 0;
 
@@ -2374,8 +2376,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
 	read_lock_bh(&tbl->lock);
 
-	pos_minus_one = *pos - 1;
-	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
+	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
 EXPORT_SYMBOL(neigh_seq_start);
 
@@ -2385,7 +2386,7 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	void *rc;
 
 	if (v == SEQ_START_TOKEN) {
-		rc = neigh_get_idx(seq, pos);
+		rc = neigh_get_first(seq);
 		goto out;
 	}
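[Editor's note] For context on the neighbour.c hunks above: a seq_file ->start handler is called with a *pos that already counts the SEQ_START_TOKEN slot, so an index lookup must skip one fewer element than *pos, and resuming right after the token must return the first element rather than re-running the lookup. A minimal standalone iterator showing that convention — hypothetical example code, not from this commit:

#include <linux/kernel.h>
#include <linux/seq_file.h>

/* Hypothetical iterator over a fixed table, illustrating the *pos
 * convention the neighbour.c fix restores: slot 0 is the
 * SEQ_START_TOKEN header, slot n (n >= 1) maps to entry n - 1.
 * ->next advances *pos first, so ->start stays in sync on resume. */
static char *example_tbl[] = { "alpha", "beta", "gamma" };
#define EXAMPLE_N ARRAY_SIZE(example_tbl)

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;
	return *pos <= EXAMPLE_N ? example_tbl[*pos - 1] : NULL;
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;	/* advance first, then map *pos to an entry */
	return *pos <= EXAMPLE_N ? example_tbl[*pos - 1] : NULL;
}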
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3284605f2ec..a756847e381 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -168,7 +168,7 @@
 #include <asm/div64.h>		/* do_div */
 #include <asm/timex.h>
 
-#define VERSION  "pktgen v2.69: Packet Generator for packet performance testing.\n"
+#define VERSION  "pktgen v2.70: Packet Generator for packet performance testing.\n"
 
 #define IP_NAME_SZ 32
 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
 
@@ -189,6 +189,7 @@
 #define F_FLOW_SEQ    (1<<11)	/* Sequential flows */
 #define F_IPSEC_ON    (1<<12)	/* ipsec on for flows */
 #define F_QUEUE_MAP_RND (1<<13)	/* queue map Random */
+#define F_QUEUE_MAP_CPU (1<<14)	/* queue map mirrors smp_processor_id() */
 
 /* Thread control flag bits */
 #define T_TERMINATE   (1<<0)
 
@@ -621,6 +622,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 	if (pkt_dev->flags & F_QUEUE_MAP_RND)
 		seq_printf(seq, "QUEUE_MAP_RND  ");
 
+	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
+		seq_printf(seq, "QUEUE_MAP_CPU  ");
+
 	if (pkt_dev->cflows) {
 		if (pkt_dev->flags & F_FLOW_SEQ)
 			seq_printf(seq, "FLOW_SEQ  "); /*in sequence flows*/
 
@@ -1134,6 +1138,12 @@ static ssize_t pktgen_if_write(struct file *file,
 
 		else if (strcmp(f, "!QUEUE_MAP_RND") == 0)
 			pkt_dev->flags &= ~F_QUEUE_MAP_RND;
+
+		else if (strcmp(f, "QUEUE_MAP_CPU") == 0)
+			pkt_dev->flags |= F_QUEUE_MAP_CPU;
+
+		else if (strcmp(f, "!QUEUE_MAP_CPU") == 0)
+			pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
 #ifdef CONFIG_XFRM
 		else if (strcmp(f, "IPSEC") == 0)
 			pkt_dev->flags |= F_IPSEC_ON;
 
@@ -1895,6 +1905,23 @@ static int pktgen_device_event(struct notifier_block *unused,
 	return NOTIFY_DONE;
 }
 
+static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname)
+{
+	char b[IFNAMSIZ+5];
+	int i = 0;
+
+	for(i=0; ifname[i] != '@'; i++) {
+		if(i == IFNAMSIZ)
+			break;
+
+		b[i] = ifname[i];
+	}
+	b[i] = 0;
+
+	return dev_get_by_name(&init_net, b);
+}
+
+
 /* Associate pktgen_dev with a device.
  */
 static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
 
@@ -1908,7 +1935,7 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
 		pkt_dev->odev = NULL;
 	}
 
-	odev = dev_get_by_name(&init_net, ifname);
+	odev = pktgen_dev_get_by_name(pkt_dev, ifname);
 	if (!odev) {
 		printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname);
 		return -ENODEV;
 
@@ -1934,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
  */
 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 {
+	int ntxq;
+
 	if (!pkt_dev->odev) {
 		printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
 		       "setup_inject.\n");
 
@@ -1942,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 		return;
 	}
 
+	/* make sure that we don't pick a non-existing transmit queue */
+	ntxq = pkt_dev->odev->real_num_tx_queues;
+	if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+		printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+		       "disabled because CPU count (%d) exceeds number ",
+		       num_online_cpus());
+		printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+		       "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+		pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+	}
+	if (ntxq <= pkt_dev->queue_map_min) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_min (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_min);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_min = ntxq - 1;
+	}
+	if (ntxq <= pkt_dev->queue_map_max) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_max (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_max);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_max = ntxq - 1;
+	}
+
 	/* Default to the interface's mac if not explicitly set. */
 
 	if (is_zero_ether_addr(pkt_dev->src_mac))
 
@@ -2085,15 +2141,19 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
 		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
 			/* reset time */
 			pkt_dev->flows[flow].count = 0;
+			pkt_dev->flows[flow].flags = 0;
 			pkt_dev->curfl += 1;
 			if (pkt_dev->curfl >= pkt_dev->cflows)
 				pkt_dev->curfl = 0; /*reset */
 		}
 	} else {
 		flow = random32() % pkt_dev->cflows;
+		pkt_dev->curfl = flow;
 
-		if (pkt_dev->flows[flow].count > pkt_dev->lflow)
+		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
 			pkt_dev->flows[flow].count = 0;
+			pkt_dev->flows[flow].flags = 0;
+		}
 	}
 
 	return pkt_dev->curfl;
 
@@ -2125,7 +2185,11 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
 #endif
 static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
 {
-	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+
+	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
+		pkt_dev->cur_queue_map = smp_processor_id();
+
+	else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
 		__u16 t;
 		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
 			t = random32() %
 
@@ -2162,7 +2226,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 			mc = random32() % pkt_dev->src_mac_count;
 		else {
 			mc = pkt_dev->cur_src_mac_offset++;
-			if (pkt_dev->cur_src_mac_offset >
+			if (pkt_dev->cur_src_mac_offset >=
 			    pkt_dev->src_mac_count)
 				pkt_dev->cur_src_mac_offset = 0;
 		}
 
@@ -2189,7 +2253,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		else {
 			mc = pkt_dev->cur_dst_mac_offset++;
-			if (pkt_dev->cur_dst_mac_offset >
+			if (pkt_dev->cur_dst_mac_offset >=
 			    pkt_dev->dst_mac_count) {
 				pkt_dev->cur_dst_mac_offset = 0;
 			}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 84640172d65..ca1ccdf1ef7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2256,14 +2256,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		segs = nskb;
 		tail = nskb;
 
-		nskb->dev = skb->dev;
-		skb_copy_queue_mapping(nskb, skb);
-		nskb->priority = skb->priority;
-		nskb->protocol = skb->protocol;
-		nskb->vlan_tci = skb->vlan_tci;
-		nskb->dst = dst_clone(skb->dst);
-		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-		nskb->pkt_type = skb->pkt_type;
+		__copy_skb_header(nskb, skb);
 		nskb->mac_len = skb->mac_len;
 
 		skb_reserve(nskb, headroom);
 
@@ -2274,6 +2267,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
 					  doffset);
 		if (!sg) {
+			nskb->ip_summed = CHECKSUM_NONE;
 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
 							    skb_put(nskb, len),
 							    len, 0);
 
@@ -2283,8 +2277,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		frag = skb_shinfo(nskb)->frags;
 		k = 0;
 
-		nskb->ip_summed = CHECKSUM_PARTIAL;
-		nskb->csum = skb->csum;
 		skb_copy_from_linear_data_offset(skb, offset,
 						 skb_put(nskb, hsize), hsize);
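[Editor's note] Taken together, the pktgen v2.70 hunks make the per-packet transmit-queue choice roughly equivalent to the condensed sketch below — a hypothetical helper written as if it lived inside pktgen.c, for illustration only. The real code stores the result in pkt_dev->cur_queue_map, and pktgen_setup_inject() (above) clamps queue_map_min/max and drops F_QUEUE_MAP_CPU when there are more online CPUs than tx queues:

#include <linux/random.h>
#include <linux/smp.h>

/* Hypothetical condensed view of set_cur_queue_map() after v2.70:
 * the per-CPU mapping wins; otherwise pick a queue at random or
 * round-robin within [queue_map_min, queue_map_max]. */
static __u16 pick_tx_queue(struct pktgen_dev *pkt_dev)
{
	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
		return smp_processor_id();	/* queue n for CPU n */

	if (pkt_dev->queue_map_min >= pkt_dev->queue_map_max)
		return pkt_dev->queue_map_min;	/* single fixed queue */

	if (pkt_dev->flags & F_QUEUE_MAP_RND)
		return pkt_dev->queue_map_min +
		       (random32() % (pkt_dev->queue_map_max -
				      pkt_dev->queue_map_min + 1));

	/* round robin onward from the previous pick */
	return pkt_dev->cur_queue_map < pkt_dev->queue_map_max ?
	       pkt_dev->cur_queue_map + 1 : pkt_dev->queue_map_min;
}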