From 05423b241311c9380b7280179295bac7794281b6 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 26 Oct 2009 18:40:35 -0700
Subject: vlan: allow null VLAN ID to be used

We currently use a 16 bit field (vlan_tci) to store VLAN ID/PRIO on a skb.
A null value is used as a special value, meaning vlan tagging is not
enabled. This forbids use of a null VLAN ID.

As pointed out by David, some drivers use the 3 high-order bits (PRIO).
As the VLAN ID is 12 bits, we can use the remaining bit (CFI) as a flag,
and allow a null VLAN ID.

In case future code really wants to use VLAN_CFI_MASK, we'll have to use
a bit outside of vlan_tci.

#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator */
#define VLAN_TAG_PRESENT	VLAN_CFI_MASK
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */

Reported-by: Gertjan Hofman
Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/8021q/vlan_dev.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'net/8021q/vlan_dev.c')

diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4198ec5c8ab..e3701977f58 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -393,7 +393,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
         struct vlan_dev_info *vlan = vlan_dev_info(dev);
         struct vlan_priority_tci_mapping *mp = NULL;
         struct vlan_priority_tci_mapping *np;
-        u32 vlan_qos = (vlan_prio << 13) & 0xE000;
+        u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
 
         /* See if a priority mapping exists.. */
         mp = vlan->egress_priority_map[skb_prio & 0xF];
--
cgit v1.2.3-70-g09d2
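The encoding this patch settles on can be checked with a small stand-alone sketch (not part of the patch; example_encode_tci() is a name invented for illustration): priority lands in the top three bits, VLAN_TAG_PRESENT reuses the CFI bit, and the VLAN ID occupies the low 12 bits, so a tagged frame with VLAN ID 0 no longer collides with "no tag at all".

#include <assert.h>
#include <stdint.h>

#define VLAN_PRIO_MASK    0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT   13
#define VLAN_CFI_MASK     0x1000 /* Canonical Format Indicator */
#define VLAN_TAG_PRESENT  VLAN_CFI_MASK
#define VLAN_VID_MASK     0x0fff /* VLAN Identifier */

/* Hypothetical helper, only to illustrate the bit layout above. */
static uint16_t example_encode_tci(uint16_t prio, uint16_t vid)
{
        return (uint16_t)(((prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK) |
                          VLAN_TAG_PRESENT | (vid & VLAN_VID_MASK));
}

int main(void)
{
        uint16_t tci = example_encode_tci(0, 0); /* prio 0, VLAN ID 0 */

        /* A tag is marked present even though prio and VLAN ID are both 0... */
        assert(tci & VLAN_TAG_PRESENT);
        /* ...while an untouched vlan_tci of 0 still means "no tag". */
        assert(!((uint16_t)0 & VLAN_TAG_PRESENT));
        /* The VLAN ID extracts back to 0. */
        assert((tci & VLAN_VID_MASK) == 0);
        return 0;
}

Inside the kernel, testing the VLAN_TAG_PRESENT bit in this way is essentially what the vlan_tx_tag_present() macro in include/linux/if_vlan.h does.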
From d6534799410d2b53e5e80b8a90d6a8bab3de30ed Mon Sep 17 00:00:00 2001
From: Yi Zou
Date: Wed, 28 Oct 2009 18:25:16 +0000
Subject: vlan: Add support to netdev_ops.ndo_fcoe_get_wwn for VLAN device

Implements netdev_ops.ndo_fcoe_get_wwn for the VLAN device.

Signed-off-by: Yi Zou
Signed-off-by: Jeff Kirsher
Signed-off-by: David S. Miller
---
 net/8021q/vlan_dev.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'net/8021q/vlan_dev.c')

diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e3701977f58..790fd55ec31 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -626,6 +626,17 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
                 rc = ops->ndo_fcoe_disable(real_dev);
         return rc;
 }
+
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+        struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+        const struct net_device_ops *ops = real_dev->netdev_ops;
+        int rc = -EINVAL;
+
+        if (ops->ndo_fcoe_get_wwn)
+                rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
+        return rc;
+}
 #endif
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
@@ -791,6 +802,7 @@ static const struct net_device_ops vlan_netdev_ops = {
         .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
         .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
         .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
+        .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
 #endif
 };
 
@@ -813,6 +825,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
         .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
         .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
         .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
+        .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
 #endif
 };
--
cgit v1.2.3-70-g09d2

From cbbef5e183079455763fc470ccf69008f92ab4b6 Mon Sep 17 00:00:00 2001
From: Patrick McHardy
Date: Tue, 10 Nov 2009 06:14:24 +0000
Subject: vlan/macvlan: propagate transmission state to upper layers

Both vlan and macvlan devices usually don't use a qdisc and immediately
queue packets to the underlying device. Propagate the transmission state
of the underlying device to the upper layers so they can react to
congestion and/or inform the sending process.

Signed-off-by: Patrick McHardy
Signed-off-by: David S. Miller
---
 drivers/net/macvlan.c | 2 +-
 net/8021q/vlan_dev.c  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'net/8021q/vlan_dev.c')

diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d7dba3f6f76..271aa7e1d03 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -202,7 +202,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
         } else
                 txq->tx_dropped++;
 
-        return NETDEV_TX_OK;
+        return ret;
 }
 
 static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 790fd55ec31..91596594213 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -332,7 +332,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
         } else
                 txq->tx_dropped++;
 
-        return NETDEV_TX_OK;
+        return ret;
 }
 
 static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
@@ -358,7 +358,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
         } else
                 txq->tx_dropped++;
 
-        return NETDEV_TX_OK;
+        return ret;
 }
 
 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
--
cgit v1.2.3-70-g09d2
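The fix above boils down to a common pattern for stacked devices: hand the skb to the lower device and return whatever status it reports instead of unconditionally claiming NETDEV_TX_OK. The sketch below restates that pattern outside the actual vlan/macvlan code; the function name and the extra lower_dev parameter are invented for the example (a real ndo_start_xmit would look the lower device up from its private data), and the real drivers keep their counters in per-queue stats rather than dev->stats.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch of the pattern used by the patch above: queue the skb on the
 * lower device and propagate its return code, so callers can see
 * congestion or drops instead of an unconditional NETDEV_TX_OK. */
static netdev_tx_t stacked_dev_xmit(struct sk_buff *skb,
                                    struct net_device *dev,
                                    struct net_device *lower_dev)
{
        unsigned int len = skb->len;    /* skb may be consumed below */
        int ret;

        skb->dev = lower_dev;
        ret = dev_queue_xmit(skb);

        if (likely(ret == NET_XMIT_SUCCESS)) {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += len;
        } else {
                dev->stats.tx_dropped++;
        }

        return ret;     /* previously: return NETDEV_TX_OK; */
}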
From 9793241fe92f7d9303fb221e43fc598eb065f267 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 17 Nov 2009 04:53:09 +0000
Subject: vlan: Precise RX stats accounting

With multi-queue devices, it's possible that several cpus call the vlan
RX routines simultaneously for the same vlan device. We update the RX
stats counters without any locking, so we can get slightly wrong counters.

One possible fix is to use percpu counters, to get precise accounting and
also a guarantee of no cache-line ping-pong between cpus.

Note: this adds 16 bytes (32 bytes on 64bit arches) of percpu data per
vlan device.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/8021q/vlan.h      | 17 +++++++++++++++++
 net/8021q/vlan_core.c | 12 +++++++-----
 net/8021q/vlan_dev.c  | 47 +++++++++++++++++++++++++++++++++++++++++------
 3 files changed, 65 insertions(+), 11 deletions(-)

(limited to 'net/8021q/vlan_dev.c')

diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 68f9290e683..5685296017e 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -16,6 +16,21 @@ struct vlan_priority_tci_mapping {
         struct vlan_priority_tci_mapping *next;
 };
 
+
+/**
+ * struct vlan_rx_stats - VLAN percpu rx stats
+ * @rx_packets: number of received packets
+ * @rx_bytes: number of received bytes
+ * @multicast: number of received multicast packets
+ * @rx_errors: number of errors
+ */
+struct vlan_rx_stats {
+        unsigned long rx_packets;
+        unsigned long rx_bytes;
+        unsigned long multicast;
+        unsigned long rx_errors;
+};
+
 /**
  * struct vlan_dev_info - VLAN private device data
  * @nr_ingress_mappings: number of ingress priority mappings
@@ -29,6 +44,7 @@ struct vlan_priority_tci_mapping {
  * @dent: proc dir entry
  * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX
  * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
+ * @vlan_rx_stats: ptr to percpu rx stats
  */
 struct vlan_dev_info {
         unsigned int nr_ingress_mappings;
@@ -45,6 +61,7 @@ struct vlan_priority_tci_mapping {
         struct proc_dir_entry *dent;
         unsigned long cnt_inc_headroom_on_tx;
         unsigned long cnt_encap_on_xmit;
+        struct vlan_rx_stats *vlan_rx_stats;
 };
 
 static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 971d3755ae8..e75a2f3b10a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(__vlan_hwaccel_rx);
 int vlan_hwaccel_do_receive(struct sk_buff *skb)
 {
         struct net_device *dev = skb->dev;
-        struct net_device_stats *stats;
+        struct vlan_rx_stats *rx_stats;
 
         skb->dev = vlan_dev_info(dev)->real_dev;
         netif_nit_deliver(skb);
@@ -40,15 +40,17 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
         skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
         skb->vlan_tci = 0;
 
-        stats = &dev->stats;
-        stats->rx_packets++;
-        stats->rx_bytes += skb->len;
+        rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+                               smp_processor_id());
+
+        rx_stats->rx_packets++;
+        rx_stats->rx_bytes += skb->len;
 
         switch (skb->pkt_type) {
         case PACKET_BROADCAST:
                 break;
         case PACKET_MULTICAST:
-                stats->multicast++;
+                rx_stats->multicast++;
                 break;
         case PACKET_OTHERHOST:
                 /* Our lower layer thinks this is not local, let's make sure.
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 91596594213..de0dc6bacbe 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -140,7 +140,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *ptype, struct net_device *orig_dev)
 {
         struct vlan_hdr *vhdr;
-        struct net_device_stats *stats;
+        struct vlan_rx_stats *rx_stats;
         u16 vlan_id;
         u16 vlan_tci;
 
@@ -163,9 +163,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                 goto err_unlock;
         }
 
-        stats = &skb->dev->stats;
-        stats->rx_packets++;
-        stats->rx_bytes += skb->len;
+        rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+                               smp_processor_id());
+        rx_stats->rx_packets++;
+        rx_stats->rx_bytes += skb->len;
 
         skb_pull_rcsum(skb, VLAN_HLEN);
 
@@ -180,7 +181,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                 break;
 
         case PACKET_MULTICAST:
-                stats->multicast++;
+                rx_stats->multicast++;
                 break;
 
         case PACKET_OTHERHOST:
@@ -200,7 +201,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 
         skb = vlan_check_reorder_header(skb);
         if (!skb) {
-                stats->rx_errors++;
+                rx_stats->rx_errors++;
                 goto err_unlock;
         }
 
@@ -731,6 +732,11 @@ static int vlan_dev_init(struct net_device *dev)
                 subclass = 1;
 
         vlan_dev_set_lockdep_class(dev, subclass);
+
+        vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
+        if (!vlan_dev_info(dev)->vlan_rx_stats)
+                return -ENOMEM;
+
         return 0;
 }
 
@@ -740,6 +746,8 @@ static void vlan_dev_uninit(struct net_device *dev)
         struct vlan_dev_info *vlan = vlan_dev_info(dev);
         int i;
 
+        free_percpu(vlan->vlan_rx_stats);
+        vlan->vlan_rx_stats = NULL;
         for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
                 while ((pm = vlan->egress_priority_map[i]) != NULL) {
                         vlan->egress_priority_map[i] = pm->next;
@@ -775,6 +783,31 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
         return dev_ethtool_get_flags(vlan->real_dev);
 }
 
+static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
+{
+        struct net_device_stats *stats = &dev->stats;
+
+        dev_txq_stats_fold(dev, stats);
+
+        if (vlan_dev_info(dev)->vlan_rx_stats) {
+                struct vlan_rx_stats *p, rx = {0};
+                int i;
+
+                for_each_possible_cpu(i) {
+                        p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+                        rx.rx_packets += p->rx_packets;
+                        rx.rx_bytes   += p->rx_bytes;
+                        rx.rx_errors  += p->rx_errors;
+                        rx.multicast  += p->multicast;
+                }
+                stats->rx_packets = rx.rx_packets;
+                stats->rx_bytes   = rx.rx_bytes;
+                stats->rx_errors  = rx.rx_errors;
+                stats->multicast  = rx.multicast;
+        }
+        return stats;
+}
+
 static const struct ethtool_ops vlan_ethtool_ops = {
         .get_settings           = vlan_ethtool_get_settings,
         .get_drvinfo            = vlan_ethtool_get_drvinfo,
@@ -797,6 +830,7 @@ static const struct net_device_ops vlan_netdev_ops = {
         .ndo_change_rx_flags    = vlan_dev_change_rx_flags,
         .ndo_do_ioctl           = vlan_dev_ioctl,
         .ndo_neigh_setup        = vlan_dev_neigh_setup,
+        .ndo_get_stats          = vlan_dev_get_stats,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
         .ndo_fcoe_ddp_setup     = vlan_dev_fcoe_ddp_setup,
         .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
@@ -820,6 +854,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
         .ndo_change_rx_flags    = vlan_dev_change_rx_flags,
         .ndo_do_ioctl           = vlan_dev_ioctl,
         .ndo_neigh_setup        = vlan_dev_neigh_setup,
+        .ndo_get_stats          = vlan_dev_get_stats,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
         .ndo_fcoe_ddp_setup     = vlan_dev_fcoe_ddp_setup,
         .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
--
cgit v1.2.3-70-g09d2
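The vlan_dev.c hunks above follow the usual lifecycle for percpu counters: allocate one private copy per possible CPU at init, let the RX fast path bump only the local CPU's copy, and fold all copies together when statistics are read. The sketch below restates that lifecycle in isolation; it reuses struct vlan_rx_stats from the patch, but the example_* names are invented and error handling is trimmed.

#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct vlan_rx_stats {
        unsigned long rx_packets;
        unsigned long rx_bytes;
        unsigned long multicast;
        unsigned long rx_errors;
};

/* init (ndo_init): one private copy of the counters per possible CPU */
static struct vlan_rx_stats *example_alloc_stats(void)
{
        return alloc_percpu(struct vlan_rx_stats);
}

/* RX fast path: touch only this CPU's copy -- no lock taken and no
 * shared cache line bouncing between CPUs */
static void example_account_rx(struct vlan_rx_stats *pcpu, unsigned int len)
{
        struct vlan_rx_stats *rx_stats = per_cpu_ptr(pcpu, smp_processor_id());

        rx_stats->rx_packets++;
        rx_stats->rx_bytes += len;
}

/* ndo_get_stats: fold every CPU's copy into one total on demand */
static void example_fold_stats(struct vlan_rx_stats *pcpu,
                               struct net_device_stats *stats)
{
        struct vlan_rx_stats rx = {0};
        int i;

        for_each_possible_cpu(i) {
                struct vlan_rx_stats *p = per_cpu_ptr(pcpu, i);

                rx.rx_packets += p->rx_packets;
                rx.rx_bytes   += p->rx_bytes;
                rx.rx_errors  += p->rx_errors;
                rx.multicast  += p->multicast;
        }
        stats->rx_packets = rx.rx_packets;
        stats->rx_bytes   = rx.rx_bytes;
        stats->rx_errors  = rx.rx_errors;
        stats->multicast  = rx.multicast;
}

/* uninit (ndo_uninit): release the percpu memory */
static void example_free_stats(struct vlan_rx_stats *pcpu)
{
        free_percpu(pcpu);
}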
From 5e7565930524410f097f5b04f8aba663089a6ffc Mon Sep 17 00:00:00 2001
From: Patrick McHardy
Date: Wed, 25 Nov 2009 07:54:54 +0000
Subject: vlan: support "loose binding" to the underlying network device

Currently the UP/DOWN state of VLANs is synchronized to the state of the
underlying device, meaning all VLANs are set down once the underlying
device is set down. This causes all routes to the VLAN devices to vanish.

Add a flag to specify a "loose binding" mode, in which only the operstate
is transferred, but the VLAN device state is independent.

Signed-off-by: Patrick McHardy
Signed-off-by: David S. Miller
---
 include/linux/if_vlan.h  | 1 +
 net/8021q/vlan.c         | 9 +++++++--
 net/8021q/vlan_dev.c     | 6 ++++--
 net/8021q/vlan_netlink.c | 3 ++-
 4 files changed, 14 insertions(+), 5 deletions(-)

(limited to 'net/8021q/vlan_dev.c')

diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 153f6b9e722..3d870fda8c4 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -339,6 +339,7 @@ enum vlan_ioctl_cmds {
 enum vlan_flags {
         VLAN_FLAG_REORDER_HDR   = 0x1,
         VLAN_FLAG_GVRP          = 0x2,
+        VLAN_FLAG_LOOSE_BINDING = 0x4,
 };
 
 enum vlan_name_types {
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 1483243edf1..225aa2fac0e 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -431,6 +431,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
         struct vlan_group *grp;
         int i, flgs;
         struct net_device *vlandev;
+        struct vlan_dev_info *vlan;
         LIST_HEAD(list);
 
         if (is_vlan_dev(dev))
@@ -507,7 +508,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                         if (!(flgs & IFF_UP))
                                 continue;
 
-                        dev_change_flags(vlandev, flgs & ~IFF_UP);
+                        vlan = vlan_dev_info(vlandev);
+                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+                                dev_change_flags(vlandev, flgs & ~IFF_UP);
                         vlan_transfer_operstate(dev, vlandev);
                 }
                 break;
@@ -523,7 +526,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                         if (flgs & IFF_UP)
                                 continue;
 
-                        dev_change_flags(vlandev, flgs | IFF_UP);
+                        vlan = vlan_dev_info(vlandev);
+                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+                                dev_change_flags(vlandev, flgs | IFF_UP);
                         vlan_transfer_operstate(dev, vlandev);
                 }
                 break;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index de0dc6bacbe..b7889782047 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -431,7 +431,8 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
         struct vlan_dev_info *vlan = vlan_dev_info(dev);
         u32 old_flags = vlan->flags;
 
-        if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
+        if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+                     VLAN_FLAG_LOOSE_BINDING))
                 return -EINVAL;
 
         vlan->flags = (old_flags & ~mask) | (flags & mask);
@@ -456,7 +457,8 @@ static int vlan_dev_open(struct net_device *dev)
         struct net_device *real_dev = vlan->real_dev;
         int err;
 
-        if (!(real_dev->flags & IFF_UP))
+        if (!(real_dev->flags & IFF_UP) &&
+            !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                 return -ENETDOWN;
 
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 3c9cf6a8e7f..ddc105734af 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -60,7 +60,8 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
         if (data[IFLA_VLAN_FLAGS]) {
                 flags = nla_data(data[IFLA_VLAN_FLAGS]);
                 if ((flags->flags & flags->mask) &
-                    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
+                    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+                      VLAN_FLAG_LOOSE_BINDING))
                         return -EINVAL;
         }
 
--
cgit v1.2.3-70-g09d2
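The IFLA_VLAN_FLAGS attribute validated above carries a flags/mask pair, and vlan_dev_change_flags() applies it as vlan->flags = (old_flags & ~mask) | (flags & mask), so a single bit such as VLAN_FLAG_LOOSE_BINDING can be toggled without disturbing the others. A small stand-alone sketch of that update rule (plain C, not kernel code; apply_vlan_flags() is a name invented for the example):

#include <assert.h>
#include <stdint.h>

#define VLAN_FLAG_REORDER_HDR   0x1
#define VLAN_FLAG_GVRP          0x2
#define VLAN_FLAG_LOOSE_BINDING 0x4

/* The update rule used by vlan_dev_change_flags(): only bits named in
 * "mask" are touched; their new values come from "flags". */
static uint32_t apply_vlan_flags(uint32_t old_flags, uint32_t flags,
                                 uint32_t mask)
{
        return (old_flags & ~mask) | (flags & mask);
}

int main(void)
{
        uint32_t dev_flags = VLAN_FLAG_REORDER_HDR; /* assume already set */

        /* Request: turn on loose binding, leave everything else alone. */
        dev_flags = apply_vlan_flags(dev_flags,
                                     VLAN_FLAG_LOOSE_BINDING,  /* flags */
                                     VLAN_FLAG_LOOSE_BINDING); /* mask  */
        assert(dev_flags == (VLAN_FLAG_REORDER_HDR | VLAN_FLAG_LOOSE_BINDING));

        /* Request: turn it back off, again touching only that one bit. */
        dev_flags = apply_vlan_flags(dev_flags, 0, VLAN_FLAG_LOOSE_BINDING);
        assert(dev_flags == VLAN_FLAG_REORDER_HDR);
        return 0;
}

Userspace that speaks IFLA_VLAN_FLAGS (later iproute2 releases, for instance) requests the new behaviour exactly this way: flags and mask both set to VLAN_FLAG_LOOSE_BINDING.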