Diffstat (limited to 'net/8021q/vlan_dev.c')
-rw-r--r--  net/8021q/vlan_dev.c  434
1 files changed, 66 insertions, 368 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 14e3d1fa07a..f247f5bff88 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -65,179 +65,6 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
return 0;
}
-static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
-{
- if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
- if (skb_cow(skb, skb_headroom(skb)) < 0)
- skb = NULL;
- if (skb) {
- /* Lifted from Gleb's VLAN code... */
- memmove(skb->data - ETH_HLEN,
- skb->data - VLAN_ETH_HLEN, 12);
- skb->mac_header += VLAN_HLEN;
- }
- }
-
- return skb;
-}
-
-static inline void vlan_set_encap_proto(struct sk_buff *skb,
- struct vlan_hdr *vhdr)
-{
- __be16 proto;
- unsigned char *rawp;
-
- /*
- * Was a VLAN packet, grab the encapsulated protocol, which the layer
- * three protocols care about.
- */
-
- proto = vhdr->h_vlan_encapsulated_proto;
- if (ntohs(proto) >= 1536) {
- skb->protocol = proto;
- return;
- }
-
- rawp = skb->data;
- if (*(unsigned short *)rawp == 0xFFFF)
- /*
- * This is a magic hack to spot IPX packets. Older Novell
- * breaks the protocol design and runs IPX over 802.3 without
- * an 802.2 LLC layer. We look for FFFF which isn't a used
- * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
- * but does for the rest.
- */
- skb->protocol = htons(ETH_P_802_3);
- else
- /*
- * Real 802.2 LLC
- */
- skb->protocol = htons(ETH_P_802_2);
-}
-
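The removed vlan_set_encap_proto() applies the classic Ethernet rule: a type/length value of 1536 (0x0600) or more is an EtherType, anything smaller is an 802.3 length, and a leading 0xFFFF in the payload marks old Novell raw-802.3 IPX rather than real 802.2 LLC. For reference, a minimal standalone C restatement of that classification follows; the function and constant names are illustrative, not kernel API.

/*
 * Illustrative, standalone restatement of the rule used by the removed
 * vlan_set_encap_proto(); classify_encap() and the EX_* names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_ETH_P_802_3  0x0001  /* pseudo-EtherType for raw 802.3, as in linux/if_ether.h */
#define EX_ETH_P_802_2  0x0004  /* pseudo-EtherType for 802.2 LLC, as in linux/if_ether.h */

/* encap_proto: the two bytes after the VLAN header, already in host order.
 * payload0:    the first two payload bytes, used only for the IPX 0xFFFF test. */
static unsigned int classify_encap(uint16_t encap_proto, uint16_t payload0)
{
    if (encap_proto >= 1536)    /* >= 0x0600: a real EtherType */
        return encap_proto;
    if (payload0 == 0xFFFF)     /* raw 802.3: old Novell IPX */
        return EX_ETH_P_802_3;
    return EX_ETH_P_802_2;      /* otherwise genuine 802.2 LLC */
}

int main(void)
{
    printf("IPv4 frame    -> 0x%04x\n", classify_encap(0x0800, 0x4500));
    printf("len 100 + IPX -> 0x%04x\n", classify_encap(100, 0xFFFF));
    printf("len 100 + LLC -> 0x%04x\n", classify_encap(100, 0x4242));
    return 0;
}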
-/*
- * Determine the packet's protocol ID. The rule here is that we
- * assume 802.3 if the type field is short enough to be a length.
- * This is normal practice and works for any 'now in use' protocol.
- *
- * Also, at this point we assume that we ARE dealing exclusively with
- * VLAN packets, or packets that should be made into VLAN packets based
- * on a default VLAN ID.
- *
- * NOTE: Should be similar to ethernet/eth.c.
- *
- * SANITY NOTE: This method is called when a packet is moving up the stack
- * towards userland. To get here, it would have already passed
- * through the ethernet/eth.c eth_type_trans() method.
- * SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
- * stored UNALIGNED in the memory. RISC systems don't like
- * such cases very much...
- * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
- * aligned, so there doesn't need to be any of the unaligned
- * stuff. It has been commented out now... --Ben
- *
- */
-int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev)
-{
- struct vlan_hdr *vhdr;
- struct vlan_rx_stats *rx_stats;
- struct net_device *vlan_dev;
- u16 vlan_id;
- u16 vlan_tci;
-
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (skb == NULL)
- goto err_free;
-
- if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
- goto err_free;
-
- vhdr = (struct vlan_hdr *)skb->data;
- vlan_tci = ntohs(vhdr->h_vlan_TCI);
- vlan_id = vlan_tci & VLAN_VID_MASK;
-
- rcu_read_lock();
- vlan_dev = vlan_find_dev(dev, vlan_id);
-
- /* If the VLAN device is defined, we use it.
- * If not, and the VID is 0, it is a 802.1p packet (not
- * really a VLAN), so we will just netif_rx it later to the
- * original interface, but with the skb->proto set to the
- * wrapped proto: we do nothing here.
- */
-
- if (!vlan_dev) {
- if (vlan_id) {
- pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
- __func__, vlan_id, dev->name);
- goto err_unlock;
- }
- rx_stats = NULL;
- } else {
- skb->dev = vlan_dev;
-
- rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats);
-
- u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->rx_packets++;
- rx_stats->rx_bytes += skb->len;
-
- skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
-
- pr_debug("%s: priority: %u for TCI: %hu\n",
- __func__, skb->priority, vlan_tci);
-
- switch (skb->pkt_type) {
- case PACKET_BROADCAST:
- /* Yeah, stats collect these together.. */
- /* stats->broadcast ++; // no such counter :-( */
- break;
-
- case PACKET_MULTICAST:
- rx_stats->rx_multicast++;
- break;
-
- case PACKET_OTHERHOST:
- /* Our lower layer thinks this is not local, let's make
- * sure.
- * This allows the VLAN to have a different MAC than the
- * underlying device, and still route correctly.
- */
- if (!compare_ether_addr(eth_hdr(skb)->h_dest,
- skb->dev->dev_addr))
- skb->pkt_type = PACKET_HOST;
- break;
- default:
- break;
- }
- u64_stats_update_end(&rx_stats->syncp);
- }
-
- skb_pull_rcsum(skb, VLAN_HLEN);
- vlan_set_encap_proto(skb, vhdr);
-
- if (vlan_dev) {
- skb = vlan_check_reorder_header(skb);
- if (!skb) {
- rx_stats->rx_errors++;
- goto err_unlock;
- }
- }
-
- netif_rx(skb);
-
- rcu_read_unlock();
- return NET_RX_SUCCESS;
-
-err_unlock:
- rcu_read_unlock();
-err_free:
- atomic_long_inc(&dev->rx_dropped);
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
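The 16-bit TCI that the removed vlan_skb_recv() pulled out of the header packs a 3-bit priority (PCP), a 1-bit CFI/DEI and a 12-bit VID, which is why the code masks with VLAN_VID_MASK and hands the full TCI to vlan_get_ingress_priority(). A self-contained sketch of that layout follows; the struct is redefined locally so the example builds on its own and is only meant to mirror the kernel's struct vlan_hdr.

/*
 * Self-contained sketch of the 802.1Q header layout parsed by the removed
 * vlan_skb_recv(); struct vlan_hdr_example and EX_VLAN_VID_MASK are local
 * stand-ins for the kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* htons()/ntohs() */

struct vlan_hdr_example {
    uint16_t h_vlan_TCI;                 /* PCP(3) | DEI(1) | VID(12), network order */
    uint16_t h_vlan_encapsulated_proto;  /* EtherType of the payload, network order */
};

#define EX_VLAN_VID_MASK 0x0fff          /* low 12 bits of the TCI */

int main(void)
{
    /* 0xa00a in host order: priority 5, DEI 0, VID 10 */
    struct vlan_hdr_example vhdr = { htons(0xa00a), htons(0x0800) };

    uint16_t tci = ntohs(vhdr.h_vlan_TCI);
    unsigned int vid = tci & EX_VLAN_VID_MASK;  /* same mask the removed code applied */
    unsigned int pcp = tci >> 13;
    unsigned int dei = (tci >> 12) & 1;

    printf("VID %u, PCP %u, DEI %u, inner proto 0x%04x\n",
           vid, pcp, dei, (unsigned int)ntohs(vhdr.h_vlan_encapsulated_proto));
    return 0;
}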
static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
{
@@ -274,9 +101,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 vlan_tci = 0;
int rc;
- if (WARN_ON(skb_headroom(skb) < dev->hard_header_len))
- return -ENOSPC;
-
if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
@@ -313,8 +137,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- int i = skb_get_queue_mapping(skb);
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
unsigned int len;
int ret;
@@ -326,71 +148,31 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
*/
if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
- unsigned int orig_headroom = skb_headroom(skb);
u16 vlan_tci;
-
- vlan_dev_info(dev)->cnt_encap_on_xmit++;
-
vlan_tci = vlan_dev_info(dev)->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
- skb = __vlan_put_tag(skb, vlan_tci);
- if (!skb) {
- txq->tx_dropped++;
- return NETDEV_TX_OK;
- }
-
- if (orig_headroom < VLAN_HLEN)
- vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
+ skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
}
-
skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
len = skb->len;
ret = dev_queue_xmit(skb);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- txq->tx_packets++;
- txq->tx_bytes += len;
- } else
- txq->tx_dropped++;
-
- return ret;
-}
-
-static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- int i = skb_get_queue_mapping(skb);
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- u16 vlan_tci;
- unsigned int len;
- int ret;
+ struct vlan_pcpu_stats *stats;
- vlan_tci = vlan_dev_info(dev)->vlan_id;
- vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
- skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
-
- skb->dev = vlan_dev_info(dev)->real_dev;
- len = skb->len;
- ret = dev_queue_xmit(skb);
-
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- txq->tx_packets++;
- txq->tx_bytes += len;
- } else
- txq->tx_dropped++;
+ stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+ }
return ret;
}
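The per-CPU counters above are bracketed by u64_stats_update_begin()/u64_stats_update_end() so that the 64-bit values stay consistent for the fetch/retry readers in vlan_dev_get_stats64() further down; the two calls must always pair up. The userspace sketch below illustrates the idea with a plain sequence counter; it is a simplification that assumes a single writer and glosses over the exact barriers the kernel's u64_stats_sync/seqcount code uses.

/*
 * Simplified userspace sketch of the u64_stats_sync idea: the writer bumps a
 * sequence counter around its updates, readers retry whenever they observe an
 * odd or changed sequence. Single writer assumed; not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct pcpu_stats_example {
    atomic_uint seq;        /* even = stable, odd = update in progress */
    uint64_t tx_packets;
    uint64_t tx_bytes;
};

static void stats_update(struct pcpu_stats_example *s, uint64_t len)
{
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);  /* "begin" */
    s->tx_packets++;
    s->tx_bytes += len;
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);  /* "end": must pair with "begin" */
}

static void stats_read(struct pcpu_stats_example *s, uint64_t *packets, uint64_t *bytes)
{
    unsigned int start;

    do {
        start = atomic_load_explicit(&s->seq, memory_order_acquire);
        *packets = s->tx_packets;
        *bytes = s->tx_bytes;
    } while ((start & 1) || start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
    struct pcpu_stats_example s = { 0 };
    uint64_t p, b;

    stats_update(&s, 1500);
    stats_update(&s, 60);
    stats_read(&s, &p, &b);
    printf("%llu packets, %llu bytes\n", (unsigned long long)p, (unsigned long long)b);
    return 0;
}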
-static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- struct net_device *rdev = vlan_dev_info(dev)->real_dev;
- const struct net_device_ops *ops = rdev->netdev_ops;
-
- return ops->ndo_select_queue(rdev, skb);
-}
-
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
/* TODO: gotta make sure the underlying layer can handle it,
@@ -532,9 +314,6 @@ static int vlan_dev_stop(struct net_device *dev)
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev = vlan->real_dev;
- if (vlan->flags & VLAN_FLAG_GVRP)
- vlan_gvrp_request_leave(dev);
-
dev_mc_unsync(real_dev, dev);
dev_uc_unsync(real_dev, dev);
if (dev->flags & IFF_ALLMULTI)
@@ -670,6 +449,19 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
return rc;
}
+
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = 0;
+
+ if (ops->ndo_fcoe_ddp_target)
+ rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+
+ return rc;
+}
#endif
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
@@ -719,8 +511,7 @@ static const struct header_ops vlan_header_ops = {
.parse = eth_header_parse,
};
-static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
- vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
+static const struct net_device_ops vlan_netdev_ops;
static int vlan_dev_init(struct net_device *dev)
{
@@ -737,7 +528,8 @@ static int vlan_dev_init(struct net_device *dev)
(1<<__LINK_STATE_DORMANT))) |
(1<<__LINK_STATE_PRESENT);
- dev->features |= real_dev->features & real_dev->vlan_features;
+ dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
+ dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
dev->gso_max_size = real_dev->gso_max_size;
/* ipv6 shared card related stuff */
@@ -752,29 +544,24 @@ static int vlan_dev_init(struct net_device *dev)
dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
#endif
+ dev->needed_headroom = real_dev->needed_headroom;
if (real_dev->features & NETIF_F_HW_VLAN_TX) {
dev->header_ops = real_dev->header_ops;
dev->hard_header_len = real_dev->hard_header_len;
- if (real_dev->netdev_ops->ndo_select_queue)
- dev->netdev_ops = &vlan_netdev_accel_ops_sq;
- else
- dev->netdev_ops = &vlan_netdev_accel_ops;
} else {
dev->header_ops = &vlan_header_ops;
dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
- if (real_dev->netdev_ops->ndo_select_queue)
- dev->netdev_ops = &vlan_netdev_ops_sq;
- else
- dev->netdev_ops = &vlan_netdev_ops;
}
+ dev->netdev_ops = &vlan_netdev_ops;
+
if (is_vlan_dev(real_dev))
subclass = 1;
vlan_dev_set_lockdep_class(dev, subclass);
- vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
- if (!vlan_dev_info(dev)->vlan_rx_stats)
+ vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ if (!vlan_dev_info(dev)->vlan_pcpu_stats)
return -ENOMEM;
return 0;
@@ -786,8 +573,8 @@ static void vlan_dev_uninit(struct net_device *dev)
struct vlan_dev_info *vlan = vlan_dev_info(dev);
int i;
- free_percpu(vlan->vlan_rx_stats);
- vlan->vlan_rx_stats = NULL;
+ free_percpu(vlan->vlan_pcpu_stats);
+ vlan->vlan_pcpu_stats = NULL;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
while ((pm = vlan->egress_priority_map[i]) != NULL) {
vlan->egress_priority_map[i] = pm->next;
@@ -796,6 +583,19 @@ static void vlan_dev_uninit(struct net_device *dev)
}
}
+static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+
+ features &= real_dev->features;
+ features &= real_dev->vlan_features;
+ if (dev_ethtool_get_rx_csum(real_dev))
+ features |= NETIF_F_RXCSUM;
+ features |= NETIF_F_LLTX;
+
+ return features;
+}
+
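vlan_dev_fix_features() stands in for the per-flag ethtool callbacks removed below: the effective feature set is, roughly, the requested features intersected with both real_dev->features and real_dev->vlan_features, plus RXCSUM when the real device checksums on receive, plus LLTX. A tiny standalone sketch of that mask algebra follows; the flag bits are made up for the example and do not correspond to real NETIF_F_* values. The core re-runs this hook whenever it recomputes a device's feature set, so the VLAN device tracks changes on the lower device without the removed ethtool getters.

/*
 * Standalone illustration of the mask algebra in vlan_dev_fix_features();
 * the EX_F_* bits are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_F_SG      0x01u   /* scatter/gather    (hypothetical bit) */
#define EX_F_TSO     0x02u   /* TCP segmentation  (hypothetical bit) */
#define EX_F_RXCSUM  0x04u   /* RX checksumming   (hypothetical bit) */
#define EX_F_LLTX    0x08u   /* lockless transmit (hypothetical bit) */

static uint32_t fix_features(uint32_t requested, uint32_t real_features,
                             uint32_t real_vlan_features, int real_does_rx_csum)
{
    uint32_t features = requested;

    features &= real_features;       /* lower device must have the feature enabled... */
    features &= real_vlan_features;  /* ...and support it for VLAN-tagged frames      */
    if (real_does_rx_csum)
        features |= EX_F_RXCSUM;     /* inherit RX checksum offload */
    features |= EX_F_LLTX;           /* VLAN xmit needs no extra TX lock */

    return features;
}

int main(void)
{
    /* User asks for SG+TSO, but the real device only advertises SG for VLANs. */
    uint32_t eff = fix_features(EX_F_SG | EX_F_TSO, EX_F_SG | EX_F_TSO, EX_F_SG, 1);

    printf("effective features: 0x%02x\n", (unsigned int)eff);  /* SG | RXCSUM | LLTX = 0x0d */
    return 0;
}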
static int vlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
@@ -811,77 +611,47 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
strcpy(info->fw_version, "N/A");
}
-static u32 vlan_ethtool_get_rx_csum(struct net_device *dev)
-{
- const struct vlan_dev_info *vlan = vlan_dev_info(dev);
- return dev_ethtool_get_rx_csum(vlan->real_dev);
-}
-
-static u32 vlan_ethtool_get_flags(struct net_device *dev)
-{
- const struct vlan_dev_info *vlan = vlan_dev_info(dev);
- return dev_ethtool_get_flags(vlan->real_dev);
-}
-
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- dev_txq_stats_fold(dev, stats);
- if (vlan_dev_info(dev)->vlan_rx_stats) {
- struct vlan_rx_stats *p, accum = {0};
+ if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+ struct vlan_pcpu_stats *p;
+ u32 rx_errors = 0, tx_dropped = 0;
int i;
for_each_possible_cpu(i) {
- u64 rxpackets, rxbytes, rxmulticast;
+ u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
unsigned int start;
- p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+ p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
do {
start = u64_stats_fetch_begin_bh(&p->syncp);
rxpackets = p->rx_packets;
rxbytes = p->rx_bytes;
rxmulticast = p->rx_multicast;
+ txpackets = p->tx_packets;
+ txbytes = p->tx_bytes;
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
- accum.rx_packets += rxpackets;
- accum.rx_bytes += rxbytes;
- accum.rx_multicast += rxmulticast;
- /* rx_errors is ulong, not protected by syncp */
- accum.rx_errors += p->rx_errors;
+
+ stats->rx_packets += rxpackets;
+ stats->rx_bytes += rxbytes;
+ stats->multicast += rxmulticast;
+ stats->tx_packets += txpackets;
+ stats->tx_bytes += txbytes;
+ /* rx_errors & tx_dropped are u32 */
+ rx_errors += p->rx_errors;
+ tx_dropped += p->tx_dropped;
}
- stats->rx_packets = accum.rx_packets;
- stats->rx_bytes = accum.rx_bytes;
- stats->rx_errors = accum.rx_errors;
- stats->multicast = accum.rx_multicast;
+ stats->rx_errors = rx_errors;
+ stats->tx_dropped = tx_dropped;
}
return stats;
}
-static int vlan_ethtool_set_tso(struct net_device *dev, u32 data)
-{
- if (data) {
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
-
- /* Underlying device must support TSO for VLAN-tagged packets
- * and must have TSO enabled now.
- */
- if (!(real_dev->vlan_features & NETIF_F_TSO))
- return -EOPNOTSUPP;
- if (!(real_dev->features & NETIF_F_TSO))
- return -EINVAL;
- dev->features |= NETIF_F_TSO;
- } else {
- dev->features &= ~NETIF_F_TSO;
- }
- return 0;
-}
-
static const struct ethtool_ops vlan_ethtool_ops = {
.get_settings = vlan_ethtool_get_settings,
.get_drvinfo = vlan_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_rx_csum = vlan_ethtool_get_rx_csum,
- .get_flags = vlan_ethtool_get_flags,
- .set_tso = vlan_ethtool_set_tso,
};
static const struct net_device_ops vlan_netdev_ops = {
@@ -905,81 +675,9 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_fcoe_enable = vlan_dev_fcoe_enable,
.ndo_fcoe_disable = vlan_dev_fcoe_disable,
.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+ .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
#endif
-};
-
-static const struct net_device_ops vlan_netdev_accel_ops = {
- .ndo_change_mtu = vlan_dev_change_mtu,
- .ndo_init = vlan_dev_init,
- .ndo_uninit = vlan_dev_uninit,
- .ndo_open = vlan_dev_open,
- .ndo_stop = vlan_dev_stop,
- .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = vlan_dev_set_mac_address,
- .ndo_set_rx_mode = vlan_dev_set_rx_mode,
- .ndo_set_multicast_list = vlan_dev_set_rx_mode,
- .ndo_change_rx_flags = vlan_dev_change_rx_flags,
- .ndo_do_ioctl = vlan_dev_ioctl,
- .ndo_neigh_setup = vlan_dev_neigh_setup,
- .ndo_get_stats64 = vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
- .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
- .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
- .ndo_fcoe_enable = vlan_dev_fcoe_enable,
- .ndo_fcoe_disable = vlan_dev_fcoe_disable,
- .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
-#endif
-};
-
-static const struct net_device_ops vlan_netdev_ops_sq = {
- .ndo_select_queue = vlan_dev_select_queue,
- .ndo_change_mtu = vlan_dev_change_mtu,
- .ndo_init = vlan_dev_init,
- .ndo_uninit = vlan_dev_uninit,
- .ndo_open = vlan_dev_open,
- .ndo_stop = vlan_dev_stop,
- .ndo_start_xmit = vlan_dev_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = vlan_dev_set_mac_address,
- .ndo_set_rx_mode = vlan_dev_set_rx_mode,
- .ndo_set_multicast_list = vlan_dev_set_rx_mode,
- .ndo_change_rx_flags = vlan_dev_change_rx_flags,
- .ndo_do_ioctl = vlan_dev_ioctl,
- .ndo_neigh_setup = vlan_dev_neigh_setup,
- .ndo_get_stats64 = vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
- .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
- .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
- .ndo_fcoe_enable = vlan_dev_fcoe_enable,
- .ndo_fcoe_disable = vlan_dev_fcoe_disable,
- .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
-#endif
-};
-
-static const struct net_device_ops vlan_netdev_accel_ops_sq = {
- .ndo_select_queue = vlan_dev_select_queue,
- .ndo_change_mtu = vlan_dev_change_mtu,
- .ndo_init = vlan_dev_init,
- .ndo_uninit = vlan_dev_uninit,
- .ndo_open = vlan_dev_open,
- .ndo_stop = vlan_dev_stop,
- .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = vlan_dev_set_mac_address,
- .ndo_set_rx_mode = vlan_dev_set_rx_mode,
- .ndo_set_multicast_list = vlan_dev_set_rx_mode,
- .ndo_change_rx_flags = vlan_dev_change_rx_flags,
- .ndo_do_ioctl = vlan_dev_ioctl,
- .ndo_neigh_setup = vlan_dev_neigh_setup,
- .ndo_get_stats64 = vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
- .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
- .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
- .ndo_fcoe_enable = vlan_dev_fcoe_enable,
- .ndo_fcoe_disable = vlan_dev_fcoe_disable,
- .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
-#endif
+ .ndo_fix_features = vlan_dev_fix_features,
};
void vlan_setup(struct net_device *dev)