Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--   drivers/net/vmxnet3/vmxnet3_drv.c     |  93
-rw-r--r--   drivers/net/vmxnet3/vmxnet3_ethtool.c | 274
-rw-r--r--   drivers/net/vmxnet3/vmxnet3_int.h     |   7
3 files changed, 226 insertions, 148 deletions
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b72b5..cc14b4a7504 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@ static atomic_t devices_found;
 static int enable_mq = 1;
 static int irq_share_mode;
 
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
+
 /*
  *    Enable/Disable the given intr
  */
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 {
         u32 ret;
         int i;
+        unsigned long flags;
 
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
         ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
         adapter->link_speed = ret >> 16;
         if (ret & 1) { /* Link is up. */
                 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 
         /* Check if there is an error on xmit/recv queues */
         if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+                spin_lock(&adapter->cmd_lock);
                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                        VMXNET3_CMD_GET_QUEUE_STATUS);
+                spin_unlock(&adapter->cmd_lock);
 
                 for (i = 0; i < adapter->num_tx_queues; i++)
                         if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                                         skb_transport_header(skb))->doff * 4;
                 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
         } else {
-                unsigned int pull_size;
-
                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
                         ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
 
                         if (ctx->ipv4) {
                                 struct iphdr *iph = (struct iphdr *)
                                                     skb_network_header(skb);
-                                if (iph->protocol == IPPROTO_TCP) {
-                                        pull_size = ctx->eth_ip_hdr_size +
-                                                    sizeof(struct tcphdr);
-
-                                        if (unlikely(!pskb_may_pull(skb,
-                                                                pull_size))) {
-                                                goto err;
-                                        }
+                                if (iph->protocol == IPPROTO_TCP)
                                         ctx->l4_hdr_size = ((struct tcphdr *)
                                            skb_transport_header(skb))->doff * 4;
-                                } else if (iph->protocol == IPPROTO_UDP) {
+                                else if (iph->protocol == IPPROTO_UDP)
+                                        /*
+                                         * Use tcp header size so that bytes to
+                                         * be copied are more than required by
+                                         * the device.
+                                         */
                                         ctx->l4_hdr_size =
-                                                        sizeof(struct udphdr);
-                                } else {
+                                                        sizeof(struct tcphdr);
+                                else
                                         ctx->l4_hdr_size = 0;
-                                }
                         } else {
                                 /* for simplicity, don't copy L4 headers */
                                 ctx->l4_hdr_size = 0;
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         struct Vmxnet3_DriverShared *shared = adapter->shared;
         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+        unsigned long flags;
 
         if (grp) {
                 /* add vlan rx stripping. */
                 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
                         int i;
-                        struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
                         adapter->vlan_grp = grp;
 
-                        /* update FEATURES to device */
-                        devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-                        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                                               VMXNET3_CMD_UPDATE_FEATURE);
                         /*
                          *  Clear entire vfTable; then enable untagged pkts.
                          *  Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                                 vfTable[i] = 0;
 
                         VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+                        spin_lock_irqsave(&adapter->cmd_lock, flags);
                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+                        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
                 } else {
                         printk(KERN_ERR "%s: vlan_rx_register when device has "
                                "no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                          */
                         vfTable[i] = 0;
                 }
+                spin_lock_irqsave(&adapter->cmd_lock, flags);
                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                        VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-
-                /* update FEATURES to device */
-                devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
-                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                                       VMXNET3_CMD_UPDATE_FEATURE);
+                spin_unlock_irqrestore(&adapter->cmd_lock, flags);
                 }
         }
 }
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+        unsigned long flags;
 
         VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+        unsigned long flags;
 
         VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -1985,6 +1990,7 @@ static void
 vmxnet3_set_mc(struct net_device *netdev)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+        unsigned long flags;
         struct Vmxnet3_RxFilterConf *rxConf =
                 &adapter->shared->devRead.rxFilterConf;
         u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev)
                 rxConf->mfTablePA = 0;
         }
 
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         if (new_mode != rxConf->rxMode) {
                 rxConf->rxMode = cpu_to_le32(new_mode);
                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_UPDATE_MAC_FILTERS);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
         kfree(new_table);
 }
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
                 devRead->misc.uptFeatures |= UPT1_F_LRO;
                 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
         }
-        if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
-            adapter->vlan_grp) {
+        if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
                 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-        }
 
         devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
         devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
         /* rx filter settings */
         devRead->rxFilterConf.rxMode = 0;
         vmxnet3_restore_vlan(adapter);
+        vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
         /* the rest are already zeroed */
 }
 
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 {
         int err, i;
         u32 ret;
+        unsigned long flags;
 
         dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
                 " ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
                                adapter->shared_pa));
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
                                adapter->shared_pa));
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_ACTIVATE_DEV);
         ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
         if (ret != 0) {
                 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@ rq_err:
 void
 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
 {
+        unsigned long flags;
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -2263,12 +2277,15 @@ int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
 {
         int i;
+        unsigned long flags;
 
         if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
                 return 0;
 
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_QUIESCE_DEV);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
         vmxnet3_disable_all_intrs(adapter);
 
         for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
         sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
         ring0_size = adapter->rx_queue[0].rx_ring[0].size;
         ring0_size = (ring0_size + sz - 1) / sz * sz;
-        ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
+        ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
                            sz * sz);
         ring1_size = adapter->rx_queue[0].rx_ring[1].size;
         comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
                         break;
                 } else {
                         /* If fails to enable required number of MSI-x vectors
-                         * try enabling 3 of them. One each for rx, tx and event
+                         * try enabling minimum number of vectors required.
                          */
                         vectors = vector_threshold;
                         printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
         u32 cfg;
 
         /* intr settings */
+        spin_lock(&adapter->cmd_lock);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_GET_CONF_INTR);
         cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+        spin_unlock(&adapter->cmd_lock);
         adapter->intr.type = cfg & 0x3;
         adapter->intr.mask_mode = (cfg >> 2) & 0x3;
 
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
                  */
                 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
                         if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
-                            || adapter->num_rx_queues != 2) {
+                            || adapter->num_rx_queues != 1) {
                                 adapter->share_intr = VMXNET3_INTR_TXSHARE;
                                 printk(KERN_ERR "Number of rx queues : 1\n");
                                 adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
         adapter->netdev = netdev;
         adapter->pdev = pdev;
 
+        spin_lock_init(&adapter->cmd_lock);
         adapter->shared = pci_alloc_consistent(adapter->pdev,
                           sizeof(struct Vmxnet3_DriverShared),
                           &adapter->shared_pa);
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device)
         u8 *arpreq;
         struct in_device *in_dev;
         struct in_ifaddr *ifa;
+        unsigned long flags;
         int i = 0;
 
         if (!netif_running(netdev))
                 return 0;
 
+        for (i = 0; i < adapter->num_rx_queues; i++)
+                napi_disable(&adapter->rx_queue[i].napi);
+
         vmxnet3_disable_all_intrs(adapter);
         vmxnet3_free_irqs(adapter);
         vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@ skip_arp:
         adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
                                                                  pmConf));
 
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_UPDATE_PMCFG);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
         pci_save_state(pdev);
         pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@ skip_arp:
 static int
 vmxnet3_resume(struct device *device)
 {
-        int err;
+        int err, i = 0;
+        unsigned long flags;
         struct pci_dev *pdev = to_pci_dev(device);
         struct net_device *netdev = pci_get_drvdata(pdev);
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device)
 
         pci_enable_wake(pdev, PCI_D0, 0);
 
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_UPDATE_PMCFG);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
         vmxnet3_alloc_intr_resources(adapter);
         vmxnet3_request_irqs(adapter);
+        for (i = 0; i < adapter->num_rx_queues; i++)
+                napi_enable(&adapter->rx_queue[i].napi);
         vmxnet3_enable_all_intrs(adapter);
 
         return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8a7fe..81254be85b9 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
 vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+        unsigned long flags;
 
         if (adapter->rxcsum != val) {
                 adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
                                 adapter->shared->devRead.misc.uptFeatures &=
                                 ~UPT1_F_RXCSUM;
 
+                        spin_lock_irqsave(&adapter->cmd_lock, flags);
                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                                VMXNET3_CMD_UPDATE_FEATURE);
+                        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
                 }
         }
         return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_dev_stats[] = {
         /* description,         offset */
-        { "TSO pkts tx",        offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
-        { "TSO bytes tx",       offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
-        { "ucast pkts tx",      offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
-        { "ucast bytes tx",     offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
-        { "mcast pkts tx",      offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
-        { "mcast bytes tx",     offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
-        { "bcast pkts tx",      offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
-        { "bcast bytes tx",     offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
-        { "pkts tx err",        offsetof(struct UPT1_TxStats, pktsTxError) },
-        { "pkts tx discard",    offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+        { "Tx Queue#",          0 },
+        { " TSO pkts tx",       offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+        { " TSO bytes tx",      offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+        { " ucast pkts tx",     offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+        { " ucast bytes tx",    offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+        { " mcast pkts tx",     offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+        { " mcast bytes tx",    offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+        { " bcast pkts tx",     offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+        { " bcast bytes tx",    offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+        { " pkts tx err",       offsetof(struct UPT1_TxStats, pktsTxError) },
+        { " pkts tx discard",   offsetof(struct UPT1_TxStats, pktsTxDiscard) },
 };
 
 /* per tq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_driver_stats[] = {
         /* description,         offset */
-        {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
-                                          drop_total) },
-        { "   too many frags",  offsetof(struct vmxnet3_tq_driver_stats,
-                                         drop_too_many_frags) },
-        { "   giant hdr",       offsetof(struct vmxnet3_tq_driver_stats,
-                                         drop_oversized_hdr) },
-        { "   hdr err",         offsetof(struct vmxnet3_tq_driver_stats,
-                                         drop_hdr_inspect_err) },
-        { "   tso",             offsetof(struct vmxnet3_tq_driver_stats,
-                                         drop_tso) },
-        { "ring full",          offsetof(struct vmxnet3_tq_driver_stats,
-                                         tx_ring_full) },
-        { "pkts linearized",    offsetof(struct vmxnet3_tq_driver_stats,
-                                         linearized) },
-        { "hdr cloned",         offsetof(struct vmxnet3_tq_driver_stats,
-                                         copy_skb_header) },
-        { "giant hdr",          offsetof(struct vmxnet3_tq_driver_stats,
-                                         oversized_hdr) },
+        {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
-                                           drop_total) },
+        { "    too many frags", offsetof(struct vmxnet3_tq_driver_stats,
+                                          drop_too_many_frags) },
+        { "    giant hdr",      offsetof(struct vmxnet3_tq_driver_stats,
+                                          drop_oversized_hdr) },
+        { "    hdr err",        offsetof(struct vmxnet3_tq_driver_stats,
+                                          drop_hdr_inspect_err) },
+        { "    tso",            offsetof(struct vmxnet3_tq_driver_stats,
+                                          drop_tso) },
+        { " ring full",         offsetof(struct vmxnet3_tq_driver_stats,
+                                          tx_ring_full) },
+        { " pkts linearized",   offsetof(struct vmxnet3_tq_driver_stats,
+                                          linearized) },
+        { " hdr cloned",        offsetof(struct vmxnet3_tq_driver_stats,
+                                          copy_skb_header) },
+        { " giant hdr",         offsetof(struct vmxnet3_tq_driver_stats,
+                                          oversized_hdr) },
 };
 
 /* per rq stats maintained by the device */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_dev_stats[] = {
-        { "LRO pkts rx",        offsetof(struct UPT1_RxStats, LROPktsRxOK) },
-        { "LRO byte rx",        offsetof(struct UPT1_RxStats, LROBytesRxOK) },
-        { "ucast pkts rx",      offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
-        { "ucast bytes rx",     offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
-        { "mcast pkts rx",      offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
-        { "mcast bytes rx",     offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
-        { "bcast pkts rx",      offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
-        { "bcast bytes rx",     offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
-        { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
-        { "pkts rx err",        offsetof(struct UPT1_RxStats, pktsRxError) },
+        { "Rx Queue#",          0 },
+        { " LRO pkts rx",       offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+        { " LRO byte rx",       offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+        { " ucast pkts rx",     offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+        { " ucast bytes rx",    offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+        { " mcast pkts rx",     offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+        { " mcast bytes rx",    offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+        { " bcast pkts rx",     offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+        { " bcast bytes rx",    offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+        { " pkts rx OOB",       offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+        { " pkts rx err",       offsetof(struct UPT1_RxStats, pktsRxError) },
 };
 
 /* per rq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_driver_stats[] = {
         /* description,         offset */
-        { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
-                                           drop_total) },
-        { "   err",             offsetof(struct vmxnet3_rq_driver_stats,
-                                         drop_err) },
-        { "   fcs",             offsetof(struct vmxnet3_rq_driver_stats,
-                                         drop_fcs) },
-        { "rx buf alloc fail",  offsetof(struct vmxnet3_rq_driver_stats,
-                                         rx_buf_alloc_failure) },
+        { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+                                            drop_total) },
+        { "    err",            offsetof(struct vmxnet3_rq_driver_stats,
+                                          drop_err) },
+        { "    fcs",            offsetof(struct vmxnet3_rq_driver_stats,
+                                          drop_fcs) },
+        { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
+                                          rx_buf_alloc_failure) },
 };
 
 /* gloabl stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_global_stats[] = {
         /* description,         offset */
-        { "tx timeout count",   offsetof(struct vmxnet3_adapter,
+        { "tx timeout count",    offsetof(struct vmxnet3_adapter,
                                          tx_timeout_count) }
 };
 
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev)
         struct UPT1_TxStats *devTxStats;
         struct UPT1_RxStats *devRxStats;
         struct net_device_stats *net_stats = &netdev->stats;
+        unsigned long flags;
         int i;
 
         adapter = netdev_priv(netdev);
 
         /* Collect the dev stats into the shared area */
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
         memset(net_stats, 0, sizeof(*net_stats));
         for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev)
 static int
 vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 {
+        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         switch (sset) {
         case ETH_SS_STATS:
-                return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
-                        ARRAY_SIZE(vmxnet3_tq_driver_stats) +
-                        ARRAY_SIZE(vmxnet3_rq_dev_stats) +
-                        ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+                return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+                        ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+                       adapter->num_tx_queues +
+                       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+                        ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+                       adapter->num_rx_queues +
                         ARRAY_SIZE(vmxnet3_global_stats);
         default:
                 return -EOPNOTSUPP;
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 }
 
 
+/* Should be multiple of 4 */
+#define NUM_TX_REGS     8
+#define NUM_RX_REGS     12
+
 static int
 vmxnet3_get_regs_len(struct net_device *netdev)
 {
-        return 20 * sizeof(u32);
+        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+        return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
+                adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
 }
 
 
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 static void
 vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
 {
+        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         if (stringset == ETH_SS_STATS) {
-                int i;
-
-                for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
-                        memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
-                               ETH_GSTRING_LEN);
-                        buf += ETH_GSTRING_LEN;
-                }
-                for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
-                        memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
-                               ETH_GSTRING_LEN);
-                        buf += ETH_GSTRING_LEN;
-                }
-                for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
-                        memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
-                               ETH_GSTRING_LEN);
-                        buf += ETH_GSTRING_LEN;
+                int i, j;
+                for (j = 0; j < adapter->num_tx_queues; j++) {
+                        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+                                memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+                                       ETH_GSTRING_LEN);
+                                buf += ETH_GSTRING_LEN;
+                        }
+                        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
+                             i++) {
+                                memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+                                       ETH_GSTRING_LEN);
+                                buf += ETH_GSTRING_LEN;
+                        }
                 }
-                for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
-                        memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
-                               ETH_GSTRING_LEN);
-                        buf += ETH_GSTRING_LEN;
+
+                for (j = 0; j < adapter->num_rx_queues; j++) {
+                        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+                                memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+                                       ETH_GSTRING_LEN);
+                                buf += ETH_GSTRING_LEN;
+                        }
+                        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
+                             i++) {
+                                memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+                                       ETH_GSTRING_LEN);
+                                buf += ETH_GSTRING_LEN;
+                        }
                 }
+
                 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
                         memcpy(buf, vmxnet3_global_stats[i].desc,
                                ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
         u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+        unsigned long flags;
 
         if (data & ~ETH_FLAG_LRO)
                 return -EOPNOTSUPP;
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
                 else
                         adapter->shared->devRead.misc.uptFeatures &=
                                                         ~UPT1_F_LRO;
+                spin_lock_irqsave(&adapter->cmd_lock, flags);
                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                        VMXNET3_CMD_UPDATE_FEATURE);
+                spin_unlock_irqrestore(&adapter->cmd_lock, flags);
         }
         return 0;
 }
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
                 struct ethtool_stats *stats, u64 *buf)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+        unsigned long flags;
         u8 *base;
         int i;
         int j = 0;
 
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
         /* this does assume each counter is 64-bit wide */
-/* TODO change this for multiple queues */
-
-        base = (u8 *)&adapter->tqd_start[j].stats;
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
-                *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
-
-        base = (u8 *)&adapter->tx_queue[j].stats;
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
-                *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
-
-        base = (u8 *)&adapter->rqd_start[j].stats;
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
-                *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
+        for (j = 0; j < adapter->num_tx_queues; j++) {
+                base = (u8 *)&adapter->tqd_start[j].stats;
+                *buf++ = (u64)j;
+                for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+                        *buf++ = *(u64 *)(base +
+                                          vmxnet3_tq_dev_stats[i].offset);
+
+                base = (u8 *)&adapter->tx_queue[j].stats;
+                for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+                        *buf++ = *(u64 *)(base +
+                                          vmxnet3_tq_driver_stats[i].offset);
+        }
 
-        base = (u8 *)&adapter->rx_queue[j].stats;
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
-                *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
+        for (j = 0; j < adapter->num_tx_queues; j++) {
+                base = (u8 *)&adapter->rqd_start[j].stats;
+                *buf++ = (u64) j;
+                for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+                        *buf++ = *(u64 *)(base +
+                                          vmxnet3_rq_dev_stats[i].offset);
+
+                base = (u8 *)&adapter->rx_queue[j].stats;
+                for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+                        *buf++ = *(u64 *)(base +
+                                          vmxnet3_rq_driver_stats[i].offset);
+        }
 
         base = (u8 *)adapter;
         for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         u32 *buf = p;
-        int i = 0;
+        int i = 0, j = 0;
 
         memset(p, 0, vmxnet3_get_regs_len(netdev));
 
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
         /* Update vmxnet3_get_regs_len if we want to dump more registers */
 
         /* make each ring use multiple of 16 bytes */
-/* TODO change this for multiple queues */
-        buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
-        buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
-        buf[2] = adapter->tx_queue[i].tx_ring.gen;
-        buf[3] = 0;
-
-        buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
-        buf[5] = adapter->tx_queue[i].comp_ring.gen;
-        buf[6] = adapter->tx_queue[i].stopped;
-        buf[7] = 0;
-
-        buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
-        buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
-        buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
-        buf[11] = 0;
-
-        buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
-        buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
-        buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
-        buf[15] = 0;
-
-        buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
-        buf[17] = adapter->rx_queue[i].comp_ring.gen;
-        buf[18] = 0;
-        buf[19] = 0;
+        for (i = 0; i < adapter->num_tx_queues; i++) {
+                buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
+                buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
+                buf[j++] = adapter->tx_queue[i].tx_ring.gen;
+                buf[j++] = 0;
+
+                buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
+                buf[j++] = adapter->tx_queue[i].comp_ring.gen;
+                buf[j++] = adapter->tx_queue[i].stopped;
+                buf[j++] = 0;
+        }
+
+        for (i = 0; i < adapter->num_rx_queues; i++) {
+                buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
+                buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
+                buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
+                buf[j++] = 0;
+
+                buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
+                buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
+                buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
+                buf[j++] = 0;
+
+                buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
+                buf[j++] = adapter->rx_queue[i].comp_ring.gen;
+                buf[j++] = 0;
+                buf[j++] = 0;
+        }
+
 }
 
 
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
                       const struct ethtool_rxfh_indir *p)
 {
         unsigned int i;
+        unsigned long flags;
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
         for (i = 0; i < rssConf->indTableSize; i++)
                 rssConf->indTable[i] = p->ring_index[i];
 
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_UPDATE_RSSIDT);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
         return 0;
 
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed37f0..fb5d245ac87 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.0.16.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.0.25.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01001000
+#define VMXNET3_DRIVER_VERSION_NUM      0x01001900
 
 #if defined(CONFIG_PCI_MSI)
         /* RSS only makes sense if MSI-X is supported. */
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue {
 
 #define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
                                          VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
-#define VMXNET3_LINUX_MIN_MSIX_VECT     3 /* 1 for each : tx, rx and event */
+#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */
 
 
 struct vmxnet3_intr {
@@ -317,6 +317,7 @@ struct vmxnet3_adapter {
         struct vmxnet3_rx_queue         rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
         struct vlan_group               *vlan_grp;
         struct vmxnet3_intr             intr;
+        spinlock_t                      cmd_lock;
         struct Vmxnet3_DriverShared     *shared;
         struct Vmxnet3_PMConf           *pm_conf;
         struct Vmxnet3_TxQueueDesc      *tqd_start;     /* all tx queue desc */
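The thread running through all three files above is the new adapter->cmd_lock spinlock: every write to the BAR1 command register, together with the read that collects the command's result, is now serialized so that concurrent callers cannot interleave command/response pairs. The fragment below is a minimal sketch of that pattern for illustration only; it is not code from the driver, and the demo_* names and the register offset are invented for the example.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_adapter {
        spinlock_t cmd_lock;    /* init with spin_lock_init() at probe time */
        void __iomem *bar1;     /* BAR1 MMIO mapping (assumed) */
};

#define DEMO_REG_CMD    0x20    /* hypothetical command register offset */

/* Issue a command and return whatever the device leaves in the register. */
static u32 demo_do_cmd(struct demo_adapter *a, u32 cmd)
{
        unsigned long flags;
        u32 ret;

        spin_lock_irqsave(&a->cmd_lock, flags);
        writel(cmd, a->bar1 + DEMO_REG_CMD);
        ret = readl(a->bar1 + DEMO_REG_CMD);
        spin_unlock_irqrestore(&a->cmd_lock, flags);

        return ret;
}

Callers that already run in a context where interrupt state does not need to be saved, such as vmxnet3_process_events() in the patch above, take the plain spin_lock()/spin_unlock() form instead, which is why both variants appear in the diff.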