Diffstat (limited to 'drivers/net/benet/be_main.c')
-rw-r--r-- | drivers/net/benet/be_main.c | 1417
1 file changed, 975 insertions, 442 deletions
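
The largest mechanical change in this patch is the move from the legacy PCI DMA helpers (pci_alloc_consistent(), pci_free_consistent(), pci_map_single(), pci_map_page()) to the generic DMA API (dma_alloc_coherent(), dma_free_coherent(), dma_map_single(), dma_map_page()) operating on &pdev->dev, with an explicit GFP flag passed where the calling context may sleep. The sketch below is only an illustration of that conversion pattern; it is not code from the patch, and "sketch_dma_mem", "sketch_queue_alloc" and "sketch_queue_free" are hypothetical stand-ins for the driver's own be_dma_mem/be_queue_alloc helpers.

/*
 * Illustrative sketch only -- not part of the patch.  Shows the DMA API
 * conversion pattern applied throughout this diff, using a simplified
 * stand-in for the driver's struct be_dma_mem.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/string.h>

struct sketch_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

/*
 * Old style: mem->va = pci_alloc_consistent(pdev, size, &mem->dma);
 * which is tied to struct pci_dev and offers no GFP control.  The new
 * style below uses the generic DMA API on &pdev->dev and an explicit
 * GFP flag, as the patch does in be_queue_alloc() and friends.
 */
static int sketch_queue_alloc(struct pci_dev *pdev, struct sketch_dma_mem *mem,
			      u32 size)
{
	mem->size = size;
	mem->va = dma_alloc_coherent(&pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);	/* driver zeroes explicitly, as in the patch */
	return 0;
}

static void sketch_queue_free(struct pci_dev *pdev, struct sketch_dma_mem *mem)
{
	if (mem->va)
		dma_free_coherent(&pdev->dev, mem->size, mem->va, mem->dma);
}
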
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 45b1f663528..a485f7fdaf3 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,13 +8,14 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ +#include <linux/prefetch.h> #include "be.h" #include "be_cmds.h" #include <asm/div64.h> @@ -25,9 +26,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); MODULE_AUTHOR("ServerEngines Corporation"); MODULE_LICENSE("GPL"); -static unsigned int rx_frag_size = 2048; +static ushort rx_frag_size = 2048; static unsigned int num_vfs; -module_param(rx_frag_size, uint, S_IRUGO); +module_param(rx_frag_size, ushort, S_IRUGO); module_param(num_vfs, uint, S_IRUGO); MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); @@ -41,6 +42,8 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, + { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)}, + { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)}, { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -115,17 +118,12 @@ static char *ue_status_hi_desc[] = { "Unknown" }; -static inline bool be_multi_rxq(struct be_adapter *adapter) -{ - return (adapter->num_rx_qs > 1); -} - static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { struct be_dma_mem *mem = &q->dma_mem; if (mem->va) - pci_free_consistent(adapter->pdev, mem->size, - mem->va, mem->dma); + dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, + mem->dma); } static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, @@ -137,7 +135,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, q->len = len; q->entry_size = entry_size; mem->size = len * entry_size; - mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma); + mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, + GFP_KERNEL); if (!mem->va) return -1; memset(mem->va, 0, mem->size); @@ -188,6 +187,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid, { u32 val = 0; val |= qid & DB_EQ_RING_ID_MASK; + val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << + DB_EQ_RING_ID_EXT_MASK_SHIFT); if (adapter->eeh_err) return; @@ -205,6 +206,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) { u32 val = 0; val |= qid & DB_CQ_RING_ID_MASK; + val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << + DB_CQ_RING_ID_EXT_MASK_SHIFT); if (adapter->eeh_err) return; @@ -230,12 +233,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (!be_physfn(adapter)) goto netdev_addr; - status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); + status = be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id, 0); if (status) return status; status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, - adapter->if_handle, &adapter->pmac_id); + adapter->if_handle, &adapter->pmac_id, 0); netdev_addr: if (!status) memcpy(netdev->dev_addr, addr->sa_data, 
netdev->addr_len); @@ -243,14 +247,185 @@ netdev_addr: return status; } +static void populate_be2_stats(struct be_adapter *adapter) +{ + + struct be_drv_stats *drvs = &adapter->drv_stats; + struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter); + struct be_port_rxf_stats_v0 *port_stats = + be_port_rxf_stats_from_cmd(adapter); + struct be_rxf_stats_v0 *rxf_stats = + be_rxf_stats_from_cmd(adapter); + + drvs->rx_pause_frames = port_stats->rx_pause_frames; + drvs->rx_crc_errors = port_stats->rx_crc_errors; + drvs->rx_control_frames = port_stats->rx_control_frames; + drvs->rx_in_range_errors = port_stats->rx_in_range_errors; + drvs->rx_frame_too_long = port_stats->rx_frame_too_long; + drvs->rx_dropped_runt = port_stats->rx_dropped_runt; + drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; + drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; + drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; + drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow; + drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; + drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; + drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; + drvs->rx_out_range_errors = port_stats->rx_out_range_errors; + drvs->rx_input_fifo_overflow_drop = + port_stats->rx_input_fifo_overflow; + drvs->rx_dropped_header_too_small = + port_stats->rx_dropped_header_too_small; + drvs->rx_address_match_errors = + port_stats->rx_address_match_errors; + drvs->rx_alignment_symbol_errors = + port_stats->rx_alignment_symbol_errors; + + drvs->tx_pauseframes = port_stats->tx_pauseframes; + drvs->tx_controlframes = port_stats->tx_controlframes; + + if (adapter->port_num) + drvs->jabber_events = + rxf_stats->port1_jabber_events; + else + drvs->jabber_events = + rxf_stats->port0_jabber_events; + drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; + drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb; + drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; + drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring; + drvs->forwarded_packets = rxf_stats->forwarded_packets; + drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; + drvs->rx_drops_no_tpre_descr = + rxf_stats->rx_drops_no_tpre_descr; + drvs->rx_drops_too_many_frags = + rxf_stats->rx_drops_too_many_frags; + adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; +} + +static void populate_be3_stats(struct be_adapter *adapter) +{ + struct be_drv_stats *drvs = &adapter->drv_stats; + struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter); + + struct be_rxf_stats_v1 *rxf_stats = + be_rxf_stats_from_cmd(adapter); + struct be_port_rxf_stats_v1 *port_stats = + be_port_rxf_stats_from_cmd(adapter); + + drvs->rx_priority_pause_frames = 0; + drvs->pmem_fifo_overflow_drop = 0; + drvs->rx_pause_frames = port_stats->rx_pause_frames; + drvs->rx_crc_errors = port_stats->rx_crc_errors; + drvs->rx_control_frames = port_stats->rx_control_frames; + drvs->rx_in_range_errors = port_stats->rx_in_range_errors; + drvs->rx_frame_too_long = port_stats->rx_frame_too_long; + drvs->rx_dropped_runt = port_stats->rx_dropped_runt; + drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; + drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; + drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; + drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; + drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; + drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; + 
drvs->rx_out_range_errors = port_stats->rx_out_range_errors; + drvs->rx_dropped_header_too_small = + port_stats->rx_dropped_header_too_small; + drvs->rx_input_fifo_overflow_drop = + port_stats->rx_input_fifo_overflow_drop; + drvs->rx_address_match_errors = + port_stats->rx_address_match_errors; + drvs->rx_alignment_symbol_errors = + port_stats->rx_alignment_symbol_errors; + drvs->rxpp_fifo_overflow_drop = + port_stats->rxpp_fifo_overflow_drop; + drvs->tx_pauseframes = port_stats->tx_pauseframes; + drvs->tx_controlframes = port_stats->tx_controlframes; + drvs->jabber_events = port_stats->jabber_events; + drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; + drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb; + drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; + drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring; + drvs->forwarded_packets = rxf_stats->forwarded_packets; + drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; + drvs->rx_drops_no_tpre_descr = + rxf_stats->rx_drops_no_tpre_descr; + drvs->rx_drops_too_many_frags = + rxf_stats->rx_drops_too_many_frags; + adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; +} + +static void populate_lancer_stats(struct be_adapter *adapter) +{ + + struct be_drv_stats *drvs = &adapter->drv_stats; + struct lancer_cmd_pport_stats *pport_stats = pport_stats_from_cmd + (adapter); + drvs->rx_priority_pause_frames = 0; + drvs->pmem_fifo_overflow_drop = 0; + drvs->rx_pause_frames = + make_64bit_val(pport_stats->rx_pause_frames_lo, + pport_stats->rx_pause_frames_hi); + drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi, + pport_stats->rx_crc_errors_lo); + drvs->rx_control_frames = + make_64bit_val(pport_stats->rx_control_frames_hi, + pport_stats->rx_control_frames_lo); + drvs->rx_in_range_errors = pport_stats->rx_in_range_errors; + drvs->rx_frame_too_long = + make_64bit_val(pport_stats->rx_internal_mac_errors_hi, + pport_stats->rx_frames_too_long_lo); + drvs->rx_dropped_runt = pport_stats->rx_dropped_runt; + drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors; + drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors; + drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors; + drvs->rx_dropped_tcp_length = + pport_stats->rx_dropped_invalid_tcp_length; + drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small; + drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short; + drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors; + drvs->rx_dropped_header_too_small = + pport_stats->rx_dropped_header_too_small; + drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow; + drvs->rx_address_match_errors = pport_stats->rx_address_match_errors; + drvs->rx_alignment_symbol_errors = + make_64bit_val(pport_stats->rx_symbol_errors_hi, + pport_stats->rx_symbol_errors_lo); + drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow; + drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi, + pport_stats->tx_pause_frames_lo); + drvs->tx_controlframes = + make_64bit_val(pport_stats->tx_control_frames_hi, + pport_stats->tx_control_frames_lo); + drvs->jabber_events = pport_stats->rx_jabbers; + drvs->rx_drops_no_pbuf = 0; + drvs->rx_drops_no_txpb = 0; + drvs->rx_drops_no_erx_descr = 0; + drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue; + drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi, + pport_stats->num_forwards_lo); + drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi, + 
pport_stats->rx_drops_mtu_lo); + drvs->rx_drops_no_tpre_descr = 0; + drvs->rx_drops_too_many_frags = + make_64bit_val(pport_stats->rx_drops_too_many_frags_hi, + pport_stats->rx_drops_too_many_frags_lo); +} + +void be_parse_stats(struct be_adapter *adapter) +{ + if (adapter->generation == BE_GEN3) { + if (lancer_chip(adapter)) + populate_lancer_stats(adapter); + else + populate_be3_stats(adapter); + } else { + populate_be2_stats(adapter); + } +} + void netdev_stats_update(struct be_adapter *adapter) { - struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va); - struct be_rxf_stats *rxf_stats = &hw_stats->rxf; - struct be_port_rxf_stats *port_stats = - &rxf_stats->port[adapter->port_num]; + struct be_drv_stats *drvs = &adapter->drv_stats; struct net_device_stats *dev_stats = &adapter->netdev->stats; - struct be_erx_stats *erx_stats = &hw_stats->erx; struct be_rx_obj *rxo; int i; @@ -260,43 +435,54 @@ void netdev_stats_update(struct be_adapter *adapter) dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes; dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts; /* no space in linux buffers: best possible approximation */ - dev_stats->rx_dropped += - erx_stats->rx_drops_no_fragments[rxo->q.id]; + if (adapter->generation == BE_GEN3) { + if (!(lancer_chip(adapter))) { + struct be_erx_stats_v1 *erx_stats = + be_erx_stats_from_cmd(adapter); + dev_stats->rx_dropped += + erx_stats->rx_drops_no_fragments[rxo->q.id]; + } + } else { + struct be_erx_stats_v0 *erx_stats = + be_erx_stats_from_cmd(adapter); + dev_stats->rx_dropped += + erx_stats->rx_drops_no_fragments[rxo->q.id]; + } } dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts; dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes; /* bad pkts received */ - dev_stats->rx_errors = port_stats->rx_crc_errors + - port_stats->rx_alignment_symbol_errors + - port_stats->rx_in_range_errors + - port_stats->rx_out_range_errors + - port_stats->rx_frame_too_long + - port_stats->rx_dropped_too_small + - port_stats->rx_dropped_too_short + - port_stats->rx_dropped_header_too_small + - port_stats->rx_dropped_tcp_length + - port_stats->rx_dropped_runt + - port_stats->rx_tcp_checksum_errs + - port_stats->rx_ip_checksum_errs + - port_stats->rx_udp_checksum_errs; + dev_stats->rx_errors = drvs->rx_crc_errors + + drvs->rx_alignment_symbol_errors + + drvs->rx_in_range_errors + + drvs->rx_out_range_errors + + drvs->rx_frame_too_long + + drvs->rx_dropped_too_small + + drvs->rx_dropped_too_short + + drvs->rx_dropped_header_too_small + + drvs->rx_dropped_tcp_length + + drvs->rx_dropped_runt + + drvs->rx_tcp_checksum_errs + + drvs->rx_ip_checksum_errs + + drvs->rx_udp_checksum_errs; /* detailed rx errors */ - dev_stats->rx_length_errors = port_stats->rx_in_range_errors + - port_stats->rx_out_range_errors + - port_stats->rx_frame_too_long; + dev_stats->rx_length_errors = drvs->rx_in_range_errors + + drvs->rx_out_range_errors + + drvs->rx_frame_too_long; - dev_stats->rx_crc_errors = port_stats->rx_crc_errors; + dev_stats->rx_crc_errors = drvs->rx_crc_errors; /* frame alignment errors */ - dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors; + dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors; /* receiver fifo overrun */ /* drops_no_pbuf is no per i/f, it's per BE card */ - dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow + - port_stats->rx_input_fifo_overflow + - rxf_stats->rx_drops_no_pbuf; + dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop + + drvs->rx_input_fifo_overflow_drop + + drvs->rx_drops_no_pbuf; } void 
be_link_status_update(struct be_adapter *adapter, bool link_up) @@ -307,11 +493,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up) if (adapter->link_up != link_up) { adapter->link_speed = -1; if (link_up) { - netif_start_queue(netdev); netif_carrier_on(netdev); printk(KERN_INFO "%s: Link up\n", netdev->name); } else { - netif_stop_queue(netdev); netif_carrier_off(netdev); printk(KERN_INFO "%s: Link down\n", netdev->name); } @@ -404,7 +588,8 @@ static void be_tx_stats_update(struct be_adapter *adapter, } /* Determine number of WRB entries needed to xmit data in an skb */ -static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) +static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, + bool *dummy) { int cnt = (skb->len > skb->data_len); @@ -412,12 +597,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) /* to account for hdr wrb */ cnt++; - if (cnt & 1) { + if (lancer_chip(adapter) || !(cnt & 1)) { + *dummy = false; + } else { /* add a dummy to make it an even num */ cnt++; *dummy = true; - } else - *dummy = false; + } BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT); return cnt; } @@ -443,8 +629,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, hdr, skb_shinfo(skb)->gso_size); - if (skb_is_gso_v6(skb)) + if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); + if (lancer_chip(adapter) && adapter->sli_family == + LANCER_A0_SLI_FAMILY) { + AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1); + if (is_tcp_pkt(skb)) + AMAP_SET_BITS(struct amap_eth_hdr_wrb, + tcpcs, hdr, 1); + else if (is_udp_pkt(skb)) + AMAP_SET_BITS(struct amap_eth_hdr_wrb, + udpcs, hdr, 1); + } } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (is_tcp_pkt(skb)) AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); @@ -469,7 +665,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); } -static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb, +static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, bool unmap_single) { dma_addr_t dma; @@ -479,11 +675,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb, dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo; if (wrb->frag_len) { if (unmap_single) - pci_unmap_single(pdev, dma, wrb->frag_len, - PCI_DMA_TODEVICE); + dma_unmap_single(dev, dma, wrb->frag_len, + DMA_TO_DEVICE); else - pci_unmap_page(pdev, dma, wrb->frag_len, - PCI_DMA_TODEVICE); + dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE); } } @@ -492,7 +687,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, { dma_addr_t busaddr; int i, copied = 0; - struct pci_dev *pdev = adapter->pdev; + struct device *dev = &adapter->pdev->dev; struct sk_buff *first_skb = skb; struct be_queue_info *txq = &adapter->tx_obj.q; struct be_eth_wrb *wrb; @@ -506,9 +701,8 @@ static int make_tx_wrbs(struct be_adapter *adapter, if (skb->len > skb->data_len) { int len = skb_headlen(skb); - busaddr = pci_map_single(pdev, skb->data, len, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, busaddr)) + busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, busaddr)) goto dma_err; map_single = true; wrb = queue_head_node(txq); @@ -521,10 +715,9 @@ static int make_tx_wrbs(struct be_adapter *adapter, for (i = 0; i < 
skb_shinfo(skb)->nr_frags; i++) { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; - busaddr = pci_map_page(pdev, frag->page, - frag->page_offset, - frag->size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, busaddr)) + busaddr = dma_map_page(dev, frag->page, frag->page_offset, + frag->size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, busaddr)) goto dma_err; wrb = queue_head_node(txq); wrb_fill(wrb, busaddr, frag->size); @@ -548,7 +741,7 @@ dma_err: txq->head = map_head; while (copied) { wrb = queue_head_node(txq); - unmap_tx_frag(pdev, wrb, map_single); + unmap_tx_frag(dev, wrb, map_single); map_single = false; copied -= wrb->frag_len; queue_head_inc(txq); @@ -566,7 +759,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, u32 start = txq->head; bool dummy_wrb, stopped = false; - wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb); + wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb); if (copied) { @@ -689,15 +882,15 @@ static void be_set_multicast_list(struct net_device *netdev) struct be_adapter *adapter = netdev_priv(netdev); if (netdev->flags & IFF_PROMISC) { - be_cmd_promiscuous_config(adapter, adapter->port_num, 1); + be_cmd_promiscuous_config(adapter, true); adapter->promiscuous = true; goto done; } - /* BE was previously in promiscous mode; disable it */ + /* BE was previously in promiscuous mode; disable it */ if (adapter->promiscuous) { adapter->promiscuous = false; - be_cmd_promiscuous_config(adapter, adapter->port_num, 0); + be_cmd_promiscuous_config(adapter, false); } /* Enable multicast promisc if num configured exceeds what we support */ @@ -728,11 +921,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) status = be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle, - adapter->vf_cfg[vf].vf_pmac_id); + adapter->vf_cfg[vf].vf_pmac_id, vf + 1); status = be_cmd_pmac_add(adapter, mac, adapter->vf_cfg[vf].vf_if_handle, - &adapter->vf_cfg[vf].vf_pmac_id); + &adapter->vf_cfg[vf].vf_pmac_id, vf + 1); if (status) dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", @@ -807,7 +1000,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev, rate = 10000; adapter->vf_cfg[vf].vf_tx_rate = rate; - status = be_cmd_set_qos(adapter, rate / 10, vf); + status = be_cmd_set_qos(adapter, rate / 10, vf + 1); if (status) dev_info(&adapter->pdev->dev, @@ -837,32 +1030,26 @@ static void be_rx_rate_update(struct be_rx_obj *rxo) } static void be_rx_stats_update(struct be_rx_obj *rxo, - u32 pktsize, u16 numfrags, u8 pkt_type) + struct be_rx_compl_info *rxcp) { struct be_rx_stats *stats = &rxo->stats; stats->rx_compl++; - stats->rx_frags += numfrags; - stats->rx_bytes += pktsize; + stats->rx_frags += rxcp->num_rcvd; + stats->rx_bytes += rxcp->pkt_size; stats->rx_pkts++; - if (pkt_type == BE_MULTICAST_PACKET) + if (rxcp->pkt_type == BE_MULTICAST_PACKET) stats->rx_mcast_pkts++; + if (rxcp->err) + stats->rxcp_err++; } -static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) +static inline bool csum_passed(struct be_rx_compl_info *rxcp) { - u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk; - - l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp); - ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp); - ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp); - if (ip_version) { - tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); - udpf = AMAP_GET_BITS(struct 
amap_eth_rx_compl, udpf, rxcp); - } - ipv6_chk = (ip_version && (tcpf || udpf)); - - return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true; + /* L4 checksum is not reliable for non TCP/UDP packets. + * Also ignore ipcksm for ipv6 pkts */ + return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum && + (rxcp->ip_csum || rxcp->ipv6); } static struct be_rx_page_info * @@ -877,8 +1064,9 @@ get_rx_page_info(struct be_adapter *adapter, BUG_ON(!rx_page_info->page); if (rx_page_info->last_page_user) { - pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus), - adapter->big_page_size, PCI_DMA_FROMDEVICE); + dma_unmap_page(&adapter->pdev->dev, + dma_unmap_addr(rx_page_info, bus), + adapter->big_page_size, DMA_FROM_DEVICE); rx_page_info->last_page_user = false; } @@ -889,20 +1077,17 @@ get_rx_page_info(struct be_adapter *adapter, /* Throwaway the data in the Rx completion */ static void be_rx_compl_discard(struct be_adapter *adapter, struct be_rx_obj *rxo, - struct be_eth_rx_compl *rxcp) + struct be_rx_compl_info *rxcp) { struct be_queue_info *rxq = &rxo->q; struct be_rx_page_info *page_info; - u16 rxq_idx, i, num_rcvd; - - rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); + u16 i, num_rcvd = rxcp->num_rcvd; for (i = 0; i < num_rcvd; i++) { - page_info = get_rx_page_info(adapter, rxo, rxq_idx); + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); put_page(page_info->page); memset(page_info, 0, sizeof(*page_info)); - index_inc(&rxq_idx, rxq->len); + index_inc(&rxcp->rxq_idx, rxq->len); } } @@ -911,30 +1096,23 @@ static void be_rx_compl_discard(struct be_adapter *adapter, * indicated by rxcp. */ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, - struct sk_buff *skb, struct be_eth_rx_compl *rxcp, - u16 num_rcvd) + struct sk_buff *skb, struct be_rx_compl_info *rxcp) { struct be_queue_info *rxq = &rxo->q; struct be_rx_page_info *page_info; - u16 rxq_idx, i, j; - u32 pktsize, hdr_len, curr_frag_len, size; + u16 i, j; + u16 hdr_len, curr_frag_len, remaining; u8 *start; - u8 pkt_type; - - rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); - pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); - pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); - - page_info = get_rx_page_info(adapter, rxo, rxq_idx); + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); start = page_address(page_info->page) + page_info->page_offset; prefetch(start); /* Copy data in the first descriptor of this completion */ - curr_frag_len = min(pktsize, rx_frag_size); + curr_frag_len = min(rxcp->pkt_size, rx_frag_size); /* Copy the header portion into skb_data */ - hdr_len = min((u32)BE_HDR_LEN, curr_frag_len); + hdr_len = min(BE_HDR_LEN, curr_frag_len); memcpy(skb->data, start, hdr_len); skb->len = curr_frag_len; if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ @@ -953,19 +1131,17 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, } page_info->page = NULL; - if (pktsize <= rx_frag_size) { - BUG_ON(num_rcvd != 1); - goto done; + if (rxcp->pkt_size <= rx_frag_size) { + BUG_ON(rxcp->num_rcvd != 1); + return; } /* More frags present for this completion */ - size = pktsize; - for (i = 1, j = 0; i < num_rcvd; i++) { - size -= curr_frag_len; - index_inc(&rxq_idx, rxq->len); - page_info = get_rx_page_info(adapter, rxo, rxq_idx); - - curr_frag_len = min(size, rx_frag_size); + index_inc(&rxcp->rxq_idx, rxq->len); + remaining = 
rxcp->pkt_size - curr_frag_len; + for (i = 1, j = 0; i < rxcp->num_rcvd; i++) { + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); + curr_frag_len = min(remaining, rx_frag_size); /* Coalesce all frags from the same physical page in one slot */ if (page_info->page_offset == 0) { @@ -984,30 +1160,22 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, skb->len += curr_frag_len; skb->data_len += curr_frag_len; + remaining -= curr_frag_len; + index_inc(&rxcp->rxq_idx, rxq->len); page_info->page = NULL; } BUG_ON(j > MAX_SKB_FRAGS); - -done: - be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type); } /* Process the RX completion indicated by rxcp when GRO is disabled */ static void be_rx_compl_process(struct be_adapter *adapter, struct be_rx_obj *rxo, - struct be_eth_rx_compl *rxcp) + struct be_rx_compl_info *rxcp) { + struct net_device *netdev = adapter->netdev; struct sk_buff *skb; - u32 vlanf, vid; - u16 num_rcvd; - u8 vtm; - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); - /* Is it a flush compl that has no data */ - if (unlikely(num_rcvd == 0)) - return; - - skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); + skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN); if (unlikely(!skb)) { if (net_ratelimit()) dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); @@ -1015,32 +1183,26 @@ static void be_rx_compl_process(struct be_adapter *adapter, return; } - skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd); + skb_fill_rx_data(adapter, rxo, skb, rxcp); - if (do_pkt_csum(rxcp, adapter->rx_csum)) - skb_checksum_none_assert(skb); - else + if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp))) skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); skb->truesize = skb->len + sizeof(struct sk_buff); - skb->protocol = eth_type_trans(skb, adapter->netdev); + skb->protocol = eth_type_trans(skb, netdev); + if (adapter->netdev->features & NETIF_F_RXHASH) + skb->rxhash = rxcp->rss_hash; - vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); - vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); - /* vlanf could be wrongly set in some cards. 
- * ignore if vtm is not set */ - if ((adapter->function_mode & 0x400) && !vtm) - vlanf = 0; - - if (unlikely(vlanf)) { + if (unlikely(rxcp->vlanf)) { if (!adapter->vlan_grp || adapter->vlans_added == 0) { kfree_skb(skb); return; } - vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - vid = swab16(vid); - vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); + vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, + rxcp->vlan_tag); } else { netif_receive_skb(skb); } @@ -1049,32 +1211,14 @@ static void be_rx_compl_process(struct be_adapter *adapter, /* Process the RX completion indicated by rxcp when GRO is enabled */ static void be_rx_compl_process_gro(struct be_adapter *adapter, struct be_rx_obj *rxo, - struct be_eth_rx_compl *rxcp) + struct be_rx_compl_info *rxcp) { struct be_rx_page_info *page_info; struct sk_buff *skb = NULL; struct be_queue_info *rxq = &rxo->q; struct be_eq_obj *eq_obj = &rxo->rx_eq; - u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; - u16 i, rxq_idx = 0, vid, j; - u8 vtm; - u8 pkt_type; - - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); - /* Is it a flush compl that has no data */ - if (unlikely(num_rcvd == 0)) - return; - - pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); - vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); - rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); - vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); - pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); - - /* vlanf could be wrongly set in some cards. - * ignore if vtm is not set */ - if ((adapter->function_mode & 0x400) && !vtm) - vlanf = 0; + u16 remaining, curr_frag_len; + u16 i, j; skb = napi_get_frags(&eq_obj->napi); if (!skb) { @@ -1082,9 +1226,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, return; } - remaining = pkt_size; - for (i = 0, j = -1; i < num_rcvd; i++) { - page_info = get_rx_page_info(adapter, rxo, rxq_idx); + remaining = rxcp->pkt_size; + for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); curr_frag_len = min(remaining, rx_frag_size); @@ -1102,69 +1246,145 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, skb_shinfo(skb)->frags[j].size += curr_frag_len; remaining -= curr_frag_len; - index_inc(&rxq_idx, rxq->len); + index_inc(&rxcp->rxq_idx, rxq->len); memset(page_info, 0, sizeof(*page_info)); } BUG_ON(j > MAX_SKB_FRAGS); skb_shinfo(skb)->nr_frags = j + 1; - skb->len = pkt_size; - skb->data_len = pkt_size; - skb->truesize += pkt_size; + skb->len = rxcp->pkt_size; + skb->data_len = rxcp->pkt_size; + skb->truesize += rxcp->pkt_size; skb->ip_summed = CHECKSUM_UNNECESSARY; + if (adapter->netdev->features & NETIF_F_RXHASH) + skb->rxhash = rxcp->rss_hash; - if (likely(!vlanf)) { + if (likely(!rxcp->vlanf)) napi_gro_frags(&eq_obj->napi); - } else { - vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - vid = swab16(vid); + else + vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, + rxcp->vlan_tag); +} + +static void be_parse_rx_compl_v1(struct be_adapter *adapter, + struct be_eth_rx_compl *compl, + struct be_rx_compl_info *rxcp) +{ + rxcp->pkt_size = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl); + rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl); + rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl); + rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl); + rxcp->udpf = AMAP_GET_BITS(struct 
amap_eth_rx_compl_v1, udpf, compl); + rxcp->ip_csum = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl); + rxcp->l4_csum = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl); + rxcp->ipv6 = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl); + rxcp->rxq_idx = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl); + rxcp->num_rcvd = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl); + rxcp->pkt_type = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); + rxcp->rss_hash = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp); + if (rxcp->vlanf) { + rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, + compl); + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, + compl); + } +} + +static void be_parse_rx_compl_v0(struct be_adapter *adapter, + struct be_eth_rx_compl *compl, + struct be_rx_compl_info *rxcp) +{ + rxcp->pkt_size = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl); + rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl); + rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl); + rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl); + rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl); + rxcp->ip_csum = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl); + rxcp->l4_csum = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl); + rxcp->ipv6 = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl); + rxcp->rxq_idx = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl); + rxcp->num_rcvd = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl); + rxcp->pkt_type = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); + rxcp->rss_hash = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp); + if (rxcp->vlanf) { + rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, + compl); + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, + compl); + } +} + +static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) +{ + struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq); + struct be_rx_compl_info *rxcp = &rxo->rxcp; + struct be_adapter *adapter = rxo->adapter; - if (!adapter->vlan_grp || adapter->vlans_added == 0) - return; + /* For checking the valid bit it is Ok to use either definition as the + * valid bit is at the same position in both v0 and v1 Rx compl */ + if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0) + return NULL; - vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); - } + rmb(); + be_dws_le_to_cpu(compl, sizeof(*compl)); - be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type); -} + if (adapter->be3_native) + be_parse_rx_compl_v1(adapter, compl, rxcp); + else + be_parse_rx_compl_v0(adapter, compl, rxcp); -static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo) -{ - struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq); + if (rxcp->vlanf) { + /* vlanf could be wrongly set in some cards. 
+ * ignore if vtm is not set */ + if ((adapter->function_mode & 0x400) && !rxcp->vtm) + rxcp->vlanf = 0; - if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) - return NULL; + if (!lancer_chip(adapter)) + rxcp->vlan_tag = swab16(rxcp->vlan_tag); - rmb(); - be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); + if (((adapter->pvid & VLAN_VID_MASK) == + (rxcp->vlan_tag & VLAN_VID_MASK)) && + !adapter->vlan_tag[rxcp->vlan_tag]) + rxcp->vlanf = 0; + } + + /* As the compl has been parsed, reset it; we wont touch it again */ + compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0; queue_tail_inc(&rxo->cq); return rxcp; } -/* To reset the valid bit, we need to reset the whole word as - * when walking the queue the valid entries are little-endian - * and invalid entries are host endian - */ -static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp) +static inline struct page *be_alloc_pages(u32 size, gfp_t gfp) { - rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0; -} - -static inline struct page *be_alloc_pages(u32 size) -{ - gfp_t alloc_flags = GFP_ATOMIC; u32 order = get_order(size); + if (order > 0) - alloc_flags |= __GFP_COMP; - return alloc_pages(alloc_flags, order); + gfp |= __GFP_COMP; + return alloc_pages(gfp, order); } /* * Allocate a page, split it to fragments of size rx_frag_size and post as * receive buffers to BE */ -static void be_post_rx_frags(struct be_rx_obj *rxo) +static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) { struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl; @@ -1178,14 +1398,14 @@ static void be_post_rx_frags(struct be_rx_obj *rxo) page_info = &rxo->page_info_tbl[rxq->head]; for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { if (!pagep) { - pagep = be_alloc_pages(adapter->big_page_size); + pagep = be_alloc_pages(adapter->big_page_size, gfp); if (unlikely(!pagep)) { rxo->stats.rx_post_fail++; break; } - page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, - adapter->big_page_size, - PCI_DMA_FROMDEVICE); + page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep, + 0, adapter->big_page_size, + DMA_FROM_DEVICE); page_info->page_offset = 0; } else { get_page(pagep); @@ -1239,7 +1459,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) return txcp; } -static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) +static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index) { struct be_queue_info *txq = &adapter->tx_obj.q; struct be_eth_wrb *wrb; @@ -1258,17 +1478,16 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) do { cur_index = txq->tail; wrb = queue_tail_node(txq); - unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr && - skb_headlen(sent_skb))); + unmap_tx_frag(&adapter->pdev->dev, wrb, + (unmap_skb_hdr && skb_headlen(sent_skb))); unmap_skb_hdr = false; num_wrbs++; queue_tail_inc(txq); } while (cur_index != last_index); - atomic_sub(num_wrbs, &txq->used); - kfree_skb(sent_skb); + return num_wrbs; } static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj) @@ -1327,14 +1546,13 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo) struct be_rx_page_info *page_info; struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rx_cq = &rxo->cq; - struct be_eth_rx_compl *rxcp; + struct be_rx_compl_info *rxcp; u16 tail; /* First cleanup pending rx completions */ while ((rxcp = be_rx_compl_get(rxo)) != NULL) { be_rx_compl_discard(adapter, rxo, 
rxcp); - be_rx_compl_reset(rxcp); - be_cq_notify(adapter, rx_cq->id, true, 1); + be_cq_notify(adapter, rx_cq->id, false, 1); } /* Then free posted rx buffer that were not used */ @@ -1352,7 +1570,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter) struct be_queue_info *tx_cq = &adapter->tx_obj.cq; struct be_queue_info *txq = &adapter->tx_obj.q; struct be_eth_tx_compl *txcp; - u16 end_idx, cmpl = 0, timeo = 0; + u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0; struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; struct sk_buff *sent_skb; bool dummy_wrb; @@ -1362,12 +1580,14 @@ static void be_tx_compl_clean(struct be_adapter *adapter) while ((txcp = be_tx_compl_get(tx_cq))) { end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, wrb_index, txcp); - be_tx_compl_process(adapter, end_idx); + num_wrbs += be_tx_compl_process(adapter, end_idx); cmpl++; } if (cmpl) { be_cq_notify(adapter, tx_cq->id, false, cmpl); + atomic_sub(num_wrbs, &txq->used); cmpl = 0; + num_wrbs = 0; } if (atomic_read(&txq->used) == 0 || ++timeo > 200) @@ -1385,8 +1605,10 @@ static void be_tx_compl_clean(struct be_adapter *adapter) sent_skb = sent_skbs[txq->tail]; end_idx = txq->tail; index_adv(&end_idx, - wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len); - be_tx_compl_process(adapter, end_idx); + wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1, + txq->len); + num_wrbs = be_tx_compl_process(adapter, end_idx); + atomic_sub(num_wrbs, &txq->used); } } @@ -1480,7 +1702,9 @@ static int be_tx_queues_create(struct be_adapter *adapter) /* Ask BE to create Tx Event queue */ if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) goto tx_eq_free; - adapter->base_eq_id = adapter->tx_eq.q.id; + + adapter->tx_eq.eq_idx = adapter->eq_next_idx++; + /* Alloc TX eth compl queue */ cq = &adapter->tx_obj.cq; @@ -1549,12 +1773,31 @@ static void be_rx_queues_destroy(struct be_adapter *adapter) } } +static u32 be_num_rxqs_want(struct be_adapter *adapter) +{ + if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) && + !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) { + return 1 + MAX_RSS_QS; /* one default non-RSS queue */ + } else { + dev_warn(&adapter->pdev->dev, + "No support for multiple RX queues\n"); + return 1; + } +} + static int be_rx_queues_create(struct be_adapter *adapter) { struct be_queue_info *eq, *q, *cq; struct be_rx_obj *rxo; int rc, i; + adapter->num_rx_qs = min(be_num_rxqs_want(adapter), + msix_enabled(adapter) ? + adapter->num_msix_vec - 1 : 1); + if (adapter->num_rx_qs != MAX_RX_QS) + dev_warn(&adapter->pdev->dev, + "Can create only %d RX queues", adapter->num_rx_qs); + adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; for_all_rx_queues(adapter, rxo, i) { rxo->adapter = adapter; @@ -1572,6 +1815,8 @@ static int be_rx_queues_create(struct be_adapter *adapter) if (rc) goto err; + rxo->rx_eq.eq_idx = adapter->eq_next_idx++; + /* CQ */ cq = &rxo->cq; rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, @@ -1582,7 +1827,6 @@ static int be_rx_queues_create(struct be_adapter *adapter) rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); if (rc) goto err; - /* Rx Q */ q = &rxo->q; rc = be_queue_alloc(adapter, q, RX_Q_LEN, @@ -1615,29 +1859,45 @@ err: return -1; } -/* There are 8 evt ids per func. 
Retruns the evt id's bit number */ -static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id) +static bool event_peek(struct be_eq_obj *eq_obj) { - return eq_id - adapter->base_eq_id; + struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q); + if (!eqe->evt) + return false; + else + return true; } static irqreturn_t be_intx(int irq, void *dev) { struct be_adapter *adapter = dev; struct be_rx_obj *rxo; - int isr, i; + int isr, i, tx = 0 , rx = 0; + + if (lancer_chip(adapter)) { + if (event_peek(&adapter->tx_eq)) + tx = event_handle(adapter, &adapter->tx_eq); + for_all_rx_queues(adapter, rxo, i) { + if (event_peek(&rxo->rx_eq)) + rx |= event_handle(adapter, &rxo->rx_eq); + } - isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + - (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE); - if (!isr) - return IRQ_NONE; + if (!(tx || rx)) + return IRQ_NONE; - if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr)) - event_handle(adapter, &adapter->tx_eq); + } else { + isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + + (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE); + if (!isr) + return IRQ_NONE; - for_all_rx_queues(adapter, rxo, i) { - if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr)) - event_handle(adapter, &rxo->rx_eq); + if ((1 << adapter->tx_eq.eq_idx & isr)) + event_handle(adapter, &adapter->tx_eq); + + for_all_rx_queues(adapter, rxo, i) { + if ((1 << rxo->rx_eq.eq_idx & isr)) + event_handle(adapter, &rxo->rx_eq); + } } return IRQ_HANDLED; @@ -1662,25 +1922,18 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev) return IRQ_HANDLED; } -static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo, - struct be_eth_rx_compl *rxcp) +static inline bool do_gro(struct be_rx_compl_info *rxcp) { - int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); - int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); - - if (err) - rxo->stats.rxcp_err++; - - return (tcp_frame && !err) ? true : false; + return (rxcp->tcpf && !rxcp->err) ? 
true : false; } -int be_poll_rx(struct napi_struct *napi, int budget) +static int be_poll_rx(struct napi_struct *napi, int budget) { struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi); struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq); struct be_adapter *adapter = rxo->adapter; struct be_queue_info *rx_cq = &rxo->cq; - struct be_eth_rx_compl *rxcp; + struct be_rx_compl_info *rxcp; u32 work_done; rxo->stats.rx_polls++; @@ -1689,17 +1942,22 @@ int be_poll_rx(struct napi_struct *napi, int budget) if (!rxcp) break; - if (do_gro(adapter, rxo, rxcp)) - be_rx_compl_process_gro(adapter, rxo, rxcp); - else - be_rx_compl_process(adapter, rxo, rxcp); + /* Ignore flush completions */ + if (rxcp->num_rcvd && rxcp->pkt_size) { + if (do_gro(rxcp)) + be_rx_compl_process_gro(adapter, rxo, rxcp); + else + be_rx_compl_process(adapter, rxo, rxcp); + } else if (rxcp->pkt_size == 0) { + be_rx_compl_discard(adapter, rxo, rxcp); + } - be_rx_compl_reset(rxcp); + be_rx_stats_update(rxo, rxcp); } /* Refill the queue */ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) - be_post_rx_frags(rxo); + be_post_rx_frags(rxo, GFP_ATOMIC); /* All consumed */ if (work_done < budget) { @@ -1724,12 +1982,12 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) struct be_queue_info *tx_cq = &adapter->tx_obj.cq; struct be_eth_tx_compl *txcp; int tx_compl = 0, mcc_compl, status = 0; - u16 end_idx; + u16 end_idx, num_wrbs = 0; while ((txcp = be_tx_compl_get(tx_cq))) { end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, wrb_index, txcp); - be_tx_compl_process(adapter, end_idx); + num_wrbs += be_tx_compl_process(adapter, end_idx); tx_compl++; } @@ -1745,6 +2003,8 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) if (tx_compl) { be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl); + atomic_sub(num_wrbs, &txq->used); + /* As Tx wrbs have been freed up, wake up netdev queue if * it was stopped due to lack of tx wrbs. 
*/ @@ -1779,6 +2039,7 @@ void be_detect_dump_ue(struct be_adapter *adapter) if (ue_status_lo || ue_status_hi) { adapter->ue_detected = true; + adapter->eeh_err = true; dev_err(&adapter->pdev->dev, "UE Detected!!\n"); } @@ -1806,9 +2067,31 @@ static void be_worker(struct work_struct *work) struct be_rx_obj *rxo; int i; - if (!adapter->stats_ioctl_sent) - be_cmd_get_stats(adapter, &adapter->stats_cmd); + if (!adapter->ue_detected && !lancer_chip(adapter)) + be_detect_dump_ue(adapter); + + /* when interrupts are not yet enabled, just reap any pending + * mcc completions */ + if (!netif_running(adapter->netdev)) { + int mcc_compl, status = 0; + + mcc_compl = be_process_mcc(adapter, &status); + + if (mcc_compl) { + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; + be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); + } + + goto reschedule; + } + if (!adapter->stats_cmd_sent) { + if (lancer_chip(adapter)) + lancer_cmd_get_pport_stats(adapter, + &adapter->stats_cmd); + else + be_cmd_get_stats(adapter, &adapter->stats_cmd); + } be_tx_rate_update(adapter); for_all_rx_queues(adapter, rxo, i) { @@ -1817,63 +2100,46 @@ static void be_worker(struct work_struct *work) if (rxo->rx_post_starved) { rxo->rx_post_starved = false; - be_post_rx_frags(rxo); + be_post_rx_frags(rxo, GFP_KERNEL); } } - if (!adapter->ue_detected) - be_detect_dump_ue(adapter); - +reschedule: + adapter->work_counter++; schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); } static void be_msix_disable(struct be_adapter *adapter) { - if (adapter->msix_enabled) { + if (msix_enabled(adapter)) { pci_disable_msix(adapter->pdev); - adapter->msix_enabled = false; - } -} - -static int be_num_rxqs_get(struct be_adapter *adapter) -{ - if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) && - !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) { - return 1 + MAX_RSS_QS; /* one default non-RSS queue */ - } else { - dev_warn(&adapter->pdev->dev, - "No support for multiple RX queues\n"); - return 1; + adapter->num_msix_vec = 0; } } static void be_msix_enable(struct be_adapter *adapter) { #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */ - int i, status; + int i, status, num_vec; - adapter->num_rx_qs = be_num_rxqs_get(adapter); + num_vec = be_num_rxqs_want(adapter) + 1; - for (i = 0; i < (adapter->num_rx_qs + 1); i++) + for (i = 0; i < num_vec; i++) adapter->msix_entries[i].entry = i; - status = pci_enable_msix(adapter->pdev, adapter->msix_entries, - adapter->num_rx_qs + 1); + status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec); if (status == 0) { goto done; } else if (status >= BE_MIN_MSIX_VECTORS) { + num_vec = status; if (pci_enable_msix(adapter->pdev, adapter->msix_entries, - status) == 0) { - adapter->num_rx_qs = status - 1; - dev_warn(&adapter->pdev->dev, - "Could alloc only %d MSIx vectors. 
" - "Using %d RX Qs\n", status, adapter->num_rx_qs); + num_vec) == 0) goto done; - } } return; done: - adapter->msix_enabled = true; + adapter->num_msix_vec = num_vec; + return; } static void be_sriov_enable(struct be_adapter *adapter) @@ -1881,7 +2147,20 @@ static void be_sriov_enable(struct be_adapter *adapter) be_check_sriov_fn_type(adapter); #ifdef CONFIG_PCI_IOV if (be_physfn(adapter) && num_vfs) { - int status; + int status, pos; + u16 nvfs; + + pos = pci_find_ext_capability(adapter->pdev, + PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(adapter->pdev, + pos + PCI_SRIOV_TOTAL_VF, &nvfs); + + if (num_vfs > nvfs) { + dev_info(&adapter->pdev->dev, + "Device supports %d VFs and not %d\n", + nvfs, num_vfs); + num_vfs = nvfs; + } status = pci_enable_sriov(adapter->pdev, num_vfs); adapter->sriov_enabled = status ? false : true; @@ -1899,10 +2178,10 @@ static void be_sriov_disable(struct be_adapter *adapter) #endif } -static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) +static inline int be_msix_vec_get(struct be_adapter *adapter, + struct be_eq_obj *eq_obj) { - return adapter->msix_entries[ - be_evt_bit_get(adapter, eq_id)].vector; + return adapter->msix_entries[eq_obj->eq_idx].vector; } static int be_request_irq(struct be_adapter *adapter, @@ -1913,14 +2192,14 @@ static int be_request_irq(struct be_adapter *adapter, int vec; sprintf(eq_obj->desc, "%s-%s", netdev->name, desc); - vec = be_msix_vec_get(adapter, eq_obj->q.id); + vec = be_msix_vec_get(adapter, eq_obj); return request_irq(vec, handler, 0, eq_obj->desc, context); } static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj, void *context) { - int vec = be_msix_vec_get(adapter, eq_obj->q.id); + int vec = be_msix_vec_get(adapter, eq_obj); free_irq(vec, context); } @@ -1954,8 +2233,7 @@ err_msix: err: dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", status); - pci_disable_msix(adapter->pdev); - adapter->msix_enabled = false; + be_msix_disable(adapter); return status; } @@ -1964,7 +2242,7 @@ static int be_irq_register(struct be_adapter *adapter) struct net_device *netdev = adapter->netdev; int status; - if (adapter->msix_enabled) { + if (msix_enabled(adapter)) { status = be_msix_register(adapter); if (status == 0) goto done; @@ -1997,7 +2275,7 @@ static void be_irq_unregister(struct be_adapter *adapter) return; /* INTx */ - if (!adapter->msix_enabled) { + if (!msix_enabled(adapter)) { free_irq(netdev->irq, adapter); goto done; } @@ -2019,22 +2297,32 @@ static int be_close(struct net_device *netdev) struct be_eq_obj *tx_eq = &adapter->tx_eq; int vec, i; - cancel_delayed_work_sync(&adapter->work); - be_async_mcc_disable(adapter); - netif_stop_queue(netdev); netif_carrier_off(netdev); adapter->link_up = false; - be_intr_set(adapter, false); + if (!lancer_chip(adapter)) + be_intr_set(adapter, false); - if (adapter->msix_enabled) { - vec = be_msix_vec_get(adapter, tx_eq->q.id); + for_all_rx_queues(adapter, rxo, i) + napi_disable(&rxo->rx_eq.napi); + + napi_disable(&tx_eq->napi); + + if (lancer_chip(adapter)) { + be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0); + be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); + for_all_rx_queues(adapter, rxo, i) + be_cq_notify(adapter, rxo->cq.id, false, 0); + } + + if (msix_enabled(adapter)) { + vec = be_msix_vec_get(adapter, tx_eq); synchronize_irq(vec); for_all_rx_queues(adapter, rxo, i) { - vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id); + vec = be_msix_vec_get(adapter, &rxo->rx_eq); synchronize_irq(vec); } } else { @@ -2042,11 
+2330,6 @@ static int be_close(struct net_device *netdev) } be_irq_unregister(adapter); - for_all_rx_queues(adapter, rxo, i) - napi_disable(&rxo->rx_eq.napi); - - napi_disable(&tx_eq->napi); - /* Wait for all pending tx completions to arrive so that * all tx skbs are freed. */ @@ -2066,14 +2349,15 @@ static int be_open(struct net_device *netdev) u16 link_speed; for_all_rx_queues(adapter, rxo, i) { - be_post_rx_frags(rxo); + be_post_rx_frags(rxo, GFP_KERNEL); napi_enable(&rxo->rx_eq.napi); } napi_enable(&tx_eq->napi); be_irq_register(adapter); - be_intr_set(adapter, true); + if (!lancer_chip(adapter)) + be_intr_set(adapter, true); /* The evt queues are created in unarmed state; arm them */ for_all_rx_queues(adapter, rxo, i) { @@ -2085,10 +2369,8 @@ static int be_open(struct net_device *netdev) /* Now that interrupts are on we can process async mcc */ be_async_mcc_enable(adapter); - schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); - status = be_cmd_link_status_query(adapter, &link_up, &mac_speed, - &link_speed); + &link_speed, 0); if (status) goto err; be_link_status_update(adapter, link_up); @@ -2119,7 +2401,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) memset(mac, 0, ETH_ALEN); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_KERNEL); if (cmd.va == NULL) return -1; memset(cmd.va, 0, cmd.size); @@ -2130,8 +2413,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) if (status) { dev_err(&adapter->pdev->dev, "Could not enable Wake-on-lan\n"); - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, - cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } status = be_cmd_enable_magic_wol(adapter, @@ -2144,7 +2427,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) pci_enable_wake(adapter->pdev, PCI_D3cold, 0); } - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2165,7 +2448,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter) for (vf = 0; vf < num_vfs; vf++) { status = be_cmd_pmac_add(adapter, mac, adapter->vf_cfg[vf].vf_if_handle, - &adapter->vf_cfg[vf].vf_pmac_id); + &adapter->vf_cfg[vf].vf_pmac_id, + vf + 1); if (status) dev_err(&adapter->pdev->dev, "Mac address add failed for VF %d\n", vf); @@ -2185,7 +2469,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter) if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle, - adapter->vf_cfg[vf].vf_pmac_id); + adapter->vf_cfg[vf].vf_pmac_id, vf + 1); } } @@ -2196,7 +2480,9 @@ static int be_setup(struct be_adapter *adapter) int status; u8 mac[ETH_ALEN]; - cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST; + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST; if (be_physfn(adapter)) { cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS | @@ -2204,7 +2490,7 @@ static int be_setup(struct be_adapter *adapter) BE_IF_FLAGS_PASS_L3L4_ERRORS; en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS; - if (be_multi_rxq(adapter)) { + if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) { cap_flags |= BE_IF_FLAGS_RSS; en_flags |= BE_IF_FLAGS_RSS; } @@ -2217,22 +2503,26 @@ static int be_setup(struct be_adapter *adapter) goto do_none; if (be_physfn(adapter)) { - 
while (vf < num_vfs) { - cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED - | BE_IF_FLAGS_BROADCAST; - status = be_cmd_if_create(adapter, cap_flags, en_flags, - mac, true, + if (adapter->sriov_enabled) { + while (vf < num_vfs) { + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_BROADCAST; + status = be_cmd_if_create(adapter, cap_flags, + en_flags, mac, true, &adapter->vf_cfg[vf].vf_if_handle, NULL, vf+1); - if (status) { - dev_err(&adapter->pdev->dev, - "Interface Create failed for VF %d\n", vf); - goto if_destroy; + if (status) { + dev_err(&adapter->pdev->dev, + "Interface Create failed for VF %d\n", + vf); + goto if_destroy; + } + adapter->vf_cfg[vf].vf_pmac_id = + BE_INVALID_PMAC_ID; + vf++; } - adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID; - vf++; } - } else if (!be_physfn(adapter)) { + } else { status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); if (!status) { @@ -2253,44 +2543,46 @@ static int be_setup(struct be_adapter *adapter) if (status != 0) goto rx_qs_destroy; - if (be_physfn(adapter)) { - status = be_vf_eth_addr_config(adapter); - if (status) - goto mcc_q_destroy; - } - adapter->link_speed = -1; return 0; -mcc_q_destroy: - if (be_physfn(adapter)) - be_vf_eth_addr_rem(adapter); - be_mcc_queues_destroy(adapter); rx_qs_destroy: be_rx_queues_destroy(adapter); tx_qs_destroy: be_tx_queues_destroy(adapter); if_destroy: - for (vf = 0; vf < num_vfs; vf++) - if (adapter->vf_cfg[vf].vf_if_handle) - be_cmd_if_destroy(adapter, - adapter->vf_cfg[vf].vf_if_handle); - be_cmd_if_destroy(adapter, adapter->if_handle); + if (be_physfn(adapter) && adapter->sriov_enabled) + for (vf = 0; vf < num_vfs; vf++) + if (adapter->vf_cfg[vf].vf_if_handle) + be_cmd_if_destroy(adapter, + adapter->vf_cfg[vf].vf_if_handle, + vf + 1); + be_cmd_if_destroy(adapter, adapter->if_handle, 0); do_none: return status; } static int be_clear(struct be_adapter *adapter) { - if (be_physfn(adapter)) + int vf; + + if (be_physfn(adapter) && adapter->sriov_enabled) be_vf_eth_addr_rem(adapter); be_mcc_queues_destroy(adapter); be_rx_queues_destroy(adapter); be_tx_queues_destroy(adapter); + adapter->eq_next_idx = 0; + + if (be_physfn(adapter) && adapter->sriov_enabled) + for (vf = 0; vf < num_vfs; vf++) + if (adapter->vf_cfg[vf].vf_if_handle) + be_cmd_if_destroy(adapter, + adapter->vf_cfg[vf].vf_if_handle, + vf + 1); - be_cmd_if_destroy(adapter, adapter->if_handle); + be_cmd_if_destroy(adapter, adapter->if_handle, 0); /* tell fw we're done with firing cmds */ be_cmd_fw_clean(adapter); @@ -2299,9 +2591,6 @@ static int be_clear(struct be_adapter *adapter) #define FW_FILE_HDR_SIGN "ServerEngines Corp. 
" -char flash_cookie[2][16] = {"*** SE FLAS", - "H DIRECTORY *** "}; - static bool be_flash_redboot(struct be_adapter *adapter, const u8 *p, u32 img_start, int image_size, int hdr_size) @@ -2339,10 +2628,10 @@ static int be_flash_data(struct be_adapter *adapter, int num_bytes; const u8 *p = fw->data; struct be_cmd_write_flashrom *req = flash_cmd->va; - struct flash_comp *pflashcomp; + const struct flash_comp *pflashcomp; int num_comp; - struct flash_comp gen3_flash_types[9] = { + static const struct flash_comp gen3_flash_types[9] = { { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE, FLASH_IMAGE_MAX_SIZE_g3}, { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT, @@ -2362,7 +2651,7 @@ static int be_flash_data(struct be_adapter *adapter, { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW, FLASH_NCSI_IMAGE_MAX_SIZE_g3} }; - struct flash_comp gen2_flash_types[8] = { + static const struct flash_comp gen2_flash_types[8] = { { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE, FLASH_IMAGE_MAX_SIZE_g2}, { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT, @@ -2384,11 +2673,11 @@ static int be_flash_data(struct be_adapter *adapter, if (adapter->generation == BE_GEN3) { pflashcomp = gen3_flash_types; filehdr_size = sizeof(struct flash_file_hdr_g3); - num_comp = 9; + num_comp = ARRAY_SIZE(gen3_flash_types); } else { pflashcomp = gen2_flash_types; filehdr_size = sizeof(struct flash_file_hdr_g2); - num_comp = 8; + num_comp = ARRAY_SIZE(gen2_flash_types); } for (i = 0; i < num_comp; i++) { if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) && @@ -2396,8 +2685,8 @@ static int be_flash_data(struct be_adapter *adapter, continue; if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) && (!be_flash_redboot(adapter, fw->data, - pflashcomp[i].offset, pflashcomp[i].size, - filehdr_size))) + pflashcomp[i].offset, pflashcomp[i].size, filehdr_size + + (num_of_images * sizeof(struct image_hdr))))) continue; p = fw->data; p += filehdr_size + pflashcomp[i].offset @@ -2425,7 +2714,6 @@ static int be_flash_data(struct be_adapter *adapter, "cmd to write to flash rom failed.\n"); return -1; } - yield(); } } return 0; @@ -2443,35 +2731,105 @@ static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr) return 0; } -int be_load_fw(struct be_adapter *adapter, u8 *func) +static int lancer_fw_download(struct be_adapter *adapter, + const struct firmware *fw) +{ +#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) +#define LANCER_FW_DOWNLOAD_LOCATION "/prg" + struct be_dma_mem flash_cmd; + const u8 *data_ptr = NULL; + u8 *dest_image_ptr = NULL; + size_t image_size = 0; + u32 chunk_size = 0; + u32 data_written = 0; + u32 offset = 0; + int status = 0; + u8 add_status = 0; + + if (!IS_ALIGNED(fw->size, sizeof(u32))) { + dev_err(&adapter->pdev->dev, + "FW Image not properly aligned. " + "Length must be 4 byte aligned.\n"); + status = -EINVAL; + goto lancer_fw_exit; + } + + flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + + LANCER_FW_DOWNLOAD_CHUNK; + flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, + &flash_cmd.dma, GFP_KERNEL); + if (!flash_cmd.va) { + status = -ENOMEM; + dev_err(&adapter->pdev->dev, + "Memory allocation failure while flashing\n"); + goto lancer_fw_exit; + } + + dest_image_ptr = flash_cmd.va + + sizeof(struct lancer_cmd_req_write_object); + image_size = fw->size; + data_ptr = fw->data; + + while (image_size) { + chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK); + + /* Copy the image chunk content. 
*/ + memcpy(dest_image_ptr, data_ptr, chunk_size); + + status = lancer_cmd_write_object(adapter, &flash_cmd, + chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION, + &data_written, &add_status); + + if (status) + break; + + offset += data_written; + data_ptr += data_written; + image_size -= data_written; + } + + if (!status) { + /* Commit the FW written */ + status = lancer_cmd_write_object(adapter, &flash_cmd, + 0, offset, LANCER_FW_DOWNLOAD_LOCATION, + &data_written, &add_status); + } + + dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, + flash_cmd.dma); + if (status) { + dev_err(&adapter->pdev->dev, + "Firmware load error. " + "Status code: 0x%x Additional Status: 0x%x\n", + status, add_status); + goto lancer_fw_exit; + } + + dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); +lancer_fw_exit: + return status; +} + +static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) { - char fw_file[ETHTOOL_FLASH_MAX_FILENAME]; - const struct firmware *fw; struct flash_file_hdr_g2 *fhdr; struct flash_file_hdr_g3 *fhdr3; struct image_hdr *img_hdr_ptr = NULL; struct be_dma_mem flash_cmd; - int status, i = 0, num_imgs = 0; const u8 *p; - - strcpy(fw_file, func); - - status = request_firmware(&fw, fw_file, &adapter->pdev->dev); - if (status) - goto fw_exit; + int status = 0, i = 0, num_imgs = 0; p = fw->data; fhdr = (struct flash_file_hdr_g2 *) p; - dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; - flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size, - &flash_cmd.dma); + flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, + &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) { status = -ENOMEM; dev_err(&adapter->pdev->dev, "Memory allocation failure while flashing\n"); - goto fw_exit; + goto be_fw_exit; } if ((adapter->generation == BE_GEN3) && @@ -2495,15 +2853,41 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) status = -1; } - pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va, - flash_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, + flash_cmd.dma); if (status) { dev_err(&adapter->pdev->dev, "Firmware load error\n"); - goto fw_exit; + goto be_fw_exit; } dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); +be_fw_exit: + return status; +} + +int be_load_fw(struct be_adapter *adapter, u8 *fw_file) +{ + const struct firmware *fw; + int status; + + if (!netif_running(adapter->netdev)) { + dev_err(&adapter->pdev->dev, + "Firmware load not allowed (interface is down)\n"); + return -1; + } + + status = request_firmware(&fw, fw_file, &adapter->pdev->dev); + if (status) + goto fw_exit; + + dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); + + if (lancer_chip(adapter)) + status = lancer_fw_download(adapter, fw); + else + status = be_fw_download(adapter, fw); + fw_exit: release_firmware(fw); return status; @@ -2532,15 +2916,22 @@ static void be_netdev_init(struct net_device *netdev) struct be_rx_obj *rxo; int i; - netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | - NETIF_F_GRO | NETIF_F_TSO6; + netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_TX; + if (be_multi_rxq(adapter)) + netdev->hw_features |= NETIF_F_RXHASH; - netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | 
NETIF_F_HW_CSUM; + netdev->features |= netdev->hw_features | + NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - netdev->flags |= IFF_MULTICAST; + netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + + if (lancer_chip(adapter)) + netdev->vlan_features |= NETIF_F_TSO6; - adapter->rx_csum = true; + netdev->flags |= IFF_MULTICAST; /* Default settings for Rx and Tx flow control */ adapter->rx_fc = true; @@ -2558,9 +2949,6 @@ static void be_netdev_init(struct net_device *netdev) netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, BE_NAPI_WEIGHT); - - netif_carrier_off(netdev); - netif_stop_queue(netdev); } static void be_unmap_pci_bars(struct be_adapter *adapter) @@ -2578,6 +2966,15 @@ static int be_map_pci_bars(struct be_adapter *adapter) u8 __iomem *addr; int pcicfg_reg, db_reg; + if (lancer_chip(adapter)) { + addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0), + pci_resource_len(adapter->pdev, 0)); + if (addr == NULL) + return -ENOMEM; + adapter->db = addr; + return 0; + } + if (be_physfn(adapter)) { addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), pci_resource_len(adapter->pdev, 2)); @@ -2626,13 +3023,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter) be_unmap_pci_bars(adapter); if (mem->va) - pci_free_consistent(adapter->pdev, mem->size, - mem->va, mem->dma); + dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, + mem->dma); mem = &adapter->mc_cmd_mem; if (mem->va) - pci_free_consistent(adapter->pdev, mem->size, - mem->va, mem->dma); + dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, + mem->dma); } static int be_ctrl_init(struct be_adapter *adapter) @@ -2647,8 +3044,10 @@ static int be_ctrl_init(struct be_adapter *adapter) goto done; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; - mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, - mbox_mem_alloc->size, &mbox_mem_alloc->dma); + mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev, + mbox_mem_alloc->size, + &mbox_mem_alloc->dma, + GFP_KERNEL); if (!mbox_mem_alloc->va) { status = -ENOMEM; goto unmap_pci_bars; @@ -2660,15 +3059,16 @@ static int be_ctrl_init(struct be_adapter *adapter) memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config); - mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size, - &mc_cmd_mem->dma); + mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev, + mc_cmd_mem->size, &mc_cmd_mem->dma, + GFP_KERNEL); if (mc_cmd_mem->va == NULL) { status = -ENOMEM; goto free_mbox; } memset(mc_cmd_mem->va, 0, mc_cmd_mem->size); - spin_lock_init(&adapter->mbox_lock); + mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); @@ -2677,8 +3077,8 @@ static int be_ctrl_init(struct be_adapter *adapter) return 0; free_mbox: - pci_free_consistent(adapter->pdev, mbox_mem_alloc->size, - mbox_mem_alloc->va, mbox_mem_alloc->dma); + dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size, + mbox_mem_alloc->va, mbox_mem_alloc->dma); unmap_pci_bars: be_unmap_pci_bars(adapter); @@ -2692,16 +3092,24 @@ static void be_stats_cleanup(struct be_adapter *adapter) struct be_dma_mem *cmd = &adapter->stats_cmd; if (cmd->va) - pci_free_consistent(adapter->pdev, cmd->size, - cmd->va, cmd->dma); + dma_free_coherent(&adapter->pdev->dev, cmd->size, + cmd->va, cmd->dma); } static int be_stats_init(struct be_adapter *adapter) { struct be_dma_mem *cmd = &adapter->stats_cmd; - cmd->size = sizeof(struct 
be_cmd_req_get_stats); - cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); + if (adapter->generation == BE_GEN2) { + cmd->size = sizeof(struct be_cmd_req_get_stats_v0); + } else { + if (lancer_chip(adapter)) + cmd->size = sizeof(struct lancer_cmd_req_pport_stats); + else + cmd->size = sizeof(struct be_cmd_req_get_stats_v1); + } + cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, + GFP_KERNEL); if (cmd->va == NULL) return -1; memset(cmd->va, 0, cmd->size); @@ -2715,6 +3123,8 @@ static void __devexit be_remove(struct pci_dev *pdev) if (!adapter) return; + cancel_delayed_work_sync(&adapter->work); + unregister_netdev(adapter->netdev); be_clear(adapter); @@ -2723,6 +3133,7 @@ static void __devexit be_remove(struct pci_dev *pdev) be_ctrl_cleanup(adapter); + kfree(adapter->vf_cfg); be_sriov_disable(adapter); be_msix_disable(adapter); @@ -2750,7 +3161,8 @@ static int be_get_config(struct be_adapter *adapter) memset(mac, 0, ETH_ALEN); - if (be_physfn(adapter)) { + /* A default permanent address is given to each VF for Lancer*/ + if (be_physfn(adapter) || lancer_chip(adapter)) { status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0); @@ -2769,9 +3181,97 @@ static int be_get_config(struct be_adapter *adapter) else adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + + be_cmd_check_native_mode(adapter); + return 0; +} + +static int be_dev_family_check(struct be_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u32 sli_intf = 0, if_type; + + switch (pdev->device) { + case BE_DEVICE_ID1: + case OC_DEVICE_ID1: + adapter->generation = BE_GEN2; + break; + case BE_DEVICE_ID2: + case OC_DEVICE_ID2: + adapter->generation = BE_GEN3; + break; + case OC_DEVICE_ID3: + case OC_DEVICE_ID4: + pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf); + if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> + SLI_INTF_IF_TYPE_SHIFT; + + if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) || + if_type != 0x02) { + dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n"); + return -EINVAL; + } + adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >> + SLI_INTF_FAMILY_SHIFT); + adapter->generation = BE_GEN3; + break; + default: + adapter->generation = 0; + } return 0; } +static int lancer_wait_ready(struct be_adapter *adapter) +{ +#define SLIPORT_READY_TIMEOUT 500 + u32 sliport_status; + int status = 0, i; + + for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + if (sliport_status & SLIPORT_STATUS_RDY_MASK) + break; + + msleep(20); + } + + if (i == SLIPORT_READY_TIMEOUT) + status = -1; + + return status; +} + +static int lancer_test_and_set_rdy_state(struct be_adapter *adapter) +{ + int status; + u32 sliport_status, err, reset_needed; + status = lancer_wait_ready(adapter); + if (!status) { + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + err = sliport_status & SLIPORT_STATUS_ERR_MASK; + reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK; + if (err && reset_needed) { + iowrite32(SLI_PORT_CONTROL_IP_MASK, + adapter->db + SLIPORT_CONTROL_OFFSET); + + /* check adapter has corrected the error */ + status = lancer_wait_ready(adapter); + sliport_status = ioread32(adapter->db + + SLIPORT_STATUS_OFFSET); + sliport_status &= (SLIPORT_STATUS_ERR_MASK | + SLIPORT_STATUS_RN_MASK); + if (status || sliport_status) + status = -1; + } else if (err || reset_needed) { + status = -1; + } + 
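
The lancer_wait_ready() routine added above is a standard bounded register poll: read the SLIPORT status register, sleep, and give up after a fixed number of iterations. A stripped-down sketch of the same idiom; the offset, mask and retry count here are illustrative, not the Lancer values:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/types.h>

#define DEMO_STATUS_OFFSET	0x0ac		/* illustrative register offset */
#define DEMO_RDY_MASK		0x00800000	/* illustrative "ready" bit */
#define DEMO_MAX_POLLS		500

/* Returns 0 once the ready bit is observed, -1 after the poll budget
 * is exhausted. */
static int demo_wait_ready(u8 __iomem *db)
{
	int i;

	for (i = 0; i < DEMO_MAX_POLLS; i++) {
		u32 status = ioread32(db + DEMO_STATUS_OFFSET);

		if (status & DEMO_RDY_MASK)
			return 0;

		msleep(20);	/* 500 polls * 20ms is ~10s worst case */
	}
	return -1;
}
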
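
The lancer_fw_download() path added earlier in this diff streams the firmware image in fixed-size pieces, advances by whatever byte count the write command reports back, and finishes with a zero-length write to commit. A generic sketch of that chunking loop; write_chunk() is a hypothetical stand-in for lancer_cmd_write_object(), not a driver function:

#include <linux/kernel.h>	/* min_t() */
#include <linux/types.h>

#define DEMO_CHUNK_SIZE		(32 * 1024)

static int demo_download(const u8 *image, size_t image_size,
			 int (*write_chunk)(const u8 *buf, u32 len,
					    u32 offset, u32 *written))
{
	u32 offset = 0, written = 0;
	int status = 0;

	while (image_size) {
		u32 chunk = min_t(u32, image_size, DEMO_CHUNK_SIZE);

		status = write_chunk(image, chunk, offset, &written);
		if (status)
			break;

		/* Advance by the device-reported count, not by 'chunk'. */
		offset     += written;
		image      += written;
		image_size -= written;
	}

	/* A zero-length write at the final offset commits the image. */
	if (!status)
		status = write_chunk(image, 0, offset, &written);

	return status;
}
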
} + return status; +} + static int __devinit be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) { @@ -2794,30 +3294,21 @@ static int __devinit be_probe(struct pci_dev *pdev, goto rel_reg; } adapter = netdev_priv(netdev); - - switch (pdev->device) { - case BE_DEVICE_ID1: - case OC_DEVICE_ID1: - adapter->generation = BE_GEN2; - break; - case BE_DEVICE_ID2: - case OC_DEVICE_ID2: - adapter->generation = BE_GEN3; - break; - default: - adapter->generation = 0; - } - adapter->pdev = pdev; pci_set_drvdata(pdev, adapter); + + status = be_dev_family_check(adapter); + if (status) + goto free_netdev; + adapter->netdev = netdev; SET_NETDEV_DEV(netdev, &pdev->dev); - status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (!status) { netdev->features |= NETIF_F_HIGHDMA; } else { - status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (status) { dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); goto free_netdev; @@ -2825,10 +3316,25 @@ static int __devinit be_probe(struct pci_dev *pdev, } be_sriov_enable(adapter); + if (adapter->sriov_enabled) { + adapter->vf_cfg = kcalloc(num_vfs, + sizeof(struct be_vf_cfg), GFP_KERNEL); + + if (!adapter->vf_cfg) + goto free_netdev; + } status = be_ctrl_init(adapter); if (status) - goto free_netdev; + goto free_vf_cfg; + + if (lancer_chip(adapter)) { + status = lancer_test_and_set_rdy_state(adapter); + if (status) { + dev_err(&pdev->dev, "Adapter in non recoverable error\n"); + goto ctrl_clean; + } + } /* sync up with fw's ready state */ if (be_physfn(adapter)) { @@ -2842,11 +3348,9 @@ static int __devinit be_probe(struct pci_dev *pdev, if (status) goto ctrl_clean; - if (be_physfn(adapter)) { - status = be_cmd_reset_function(adapter); - if (status) - goto ctrl_clean; - } + status = be_cmd_reset_function(adapter); + if (status) + goto ctrl_clean; status = be_stats_init(adapter); if (status) @@ -2868,10 +3372,35 @@ static int __devinit be_probe(struct pci_dev *pdev, status = register_netdev(netdev); if (status != 0) goto unsetup; + netif_carrier_off(netdev); + + if (be_physfn(adapter) && adapter->sriov_enabled) { + u8 mac_speed; + bool link_up; + u16 vf, lnk_speed; + + if (!lancer_chip(adapter)) { + status = be_vf_eth_addr_config(adapter); + if (status) + goto unreg_netdev; + } + + for (vf = 0; vf < num_vfs; vf++) { + status = be_cmd_link_status_query(adapter, &link_up, + &mac_speed, &lnk_speed, vf + 1); + if (!status) + adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10; + else + goto unreg_netdev; + } + } dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); + schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); return 0; +unreg_netdev: + unregister_netdev(netdev); unsetup: be_clear(adapter); msix_disable: @@ -2880,9 +3409,11 @@ stats_clean: be_stats_cleanup(adapter); ctrl_clean: be_ctrl_cleanup(adapter); +free_vf_cfg: + kfree(adapter->vf_cfg); free_netdev: be_sriov_disable(adapter); - free_netdev(adapter->netdev); + free_netdev(netdev); pci_set_drvdata(pdev, NULL); rel_reg: pci_release_regions(pdev); @@ -2898,6 +3429,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; + cancel_delayed_work_sync(&adapter->work); if (adapter->wol) be_setup_wol(adapter, true); @@ -2910,6 +3442,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) be_cmd_get_flow_control(adapter, &adapter->tx_fc, 
&adapter->rx_fc); be_clear(adapter); + be_msix_disable(adapter); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); @@ -2931,6 +3464,7 @@ static int be_resume(struct pci_dev *pdev) pci_set_power_state(pdev, 0); pci_restore_state(pdev); + be_msix_enable(adapter); /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) @@ -2946,6 +3480,8 @@ static int be_resume(struct pci_dev *pdev) if (adapter->wol) be_setup_wol(adapter, false); + + schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); return 0; } @@ -2955,15 +3491,19 @@ static int be_resume(struct pci_dev *pdev) static void be_shutdown(struct pci_dev *pdev) { struct be_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - netif_device_detach(netdev); + if (!adapter) + return; - be_cmd_reset_function(adapter); + cancel_delayed_work_sync(&adapter->work); + + netif_device_detach(adapter->netdev); if (adapter->wol) be_setup_wol(adapter, true); + be_cmd_reset_function(adapter); + pci_disable_device(pdev); } @@ -3075,13 +3615,6 @@ static int __init be_init_module(void) rx_frag_size = 2048; } - if (num_vfs > 32) { - printk(KERN_WARNING DRV_NAME - " : Module param num_vfs must not be greater than 32." - "Using 32\n"); - num_vfs = 32; - } - return pci_register_driver(&be_driver); } module_init(be_init_module); |
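
be_probe() keeps the usual two-step DMA mask negotiation while switching from pci_set_dma_mask() to dma_set_mask(): try a 64-bit mask first, advertise NETIF_F_HIGHDMA on success, otherwise fall back to a 32-bit mask before giving up. A minimal sketch of that negotiation; the helper name is illustrative:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Returns 0 on success and reports whether 64-bit DMA was accepted. */
static int demo_set_dma_mask(struct pci_dev *pdev, bool *highdma)
{
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*highdma = true;	/* caller may set NETIF_F_HIGHDMA */
		return 0;
	}

	*highdma = false;
	return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
}
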
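
The suspend, resume, shutdown and remove hunks all adjust the same lifecycle: the periodic worker is now cancelled synchronously before the hardware is torn down and re-armed only once the device is usable again, so it can never run against a dead adapter. A sketch of that cancel/re-arm pairing with illustrative names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_adapter {
	struct delayed_work work;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_adapter *a =
		container_of(work, struct demo_adapter, work.work);

	/* periodic housekeeping; typically re-arms itself */
	schedule_delayed_work(&a->work, msecs_to_jiffies(1000));
}

static void demo_init(struct demo_adapter *a)		/* probe */
{
	INIT_DELAYED_WORK(&a->work, demo_worker);
}

static void demo_start(struct demo_adapter *a)		/* probe/resume */
{
	schedule_delayed_work(&a->work, msecs_to_jiffies(100));
}

static void demo_stop(struct demo_adapter *a)		/* suspend/remove/shutdown */
{
	/* waits for a running instance to finish before returning */
	cancel_delayed_work_sync(&a->work);
}
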