Diffstat (limited to 'drivers/net/benet')
-rw-r--r--  drivers/net/benet/be.h      |   4
-rw-r--r--  drivers/net/benet/be_cmds.c |   8
-rw-r--r--  drivers/net/benet/be_cmds.h |   5
-rw-r--r--  drivers/net/benet/be_hw.h   |  12
-rw-r--r--  drivers/net/benet/be_main.c | 136
5 files changed, 117 insertions, 48 deletions
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index ed709a5d07d..4ac0d72660f 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -220,9 +220,7 @@ struct be_rx_obj {
 	struct be_rx_stats stats;
 	u8 rss_id;
 	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
-	u16 last_frag_index;
-	u16 rsvd;
-	u32 cache_line_barrier[15];
+	u32 cache_line_barrier[16];
 };
 
 struct be_drv_stats {
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1822ecdadc7..cc3a235475b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -726,7 +726,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 	if (lancer_chip(adapter)) {
-		req->hdr.version = 1;
+		req->hdr.version = 2;
 		req->page_size = 1; /* 1 for 4K */
 		AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
 								coalesce_wm);
@@ -862,6 +862,12 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
 		sizeof(*req));
 
+	if (lancer_chip(adapter)) {
+		req->hdr.version = 1;
+		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
+					adapter->if_handle);
+	}
+
 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
 	req->ulp_num = BE_ULP1_NUM;
 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 93e5768fc70..b4ac3938b29 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -430,7 +430,7 @@ struct be_cmd_resp_mcc_create {
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
 struct amap_tx_context {
-	u8 rsvd0[16];		/* dword 0 */
+	u8 if_id[16];		/* dword 0 */
 	u8 tx_ring_size[4];	/* dword 0 */
 	u8 rsvd1[26];		/* dword 0 */
 	u8 pci_func_id[8];	/* dword 1 */
@@ -518,7 +518,8 @@ enum be_if_flags {
 	BE_IF_FLAGS_VLAN = 0x100,
 	BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
 	BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
-	BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
+	BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
+	BE_IF_FLAGS_MULTICAST = 0x1000
 };
 
 /* An RX interface is an object with one or more MAC addresses and
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 3f459f76cd1..dbe67f353e8 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -44,6 +44,18 @@
 #define POST_STAGE_BE_RESET		0x3	/* Host wants to reset chip */
 #define POST_STAGE_ARMFW_RDY		0xc000	/* FW is done with POST */
 
+
+/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
+#define SLIPORT_STATUS_OFFSET		0x404
+#define SLIPORT_CONTROL_OFFSET		0x408
+
+#define SLIPORT_STATUS_ERR_MASK		0x80000000
+#define SLIPORT_STATUS_RN_MASK		0x01000000
+#define SLIPORT_STATUS_RDY_MASK		0x00800000
+
+
+#define SLI_PORT_CONTROL_IP_MASK	0x08000000
+
 /********* Memory BAR register ************/
 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET	0xfc
 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 0bdccb10aac..68f10781732 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -865,14 +865,17 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
 
 static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
 {
-	u8 l4_cksm, ipv6, ipcksm;
+	u8 l4_cksm, ipv6, ipcksm, tcpf, udpf;
 
 	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
 	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
 	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
+	tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
+	udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
 
-	/* Ignore ipcksm for ipv6 pkts */
-	return l4_cksm && (ipcksm || ipv6);
+	/* L4 checksum is not reliable for non TCP/UDP packets.
+	 * Also ignore ipcksm for ipv6 pkts */
+	return (tcpf || udpf) && l4_cksm && (ipcksm || ipv6);
 }
 
 static struct be_rx_page_info *
@@ -909,17 +912,11 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
 	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
 	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
 
-	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
-	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
-
-		rxo->last_frag_index = rxq_idx;
-
-		for (i = 0; i < num_rcvd; i++) {
-			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
-			put_page(page_info->page);
-			memset(page_info, 0, sizeof(*page_info));
-			index_inc(&rxq_idx, rxq->len);
-		}
+	for (i = 0; i < num_rcvd; i++) {
+		page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+		put_page(page_info->page);
+		memset(page_info, 0, sizeof(*page_info));
+		index_inc(&rxq_idx, rxq->len);
 	}
 }
 
@@ -1169,20 +1166,20 @@ static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
 	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
 }
 
-static inline struct page *be_alloc_pages(u32 size)
+static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
 {
-	gfp_t alloc_flags = GFP_ATOMIC;
 	u32 order = get_order(size);
+
 	if (order > 0)
-		alloc_flags |= __GFP_COMP;
-	return alloc_pages(alloc_flags, order);
+		gfp |= __GFP_COMP;
+	return alloc_pages(gfp, order);
 }
 
 /*
  * Allocate a page, split it to fragments of size rx_frag_size and post as
  * receive buffers to BE
  */
-static void be_post_rx_frags(struct be_rx_obj *rxo)
+static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 {
 	struct be_adapter *adapter = rxo->adapter;
 	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
@@ -1196,7 +1193,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
 	page_info = &rxo->page_info_tbl[rxq->head];
 	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
 		if (!pagep) {
-			pagep = be_alloc_pages(adapter->big_page_size);
+			pagep = be_alloc_pages(adapter->big_page_size, gfp);
 			if (unlikely(!pagep)) {
 				rxo->stats.rx_post_fail++;
 				break;
@@ -1579,9 +1576,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
 	for_all_rx_queues(adapter, rxo, i) {
 		rxo->adapter = adapter;
-		/* Init last_frag_index so that the frag index in the first
-		 * completion will never match */
-		rxo->last_frag_index = 0xffff;
 		rxo->rx_eq.max_eqd = BE_MAX_EQD;
 		rxo->rx_eq.enable_aic = true;
 
@@ -1722,7 +1716,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
 	struct be_queue_info *rx_cq = &rxo->cq;
 	struct be_eth_rx_compl *rxcp;
 	u32 work_done;
-	u16 frag_index, num_rcvd;
+	u16 num_rcvd;
 	u8 err;
 
 	rxo->stats.rx_polls++;
@@ -1732,16 +1726,10 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
 			break;
 
 		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
-		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
-								rxcp);
 		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
 								rxcp);
-
-		/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
-		if (likely(frag_index != rxo->last_frag_index &&
-				num_rcvd != 0)) {
-			rxo->last_frag_index = frag_index;
-
+		/* Ignore flush completions */
+		if (num_rcvd) {
 			if (do_gro(rxo, rxcp, err))
 				be_rx_compl_process_gro(adapter, rxo, rxcp);
 			else
@@ -1753,7 +1741,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
 
 	/* Refill the queue */
 	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
-		be_post_rx_frags(rxo);
+		be_post_rx_frags(rxo, GFP_ATOMIC);
 
 	/* All consumed */
 	if (work_done < budget) {
@@ -1890,7 +1878,7 @@ static void be_worker(struct work_struct *work)
 
 		if (rxo->rx_post_starved) {
 			rxo->rx_post_starved = false;
-			be_post_rx_frags(rxo);
+			be_post_rx_frags(rxo, GFP_KERNEL);
 		}
 	}
 	if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2094,13 +2082,24 @@ static int be_close(struct net_device *netdev)
 
 	be_async_mcc_disable(adapter);
 
-	netif_stop_queue(netdev);
 	netif_carrier_off(netdev);
 	adapter->link_up = false;
 
 	if (!lancer_chip(adapter))
 		be_intr_set(adapter, false);
 
+	for_all_rx_queues(adapter, rxo, i)
+		napi_disable(&rxo->rx_eq.napi);
+
+	napi_disable(&tx_eq->napi);
+
+	if (lancer_chip(adapter)) {
+		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
+		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+		for_all_rx_queues(adapter, rxo, i)
+			be_cq_notify(adapter, rxo->cq.id, false, 0);
+	}
+
 	if (adapter->msix_enabled) {
 		vec = be_msix_vec_get(adapter, tx_eq);
 		synchronize_irq(vec);
@@ -2114,11 +2113,6 @@ static int be_close(struct net_device *netdev)
 	}
 	be_irq_unregister(adapter);
 
-	for_all_rx_queues(adapter, rxo, i)
-		napi_disable(&rxo->rx_eq.napi);
-
-	napi_disable(&tx_eq->napi);
-
 	/* Wait for all pending tx completions to arrive so that
 	 * all tx skbs are freed.
 	 */
@@ -2138,7 +2132,7 @@ static int be_open(struct net_device *netdev)
 	u16 link_speed;
 
 	for_all_rx_queues(adapter, rxo, i) {
-		be_post_rx_frags(rxo);
+		be_post_rx_frags(rxo, GFP_KERNEL);
 		napi_enable(&rxo->rx_eq.napi);
 	}
 	napi_enable(&tx_eq->napi);
@@ -2269,7 +2263,9 @@ static int be_setup(struct be_adapter *adapter)
 	int status;
 	u8 mac[ETH_ALEN];
 
-	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
+	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+				BE_IF_FLAGS_BROADCAST |
+				BE_IF_FLAGS_MULTICAST;
 
 	if (be_physfn(adapter)) {
 		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2913,6 +2909,54 @@ static int be_dev_family_check(struct be_adapter *adapter)
 	return 0;
 }
 
+static int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 500
+	u32 sliport_status;
+	int status = 0, i;
+
+	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+			break;
+
+		msleep(20);
+	}
+
+	if (i == SLIPORT_READY_TIMEOUT)
+		status = -1;
+
+	return status;
+}
+
+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+	int status;
+	u32 sliport_status, err, reset_needed;
+	status = lancer_wait_ready(adapter);
+	if (!status) {
+		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+		if (err && reset_needed) {
+			iowrite32(SLI_PORT_CONTROL_IP_MASK,
+					adapter->db + SLIPORT_CONTROL_OFFSET);
+
+			/* check adapter has corrected the error */
+			status = lancer_wait_ready(adapter);
+			sliport_status = ioread32(adapter->db +
+							SLIPORT_STATUS_OFFSET);
+			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+						SLIPORT_STATUS_RN_MASK);
+			if (status || sliport_status)
+				status = -1;
+		} else if (err || reset_needed) {
+			status = -1;
+		}
+	}
+	return status;
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
 			const struct pci_device_id *pdev_id)
 {
@@ -2962,6 +3006,14 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	if (status)
 		goto free_netdev;
 
+	if (lancer_chip(adapter)) {
+		status = lancer_test_and_set_rdy_state(adapter);
+		if (status) {
+			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
+			goto free_netdev;
+		}
+	}
+
 	/* sync up with fw's ready state */
 	if (be_physfn(adapter)) {
 		status = be_cmd_POST(adapter);