Diffstat (limited to 'drivers/net/bna/bnad.c')
-rw-r--r-- | drivers/net/bna/bnad.c | 427
1 files changed, 226 insertions, 201 deletions
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 7e839b9cec2..fad912656fe 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -70,6 +70,8 @@ do {							\
 		(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
 } while (0)
 
+#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
+
 /*
  * Reinitialize completions in CQ, once Rx is taken down
  */
@@ -107,7 +109,7 @@ static void
 bnad_free_all_txbufs(struct bnad *bnad,
 		 struct bna_tcb *tcb)
 {
-	u16		unmap_cons;
+	u32		unmap_cons;
 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
 	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff		*skb = NULL;
@@ -130,7 +132,9 @@ bnad_free_all_txbufs(struct bnad *bnad,
 				PCI_DMA_TODEVICE);
 
 		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
-		unmap_cons++;
+		if (++unmap_cons >= unmap_q->q_depth)
+			break;
+
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			pci_unmap_page(bnad->pcidev,
 				pci_unmap_addr(&unmap_array[unmap_cons],
@@ -139,7 +143,8 @@ bnad_free_all_txbufs(struct bnad *bnad,
 				PCI_DMA_TODEVICE);
 			pci_unmap_addr_set(&unmap_array[unmap_cons],
 					   dma_addr, 0);
-			unmap_cons++;
+			if (++unmap_cons >= unmap_q->q_depth)
+				break;
 		}
 		dev_kfree_skb_any(skb);
 	}
@@ -167,11 +172,11 @@ bnad_free_txbufs(struct bnad *bnad,
 	/*
 	 * Just return if TX is stopped. This check is useful
 	 * when bnad_free_txbufs() runs out of a tasklet scheduled
-	 * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit
+	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
 	 * but this routine runs actually after the cleanup has been
 	 * executed.
 	 */
-	if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return 0;
 
 	updated_hw_cons = *(tcb->hw_consumer_index);
@@ -239,7 +244,7 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
 {
 	struct bnad *bnad = (struct bnad *)bnad_ptr;
 	struct bna_tcb *tcb;
-	u32		acked;
+	u32		acked = 0;
 	int		i, j;
 
 	for (i = 0; i < bnad->num_tx; i++) {
@@ -252,10 +257,26 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
 			    (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
 					       &tcb->flags))) {
 				acked = bnad_free_txbufs(bnad, tcb);
-				bna_ib_ack(tcb->i_dbell, acked);
+				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
+					&tcb->flags)))
+					bna_ib_ack(tcb->i_dbell, acked);
 				smp_mb__before_clear_bit();
 				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 			}
+			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
+						&tcb->flags)))
+				continue;
+			if (netif_queue_stopped(bnad->netdev)) {
+				if (acked && netif_carrier_ok(bnad->netdev) &&
+					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+					BNAD_NETIF_WAKE_THRESHOLD) {
+					netif_wake_queue(bnad->netdev);
+					/* TODO */
+					/* Counters for individual TxQs? */
+					BNAD_UPDATE_CTR(bnad,
+						netif_queue_wakeup);
+				}
+			}
 		}
 	}
 }
@@ -264,7 +285,7 @@ static u32
 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct net_device *netdev = bnad->netdev;
-	u32 sent;
+	u32 sent = 0;
 
 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
 		return 0;
@@ -275,12 +296,15 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
 		    netif_carrier_ok(netdev) &&
 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
 		    BNAD_NETIF_WAKE_THRESHOLD) {
-			netif_wake_queue(netdev);
-			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
+				netif_wake_queue(netdev);
+				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+			}
 		}
+	}
+
+	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 		bna_ib_ack(tcb->i_dbell, sent);
-	} else
-		bna_ib_ack(tcb->i_dbell, 0);
 
 	smp_mb__before_clear_bit();
 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
@@ -313,25 +337,24 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
 }
 
 static void
-bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
 	struct sk_buff *skb;
+	int unmap_cons;
 
 	unmap_q = rcb->unmap_q;
-	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
-		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
-		BUG_ON(!(skb));
-		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
+		skb = unmap_q->unmap_array[unmap_cons].skb;
+		if (!skb)
+			continue;
+		unmap_q->unmap_array[unmap_cons].skb = NULL;
 		pci_unmap_single(bnad->pcidev,
 				pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_q->consumer_index],
-					dma_addr), rcb->rxq->buffer_size +
-					NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
+					unmap_array[unmap_cons],
+					dma_addr), rcb->rxq->buffer_size,
+					PCI_DMA_FROMDEVICE);
 		dev_kfree_skb(skb);
-		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
-		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
 	}
-
 	bnad_reset_rcb(bnad, rcb);
 }
@@ -385,43 +408,11 @@ finishing:
 		unmap_q->producer_index = unmap_prod;
 		rcb->producer_index = unmap_prod;
 		smp_mb();
-		bna_rxq_prod_indx_doorbell(rcb);
+		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
+			bna_rxq_prod_indx_doorbell(rcb);
 	}
 }
 
-/*
- * Locking is required in the enable path
- * because it is called from a napi poll
- * context, where the bna_lock is not held
- * unlike the IRQ context.
- */
-static void
-bnad_enable_txrx_irqs(struct bnad *bnad)
-{
-	struct bna_tcb *tcb;
-	struct bna_ccb *ccb;
-	int i, j;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++) {
-			tcb = bnad->tx_info[i].tcb[j];
-			bna_ib_coalescing_timer_set(tcb->i_dbell,
-				tcb->txq->ib->ib_config.coalescing_timeo);
-			bna_ib_ack(tcb->i_dbell, 0);
-		}
-	}
-
-	for (i = 0; i < bnad->num_rx; i++) {
-		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
-			ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
-			bnad_enable_rx_irq_unsafe(ccb);
-		}
-	}
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-}
-
 static inline void
 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
 {
@@ -448,6 +439,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 
+	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
+		return 0;
+
 	prefetch(bnad->netdev);
 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, wi_range);
@@ -544,12 +538,15 @@ next:
 	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
 
 	if (likely(ccb)) {
-		bna_ib_ack(ccb->i_dbell, packets);
+		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+			bna_ib_ack(ccb->i_dbell, packets);
 		bnad_refill_rxq(bnad, ccb->rcb[0]);
 		if (ccb->rcb[1])
 			bnad_refill_rxq(bnad, ccb->rcb[1]);
-	} else
-		bna_ib_ack(ccb->i_dbell, 0);
+	} else {
+		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+			bna_ib_ack(ccb->i_dbell, 0);
+	}
 
 	return packets;
 }
@@ -557,6 +554,9 @@ next:
 static void
 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
 {
+	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+		return;
+
 	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
 	bna_ib_ack(ccb->i_dbell, 0);
 }
@@ -566,7 +566,8 @@ bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
+	/* Because of polling context */
+	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bnad_enable_rx_irq_unsafe(ccb);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
@@ -575,9 +576,11 @@ static void
 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
-	if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
+	struct napi_struct *napi = &rx_ctrl->napi;
+
+	if (likely(napi_schedule_prep(napi))) {
 		bnad_disable_rx_irq(bnad, ccb);
-		__napi_schedule((&rx_ctrl->napi));
+		__napi_schedule(napi);
 	}
 	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
 }
@@ -602,12 +605,11 @@ bnad_msix_mbox_handler(int irq, void *data)
 {
 	u32 intr_status;
 	unsigned long flags;
-	struct net_device *netdev = data;
-	struct bnad *bnad;
+	struct bnad *bnad = (struct bnad *)data;
 
-	bnad = netdev_priv(netdev);
+	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+		return IRQ_HANDLED;
 
-	/* BNA_ISR_GET(bnad); Inc Ref count */
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
 	bna_intr_status_get(&bnad->bna, intr_status);
@@ -617,7 +619,6 @@ bnad_msix_mbox_handler(int irq, void *data)
 
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-	/* BNAD_ISR_PUT(bnad); Dec Ref count */
 	return IRQ_HANDLED;
 }
 
@@ -627,8 +628,7 @@ bnad_isr(int irq, void *data)
 {
 	int i, j;
 	u32 intr_status;
 	unsigned long flags;
-	struct net_device *netdev = data;
-	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad *bnad = (struct bnad *)data;
 	struct bnad_rx_info *rx_info;
 	struct bnad_rx_ctrl *rx_ctrl;
@@ -642,16 +642,21 @@ bnad_isr(int irq, void *data)
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
-	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
 		bna_mbox_handler(&bnad->bna, intr_status);
-		if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
-			spin_unlock_irqrestore(&bnad->bna_lock, flags);
-			goto done;
-		}
-	}
+
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	if (!BNA_IS_INTX_DATA_INTR(intr_status))
+		return IRQ_HANDLED;
+
+	/* Process data interrupts */
+	/* Tx processing */
+	for (i = 0; i < bnad->num_tx; i++) {
+		for (j = 0; j < bnad->num_txq_per_tx; j++)
+			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+	}
+	/* Rx processing */
 	for (i = 0; i < bnad->num_rx; i++) {
 		rx_info = &bnad->rx_info[i];
 		if (!rx_info->rx)
@@ -663,7 +668,6 @@ bnad_isr(int irq, void *data)
 						     rx_ctrl->ccb);
 		}
 	}
-done:
 	return IRQ_HANDLED;
 }
 
@@ -674,11 +678,7 @@ done:
 static void
 bnad_enable_mbox_irq(struct bnad *bnad)
 {
-	int irq = BNAD_GET_MBOX_IRQ(bnad);
-
-	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		if (bnad->cfg_flags & BNAD_CF_MSIX)
-			enable_irq(irq);
+	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 }
@@ -690,14 +690,19 @@ bnad_enable_mbox_irq(struct bnad *bnad)
 static void
 bnad_disable_mbox_irq(struct bnad *bnad)
 {
-	int irq = BNAD_GET_MBOX_IRQ(bnad);
+	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+}
 
-	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		if (bnad->cfg_flags & BNAD_CF_MSIX)
-			disable_irq_nosync(irq);
+static void
+bnad_set_netdev_perm_addr(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
 
-	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+	if (is_zero_ether_addr(netdev->dev_addr))
+		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
 }
 
 /* Control Path Handlers */
@@ -755,11 +760,14 @@ bnad_cb_port_link_status(struct bnad *bnad,
 
 	if (link_up) {
 		if (!netif_carrier_ok(bnad->netdev)) {
+			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
+			if (!tcb)
+				return;
 			pr_warn("bna: %s link up\n",
 				bnad->netdev->name);
 			netif_carrier_on(bnad->netdev);
 			BNAD_UPDATE_CTR(bnad, link_toggle);
-			if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
+			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
 				/* Force an immediate Transmit Schedule */
 				pr_info("bna: %s TX_STARTED\n",
 					bnad->netdev->name);
@@ -807,6 +815,18 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct bnad_tx_info *tx_info =
 			(struct bnad_tx_info *)tcb->txq->tx->priv;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		cpu_relax();
+
+	bnad_free_all_txbufs(bnad, tcb);
+
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
 	tx_info->tcb[tcb->id] = NULL;
 }
@@ -822,6 +842,12 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
 }
 
 static void
+bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	bnad_free_all_rxbufs(bnad, rcb);
+}
+
+static void
 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_info *rx_info =
@@ -849,7 +875,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
 	if (tx_info != &bnad->tx_info[0])
 		return;
 
-	clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
+	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
 	netif_stop_queue(bnad->netdev);
 	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
 }
@@ -857,30 +883,15 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
 static void
 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
 {
-	if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
-		return;
-
-	if (netif_carrier_ok(bnad->netdev)) {
-		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
-		netif_wake_queue(bnad->netdev);
-		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
-	}
-}
-
-static void
-bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
-{
-	struct bnad_unmap_q *unmap_q;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
 
-	if (!tcb || (!tcb->unmap_q))
+	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return;
 
-	unmap_q = tcb->unmap_q;
-	if (!unmap_q->unmap_array)
-		return;
+	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
 
-	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-		return;
+	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		cpu_relax();
 
 	bnad_free_all_txbufs(bnad, tcb);
@@ -889,21 +900,45 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
 
 	smp_mb__before_clear_bit();
 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+	/*
+	 * Workaround for first device enable failure & we
+	 * get a 0 MAC address. We try to get the MAC address
+	 * again here.
+	 */
+	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
+		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
+		bnad_set_netdev_perm_addr(bnad);
+	}
+
+	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+
+	if (netif_carrier_ok(bnad->netdev)) {
+		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
+		netif_wake_queue(bnad->netdev);
+		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+	}
+}
+
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+	/* Delay only once for the whole Tx Path Shutdown */
+	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
+		mdelay(BNAD_TXRX_SYNC_MDELAY);
 }
 
 static void
 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
 {
-	bnad_cq_cmpl_init(bnad, ccb);
-
-	bnad_free_rxbufs(bnad, ccb->rcb[0]);
 	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
 
-	if (ccb->rcb[1]) {
-		bnad_free_rxbufs(bnad, ccb->rcb[1]);
+	if (ccb->rcb[1])
 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
-	}
+
+	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
+		mdelay(BNAD_TXRX_SYNC_MDELAY);
 }
 
 static void
@@ -911,6 +946,13 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
 
+	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
+
+	if (rcb == rcb->cq->ccb->rcb[0])
+		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
+
+	bnad_free_all_rxbufs(bnad, rcb);
+
 	set_bit(BNAD_RXQ_STARTED, &rcb->flags);
 
 	/* Now allocate & post buffers for this RCB */
@@ -1047,7 +1089,7 @@ bnad_mbox_irq_free(struct bnad *bnad,
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	irq = BNAD_GET_MBOX_IRQ(bnad);
-	free_irq(irq, bnad->netdev);
+	free_irq(irq, bnad);
 
 	kfree(intr_info->idl);
 }
@@ -1061,7 +1103,7 @@ static int
 bnad_mbox_irq_alloc(struct bnad *bnad,
 		    struct bna_intr_info *intr_info)
 {
-	int err;
+	int err = 0;
 	unsigned long flags;
 	u32 irq;
 	irq_handler_t irq_handler;
@@ -1096,22 +1138,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
 	 */
 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
+	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+
 	err = request_irq(irq, irq_handler, flags,
-			  bnad->mbox_irq_name, bnad->netdev);
+			  bnad->mbox_irq_name, bnad);
 	if (err) {
 		kfree(intr_info->idl);
 		intr_info->idl = NULL;
-		return err;
 	}
 
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-
-	if (bnad->cfg_flags & BNAD_CF_MSIX)
-		disable_irq_nosync(irq);
-
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-	return 0;
+	return err;
 }
 
 static void
@@ -1388,13 +1425,24 @@ bnad_ioc_hb_check(unsigned long data)
 }
 
 static void
-bnad_ioc_sem_timeout(unsigned long data)
+bnad_iocpf_timeout(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_iocpf_sem_timeout(unsigned long data)
 {
 	struct bnad *bnad = (struct bnad *)data;
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
+	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1555,62 +1603,19 @@ poll_exit:
 	return rcvd;
 }
 
-static int
-bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
-{
-	struct bnad_rx_ctrl *rx_ctrl =
-		container_of(napi, struct bnad_rx_ctrl, napi);
-	struct bna_ccb *ccb;
-	struct bnad *bnad;
-	int rcvd = 0;
-	int i, j;
-
-	ccb = rx_ctrl->ccb;
-
-	bnad = ccb->bnad;
-
-	if (!netif_carrier_ok(bnad->netdev))
-		goto poll_exit;
-
-	/* Handle Tx Completions, if any */
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++)
-			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
-	}
-
-	/* Handle Rx Completions */
-	rcvd = bnad_poll_cq(bnad, ccb, budget);
-	if (rcvd == budget)
-		return rcvd;
-poll_exit:
-	napi_complete((napi));
-
-	BNAD_UPDATE_CTR(bnad, netif_rx_complete);
-
-	bnad_enable_txrx_irqs(bnad);
-	return rcvd;
-}
-
 static void
 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
 {
-	int (*napi_poll) (struct napi_struct *, int);
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-	if (bnad->cfg_flags & BNAD_CF_MSIX)
-		napi_poll = bnad_napi_poll_rx;
-	else
-		napi_poll = bnad_napi_poll_txrx;
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	/* Initialize & enable NAPI */
 	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+
 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
-			       napi_poll, 64);
+			       bnad_napi_poll_rx, 64);
+		napi_enable(&rx_ctrl->napi);
 	}
 }
@@ -1825,6 +1830,7 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
 
 	/* Initialize the Rx event handlers */
 	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
+	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
 	rx_cbfn.rcb_destroy_cbfn = NULL;
 	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
 	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
@@ -1968,6 +1974,27 @@ bnad_enable_default_bcast(struct bnad *bnad)
 	return 0;
 }
 
+/* Called with bnad_conf_lock() held */
+static void
+bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
+{
+	u16 vlan_id;
+	unsigned long flags;
+
+	if (!bnad->vlan_grp)
+		return;
+
+	BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
+
+	for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
+		if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
+			continue;
+		spin_lock_irqsave(&bnad->bna_lock, flags);
+		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	}
+}
+
 /* Statistics utilities */
 void
 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
@@ -2152,16 +2179,6 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
 	bnad->num_rxp_per_rx = 1;
 }
 
-static void
-bnad_set_netdev_perm_addr(struct bnad *bnad)
-{
-	struct net_device *netdev = bnad->netdev;
-
-	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
-	if (is_zero_ether_addr(netdev->dev_addr))
-		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
-}
-
 /* Enable / disable device */
 static void
 bnad_device_disable(struct bnad *bnad)
@@ -2353,6 +2370,9 @@ bnad_open(struct net_device *netdev)
 	/* Enable broadcast */
 	bnad_enable_default_bcast(bnad);
 
+	/* Restore VLANs, if any */
+	bnad_restore_vlans(bnad, 0);
+
 	/* Set the UCAST address */
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
@@ -2433,21 +2453,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
+	tx_id = 0;
+
+	tx_info = &bnad->tx_info[tx_id];
+	tcb = tx_info->tcb[tx_id];
+	unmap_q = tcb->unmap_q;
+
 	/*
 	 * Takes care of the Tx that is scheduled between clearing the flag
 	 * and the netif_stop_queue() call.
 	 */
-	if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
+	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	tx_id = 0;
-
-	tx_info = &bnad->tx_info[tx_id];
-	tcb = tx_info->tcb[tx_id];
-	unmap_q = tcb->unmap_q;
-
 	vectors = 1 + skb_shinfo(skb)->nr_frags;
 	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
 		dev_kfree_skb(skb);
@@ -2462,7 +2482,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			tcb->consumer_index &&
 			!test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
 			acked = bnad_free_txbufs(bnad, tcb);
-			bna_ib_ack(tcb->i_dbell, acked);
+			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+				bna_ib_ack(tcb->i_dbell, acked);
 			smp_mb__before_clear_bit();
 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 		} else {
@@ -2624,6 +2645,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tcb->producer_index = txq_prod;
 
 	smp_mb();
+
+	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+		return NETDEV_TX_OK;
+
 	bna_txq_prod_indx_doorbell(tcb);
 
 	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
@@ -3032,7 +3057,7 @@ static int __devinit
 bnad_pci_probe(struct pci_dev *pdev,
 		const struct pci_device_id *pcidev_id)
 {
-	bool using_dac;
+	bool using_dac = false;
 	int err;
 	struct bnad *bnad;
 	struct bna *bna;
@@ -3066,7 +3091,7 @@ bnad_pci_probe(struct pci_dev *pdev,
 	/*
 	 * PCI initialization
 	 *	Output : using_dac = 1 for 64 bit DMA
-	 *		= 0 for 32 bit DMA
+	 *		           = 0 for 32 bit DMA
 	 */
 	err = bnad_pci_init(bnad, pdev, &using_dac);
 	if (err)
@@ -3084,6 +3109,9 @@ bnad_pci_probe(struct pci_dev *pdev,
 	/* Initialize netdev structure, set up ethtool ops */
 	bnad_netdev_init(bnad, using_dac);
 
+	/* Set link to down state */
+	netif_carrier_off(netdev);
+
 	bnad_enable_msix(bnad);
 
 	/* Get resource requirement form bna */
@@ -3115,11 +3143,13 @@ bnad_pci_probe(struct pci_dev *pdev,
 		    ((unsigned long)bnad));
 	setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
 		    ((unsigned long)bnad));
-	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
+	setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
+		    ((unsigned long)bnad));
+	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
 		    ((unsigned long)bnad));
 
 	/* Now start the timer before calling IOC */
-	mod_timer(&bnad->bna.device.ioc.ioc_timer,
+	mod_timer(&bnad->bna.device.ioc.iocpf_timer,
 		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
 
 	/*
@@ -3137,11 +3167,6 @@ bnad_pci_probe(struct pci_dev *pdev,
 
 	mutex_unlock(&bnad->conf_mutex);
 
-	/*
-	 * Make sure the link appears down to the stack
-	 */
-	netif_carrier_off(netdev);
-
 	/* Finally, reguister with net_device layer */
 	err = register_netdev(netdev);
 	if (err) {
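The pattern this patch applies throughout bnad.c can be sketched outside the kernel: datapath code acks the doorbell and wakes the queue only while a per-queue STARTED flag is set, and buffer freeing is serialized through a FREE_SENT bit used as a try-lock, with the teardown path spinning until it owns that bit. The sketch below is illustrative only and not part of the patch; queue_flags, QUEUE_STARTED, FREE_SENT, ring_doorbell(), datapath_complete() and cleanup_path() are hypothetical stand-ins for the driver's BNAD_TXQ_TX_STARTED / BNAD_TXQ_FREE_SENT flags and bna_ib_ack(), using C11 atomics in place of the kernel's set_bit()/test_and_set_bit() helpers.

/*
 * Illustrative userspace sketch (assumptions: names and flag layout are
 * invented here, not taken from bnad.c).  Models gating doorbell writes on
 * a STARTED flag and using FREE_SENT as a try-lock between the datapath
 * and the cleanup path.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

enum { QUEUE_STARTED = 1u << 0, FREE_SENT = 1u << 1 };

static atomic_uint queue_flags = QUEUE_STARTED;

static void ring_doorbell(unsigned acked)
{
	printf("doorbell ack %u\n", acked);
}

/* Datapath: free completed buffers, but only ack hardware while started. */
static void datapath_complete(unsigned acked)
{
	/* Try-lock: skip if cleanup (or another completer) owns the queue. */
	if (atomic_fetch_or(&queue_flags, FREE_SENT) & FREE_SENT)
		return;

	if (atomic_load(&queue_flags) & QUEUE_STARTED)
		ring_doorbell(acked);

	atomic_fetch_and(&queue_flags, ~FREE_SENT);	/* release */
}

/* Cleanup: stop the queue, then wait until it owns FREE_SENT exclusively. */
static void cleanup_path(void)
{
	atomic_fetch_and(&queue_flags, ~QUEUE_STARTED);

	while (atomic_fetch_or(&queue_flags, FREE_SENT) & FREE_SENT)
		sched_yield();	/* stands in for cpu_relax() */

	/* ... free all buffers, reset producer/consumer indices ... */

	atomic_fetch_and(&queue_flags, ~FREE_SENT);
}

int main(void)
{
	datapath_complete(4);	/* acks: queue still started */
	cleanup_path();		/* stops queue, drains exclusively */
	datapath_complete(2);	/* no ack: QUEUE_STARTED is cleared */
	return 0;
}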