author     Grant Likely <grant.likely@secretlab.ca>   2011-03-17 13:46:58 -0600
committer  Grant Likely <grant.likely@secretlab.ca>   2011-03-17 13:48:06 -0600
commit     2dd22997679a88874c131f6e6ffb963e6d43b3a6 (patch)
tree       bfe1707dda7e755b8b550c6796e2649813bcfbb9 /drivers/net/vxge/vxge-main.c
parent     36885ff0e6563687e6152da6d311abbf83c0198f (diff)
parent     7b7adc4a016a1decb806eb71ecab98721fa7f146 (diff)
Merge remote-tracking branch 'origin' into spi/next
Pull in Linus' tree to pick up changes required for the langwell gpio fixes
Diffstat (limited to 'drivers/net/vxge/vxge-main.c')
-rw-r--r--  drivers/net/vxge/vxge-main.c | 234
1 files changed, 168 insertions, 66 deletions
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c81a6512c68..395423aeec0 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
         struct vxge_hw_ring_rxd_info ext_info;
         vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                 ring->ndev->name, __func__, __LINE__);
-        ring->pkts_processed = 0;
-
-        vxge_hw_ring_replenish(ringh);
 
         do {
                 prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
         return ret;
 }
 
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+        int i = 0;
+
+        /* Enable CI for RTI */
+        if (vdev->config.intr_type == MSI_X) {
+                for (i = 0; i < vdev->no_of_vpath; i++) {
+                        struct __vxge_hw_ring *hw_ring;
+
+                        hw_ring = vdev->vpaths[i].ring.handle;
+                        vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+                }
+        }
+
+        /* Enable CI for TTI */
+        for (i = 0; i < vdev->no_of_vpath; i++) {
+                struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+                vxge_hw_vpath_tti_ci_set(hw_fifo);
+                /*
+                 * For Inta (with or without napi), Set CI ON for only one
+                 * vpath. (Have only one free running timer).
+                 */
+                if ((vdev->config.intr_type == INTA) && (i == 0))
+                        break;
+        }
+
+        return;
+}
+
 static int do_vxge_reset(struct vxgedev *vdev, int event)
 {
         enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
                 netif_tx_wake_all_queues(vdev->ndev);
         }
 
+        /* configure CI */
+        vxge_config_ci_for_tti_rti(vdev);
+
 out:
         vxge_debug_entryexit(VXGE_TRACE,
                 "%s:%d Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
  */
 static int vxge_poll_msix(struct napi_struct *napi, int budget)
 {
-        struct vxge_ring *ring =
-                container_of(napi, struct vxge_ring, napi);
+        struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+        int pkts_processed;
         int budget_org = budget;
-        ring->budget = budget;
 
+        ring->budget = budget;
+        ring->pkts_processed = 0;
         vxge_hw_vpath_poll_rx(ring->handle);
+        pkts_processed = ring->pkts_processed;
 
         if (ring->pkts_processed < budget_org) {
                 napi_complete(napi);
+
+                /* Re enable the Rx interrupts for the vpath */
                 vxge_hw_channel_msix_unmask(
                                 (struct __vxge_hw_channel *)ring->handle,
                                 ring->rx_vector_no);
+                mmiowb();
         }
 
-        return ring->pkts_processed;
+        /* We are copying and returning the local variable, in case if after
+         * clearing the msix interrupt above, if the interrupt fires right
+         * away which can preempt this NAPI thread */
+        return pkts_processed;
 }
 
 static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
         for (i = 0; i < vdev->no_of_vpath; i++) {
                 ring = &vdev->vpaths[i].ring;
                 ring->budget = budget;
+                ring->pkts_processed = 0;
                 vxge_hw_vpath_poll_rx(ring->handle);
                 pkts_processed += ring->pkts_processed;
                 budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
                         netdev_get_tx_queue(vdev->ndev, 0);
                 vpath->fifo.indicate_max_pkts =
                         vdev->config.fifo_indicate_max_pkts;
+                vpath->fifo.tx_vector_no = 0;
                 vpath->ring.rx_vector_no = 0;
                 vpath->ring.rx_csum = vdev->rx_csum;
                 vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
         return VXGE_HW_OK;
 }
 
+/**
+ * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @fifo: pointer to transmit fifo structure
+ * Description: The function changes boundary timer and restriction timer
+ * value depends on the traffic
+ * Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+        fifo->interrupt_count++;
+        if (jiffies > fifo->jiffies + HZ / 100) {
+                struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+                fifo->jiffies = jiffies;
+                if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+                    hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+                        hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+                        vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+                } else if (hw_fifo->rtimer != 0) {
+                        hw_fifo->rtimer = 0;
+                        vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+                }
+                fifo->interrupt_count = 0;
+        }
+}
+
+/**
+ * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @ring: pointer to receive ring structure
+ * Description: The function increases of decreases the packet counts within
+ * the ranges of traffic utilization, if the interrupts due to this ring are
+ * not within a fixed range.
+ * Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+        ring->interrupt_count++;
+        if (jiffies > ring->jiffies + HZ / 100) {
+                struct __vxge_hw_ring *hw_ring = ring->handle;
+
+                ring->jiffies = jiffies;
+                if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+                    hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+                        hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+                        vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+                } else if (hw_ring->rtimer != 0) {
+                        hw_ring->rtimer = 0;
+                        vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+                }
+                ring->interrupt_count = 0;
+        }
+}
+
 /*
  * vxge_isr_napi
  * @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 
 #ifdef CONFIG_PCI_MSI
 
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
         struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
 
+        adaptive_coalesce_tx_interrupts(fifo);
+
+        vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+                                  fifo->tx_vector_no);
+
+        vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+                                   fifo->tx_vector_no);
+
         VXGE_COMPLETE_VPATH_TX(fifo);
 
+        vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+                                    fifo->tx_vector_no);
+
+        mmiowb();
+
         return IRQ_HANDLED;
 }
 
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
 {
         struct vxge_ring *ring = (struct vxge_ring *)dev_id;
 
-        /* MSIX_IDX for Rx is 1 */
+        adaptive_coalesce_rx_interrupts(ring);
+
         vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
-                                  ring->rx_vector_no);
+                                  ring->rx_vector_no);
+
+        vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+                                   ring->rx_vector_no);
 
         napi_schedule(&ring->napi);
         return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
                 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 
         for (i = 0; i < vdev->no_of_vpath; i++) {
+                /* Reduce the chance of loosing alarm interrupts by masking
+                 * the vector. A pending bit will be set if an alarm is
+                 * generated and on unmask the interrupt will be fired.
+                 */
                 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+                vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+                mmiowb();
 
                 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
                         vdev->exec_mode);
                 if (status == VXGE_HW_OK) {
                         vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
-                                msix_id);
+                                                  msix_id);
+                        mmiowb();
                         continue;
                 }
                 vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
                 vpath->ring.rx_vector_no = (vpath->device_id *
                                         VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
+                vpath->fifo.tx_vector_no = (vpath->device_id *
+                                        VXGE_HW_VPATH_MSIX_ACTIVE);
+
                 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
                                 VXGE_ALARM_MSIX_ID);
         }
@@ -2474,8 +2592,9 @@ INTA_MODE:
                         "%s:vxge:INTA", vdev->ndev->name);
                 vxge_hw_device_set_intr_type(vdev->devh,
                         VXGE_HW_INTR_MODE_IRQLINE);
-                vxge_hw_vpath_tti_ci_set(vdev->devh,
-                        vdev->vpaths[0].device_id);
+
+                vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
                 ret = request_irq((int) vdev->pdev->irq,
                         vxge_isr_napi,
                         IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
         }
 
         netif_tx_start_all_queues(vdev->ndev);
+
+        /* configure CI */
+        vxge_config_ci_for_tti_rti(vdev);
+
         goto out0;
 
 out2:
@@ -3264,19 +3387,6 @@ static const struct net_device_ops vxge_netdev_ops = {
 #endif
 };
 
-static int __devinit vxge_device_revision(struct vxgedev *vdev)
-{
-        int ret;
-        u8 revision;
-
-        ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
-        if (ret)
-                return -EIO;
-
-        vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
-        return 0;
-}
-
 static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                                           struct vxge_config *config,
                                           int high_dma, int no_of_vpath,
@@ -3316,10 +3426,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
         memcpy(&vdev->config, config, sizeof(struct vxge_config));
         vdev->rx_csum = 1;      /* Enable Rx CSUM by default. */
         vdev->rx_hwts = 0;
-
-        ret = vxge_device_revision(vdev);
-        if (ret < 0)
-                goto _out1;
+        vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
 
         SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
 
@@ -3348,7 +3455,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                 vxge_debug_init(VXGE_ERR,
                         "%s: vpath memory allocation failed",
                         vdev->ndev->name);
-                ret = -ENODEV;
+                ret = -ENOMEM;
                 goto _out1;
         }
 
@@ -3369,11 +3476,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
         if (vdev->config.gro_enable)
                 ndev->features |= NETIF_F_GRO;
 
-        if (register_netdev(ndev)) {
+        ret = register_netdev(ndev);
+        if (ret) {
                 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
                         "%s: %s : device registration failed!",
                         ndev->name, __func__);
-                ret = -ENODEV;
                 goto _out2;
         }
 
@@ -3444,6 +3551,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
         /* in 2.6 will call stop() if device is up */
         unregister_netdev(dev);
 
+        kfree(vdev->vpaths);
+
+        /* we are safe to free it now */
+        free_netdev(dev);
+
         vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
                         buf);
         vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
@@ -3799,7 +3911,7 @@ static void __devinit vxge_device_config_init(
                 break;
 
         case MSI_X:
-                device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
+                device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
                 break;
         }
 
@@ -4335,10 +4447,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                 goto _exit1;
         }
 
-        if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
+        ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
+        if (ret) {
                 vxge_debug_init(VXGE_ERR,
                         "%s : request regions failed", __func__);
-                ret = -ENODEV;
                 goto _exit1;
         }
 
@@ -4446,7 +4558,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                 if (!img[i].is_valid)
                         break;
                 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
-                        "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+                        "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
                         VXGE_EPROM_IMG_MAJOR(img[i].version),
                         VXGE_EPROM_IMG_MINOR(img[i].version),
                         VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4755,9 @@ _exit6:
 _exit5:
         vxge_device_unregister(hldev);
 _exit4:
-        pci_disable_sriov(pdev);
+        pci_set_drvdata(pdev, NULL);
         vxge_hw_device_terminate(hldev);
+        pci_disable_sriov(pdev);
 _exit3:
         iounmap(attr.bar0);
 _exit2:
@@ -4655,7 +4768,7 @@ _exit0:
         kfree(ll_config);
         kfree(device_config);
         driver_config->config_dev_cnt--;
-        pci_set_drvdata(pdev, NULL);
+        driver_config->total_dev_cnt--;
 
         return ret;
 }
@@ -4668,45 +4781,34 @@ _exit0:
 static void __devexit vxge_remove(struct pci_dev *pdev)
 {
         struct __vxge_hw_device *hldev;
-        struct vxgedev *vdev = NULL;
-        struct net_device *dev;
-        int i = 0;
+        struct vxgedev *vdev;
+        int i;
 
         hldev = pci_get_drvdata(pdev);
-
         if (hldev == NULL)
                 return;
 
-        dev = hldev->ndev;
-        vdev = netdev_priv(dev);
+        vdev = netdev_priv(hldev->ndev);
 
         vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
-
         vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
                         __func__);
-        vxge_device_unregister(hldev);
 
-        for (i = 0; i < vdev->no_of_vpath; i++) {
+        for (i = 0; i < vdev->no_of_vpath; i++)
                 vxge_free_mac_add_list(&vdev->vpaths[i]);
-                vdev->vpaths[i].mcast_addr_cnt = 0;
-                vdev->vpaths[i].mac_addr_cnt = 0;
-        }
-
-        kfree(vdev->vpaths);
 
+        vxge_device_unregister(hldev);
+        pci_set_drvdata(pdev, NULL);
+        /* Do not call pci_disable_sriov here, as it will break child devices */
+        vxge_hw_device_terminate(hldev);
         iounmap(vdev->bar0);
-
-        /* we are safe to free it now */
-        free_netdev(dev);
+        pci_release_region(pdev, 0);
+        pci_disable_device(pdev);
+        driver_config->config_dev_cnt--;
+        driver_config->total_dev_cnt--;
 
         vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
                         __func__, __LINE__);
-
-        vxge_hw_device_terminate(hldev);
-
-        pci_disable_device(pdev);
-        pci_release_region(pdev, 0);
-        pci_set_drvdata(pdev, NULL);
         vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
                              __LINE__);
 }
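
Note on the pattern used throughout the MSI-X hunks above: with VXGE_HW_INTR_MODE_MSIX_ONE_SHOT the handlers mask the vector, clear its pending bit, do the work, then unmask, and the NAPI poll routine returns a local copy of the packet count because the re-enabled interrupt can fire and restart the poll before the return executes. The following is a minimal, self-contained sketch of that shape for illustration only; struct my_ring and the device_msix_*()/device_poll_rx() helpers are hypothetical stand-ins for the vxge_hw_* calls, not real kernel or vxge APIs.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/io.h>

struct my_ring {
        struct napi_struct napi;
        int vector_no;
        int pkts_processed;     /* assumed to be filled in by the RX completion path */
        int budget;
};

/* Hypothetical hardware helpers standing in for the vxge_hw_* calls. */
extern void device_msix_mask(int vector);
extern void device_msix_clear(int vector);
extern void device_msix_unmask(int vector);
extern void device_poll_rx(struct my_ring *ring);

static irqreturn_t my_rx_msix_handle(int irq, void *dev_id)
{
        struct my_ring *ring = dev_id;

        /* One-shot style: keep the vector quiet while NAPI does the work. */
        device_msix_mask(ring->vector_no);
        device_msix_clear(ring->vector_no);

        napi_schedule(&ring->napi);
        return IRQ_HANDLED;
}

static int my_poll_msix(struct napi_struct *napi, int budget)
{
        struct my_ring *ring = container_of(napi, struct my_ring, napi);
        int pkts_processed;

        ring->budget = budget;
        ring->pkts_processed = 0;
        device_poll_rx(ring);

        /*
         * Copy before unmasking: once the vector is re-enabled the interrupt
         * may fire immediately and restart this poll, overwriting
         * ring->pkts_processed.
         */
        pkts_processed = ring->pkts_processed;

        if (pkts_processed < budget) {
                napi_complete(napi);
                device_msix_unmask(ring->vector_no);
                mmiowb();       /* flush the posted unmask write (pre-5.2 kernels) */
        }
        return pkts_processed;
}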