Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
 -rw-r--r--   drivers/net/e1000/e1000_main.c   447
 1 files changed, 233 insertions, 214 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8be6faee43e..02833af8a0b 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k5-NAPI"
+#define DRV_VERSION "7.3.21-k6-NAPI"
 const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -214,6 +214,17 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 /**
+ * e1000_get_hw_dev - return device
+ * used by hardware layer to print debugging information
+ *
+ **/
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+	return adapter->netdev;
+}
+
+/**
  * e1000_init_module - Driver Registration Routine
  *
  * e1000_init_module is the first routine called when the driver is
@@ -223,18 +234,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 static int __init e1000_init_module(void)
 {
 	int ret;
-	printk(KERN_INFO "%s - version %s\n",
-	       e1000_driver_string, e1000_driver_version);
+	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
 
-	printk(KERN_INFO "%s\n", e1000_copyright);
+	pr_info("%s\n", e1000_copyright);
 
 	ret = pci_register_driver(&e1000_driver);
 	if (copybreak != COPYBREAK_DEFAULT) {
 		if (copybreak == 0)
-			printk(KERN_INFO "e1000: copybreak disabled\n");
+			pr_info("copybreak disabled\n");
 		else
-			printk(KERN_INFO "e1000: copybreak enabled for "
-			       "packets <= %u bytes\n", copybreak);
+			pr_info("copybreak enabled for "
+				"packets <= %u bytes\n", copybreak);
 	}
 	return ret;
 }
@@ -265,8 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 			  netdev);
 	if (err) {
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate interrupt Error: %d\n", err);
+		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
 	}
 
 	return err;
@@ -383,8 +392,6 @@ static void e1000_configure(struct e1000_adapter *adapter)
 		adapter->alloc_rx_buf(adapter, ring,
 				      E1000_DESC_UNUSED(ring));
 	}
-
-	adapter->tx_queue_len = netdev->tx_queue_len;
 }
 
 int e1000_up(struct e1000_adapter *adapter)
@@ -503,7 +510,6 @@ void e1000_down(struct e1000_adapter *adapter)
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
 	netif_carrier_off(netdev);
@@ -651,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
 	ew32(WUC, 0);
 
 	if (e1000_init_hw(hw))
-		DPRINTK(PROBE, ERR, "Hardware Error\n");
+		e_dev_err("Hardware Error\n");
 	e1000_update_mng_vlan(adapter);
 
 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -692,8 +698,7 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 
 	data = kmalloc(eeprom.len, GFP_KERNEL);
 	if (!data) {
-		printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
-		       " data\n");
+		pr_err("Unable to allocate memory to dump EEPROM data\n");
 		return;
 	}
 
@@ -705,30 +710,25 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 		csum_new += data[i] + (data[i + 1] << 8);
 	csum_new = EEPROM_SUM - csum_new;
 
-	printk(KERN_ERR "/*********************/\n");
-	printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
-	printk(KERN_ERR "Calculated              : 0x%04x\n", csum_new);
+	pr_err("/*********************/\n");
+	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
+	pr_err("Calculated              : 0x%04x\n", csum_new);
 
-	printk(KERN_ERR "Offset    Values\n");
-	printk(KERN_ERR "========  ======\n");
+	pr_err("Offset    Values\n");
+	pr_err("========  ======\n");
 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 
-	printk(KERN_ERR "Include this output when contacting your support "
-	       "provider.\n");
-	printk(KERN_ERR "This is not a software error! Something bad "
-	       "happened to your hardware or\n");
-	printk(KERN_ERR "EEPROM image. Ignoring this "
-	       "problem could result in further problems,\n");
-	printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
-	printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
-	       "which is invalid\n");
-	printk(KERN_ERR "and requires you to set the proper MAC "
-	       "address manually before continuing\n");
-	printk(KERN_ERR "to enable this network device.\n");
-	printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
-	       "to your hardware vendor\n");
-	printk(KERN_ERR "or Intel Customer Support.\n");
-	printk(KERN_ERR "/*********************/\n");
+	pr_err("Include this output when contacting your support provider.\n");
+	pr_err("This is not a software error! Something bad happened to\n");
+	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
+	pr_err("result in further problems, possibly loss of data,\n");
+	pr_err("corruption or system hangs!\n");
+	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
+	pr_err("which is invalid and requires you to set the proper MAC\n");
+	pr_err("address manually before continuing to enable this network\n");
+	pr_err("device. Please inspect the EEPROM dump and report the\n");
+	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
+	pr_err("/*********************/\n");
 
 	kfree(data);
 }
@@ -826,16 +826,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (err)
 		return err;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
-				E1000_ERR("No usable DMA configuration, "
-					  "aborting\n");
+				pr_err("No usable DMA config, aborting\n");
 				goto err_dma;
 			}
 		}
@@ -925,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	/* initialize eeprom parameters */
 	if (e1000_init_eeprom_params(hw)) {
-		E1000_ERR("EEPROM initialization failed\n");
+		e_err(probe, "EEPROM initialization failed\n");
 		goto err_eeprom;
 	}
 
@@ -936,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	/* make sure the EEPROM is good */
 	if (e1000_validate_eeprom_checksum(hw) < 0) {
-		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
 		e1000_dump_eeprom(adapter);
 		/*
 		 * set MAC address to all zeroes to invalidate and temporary
@@ -950,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	} else {
 		/* copy the MAC address out of the EEPROM */
 		if (e1000_read_mac_addr(hw))
-			DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+			e_err(probe, "EEPROM Read Error\n");
 	}
 	/* don't block initalization here due to bad MAC address */
 	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
 	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
 
 	if (!is_valid_ether_addr(netdev->perm_addr))
-		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+		e_err(probe, "Invalid MAC Address\n");
 
 	e1000_get_bus_info(hw);
 
@@ -1038,17 +1038,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	adapter->wol = adapter->eeprom_wol;
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
-	/* print bus type/speed/width info */
-	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
-		((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
-		((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
-		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
-		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
-		((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
-
-	printk("%pM\n", netdev->dev_addr);
-
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
 
@@ -1057,10 +1046,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_register;
 
+	/* print bus type/speed/width info */
+	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
+	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
+	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
+		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
+		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
+		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
+	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
+	       netdev->dev_addr);
+
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
-	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
 
 	cards_found++;
 	return 0;
@@ -1160,7 +1159,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 
 	/* identify the MAC */
 	if (e1000_set_mac_type(hw)) {
-		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+		e_err(probe, "Unknown MAC Type\n");
 		return -EIO;
 	}
 
@@ -1193,7 +1192,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 	adapter->num_rx_queues = 1;
 
 	if (e1000_alloc_queues(adapter)) {
-		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		e_err(probe, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
 
@@ -1387,8 +1386,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 	size = sizeof(struct e1000_buffer) * txdr->count;
 	txdr->buffer_info = vmalloc(size);
 	if (!txdr->buffer_info) {
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the transmit descriptor ring\n");
+		e_err(probe, "Unable to allocate memory for the Tx descriptor "
+		      "ring\n");
 		return -ENOMEM;
 	}
 	memset(txdr->buffer_info, 0, size);
@@ -1398,12 +1397,13 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 	txdr->size = ALIGN(txdr->size, 4096);
 
-	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					GFP_KERNEL);
 	if (!txdr->desc) {
 setup_tx_desc_die:
 		vfree(txdr->buffer_info);
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the transmit descriptor ring\n");
+		e_err(probe, "Unable to allocate memory for the Tx descriptor "
+		      "ring\n");
 		return -ENOMEM;
 	}
 
@@ -1411,29 +1411,32 @@ setup_tx_desc_die:
 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
 		void *olddesc = txdr->desc;
 		dma_addr_t olddma = txdr->dma;
-		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
-			"at %p\n", txdr->size, txdr->desc);
+		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
+		      txdr->size, txdr->desc);
 		/* Try again, without freeing the previous */
-		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
 		/* Failed allocation, critical failure */
 		if (!txdr->desc) {
-			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
 			goto setup_tx_desc_die;
 		}
 
 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
 			/* give up */
-			pci_free_consistent(pdev, txdr->size, txdr->desc,
-					    txdr->dma);
-			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
-			DPRINTK(PROBE, ERR,
-				"Unable to allocate aligned memory "
-				"for the transmit descriptor ring\n");
+			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+					  txdr->dma);
+			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
+			e_err(probe, "Unable to allocate aligned memory "
+			      "for the transmit descriptor ring\n");
 			vfree(txdr->buffer_info);
 			return -ENOMEM;
 		} else {
 			/* Free old allocation, new allocation was successful */
-			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
 		}
 	}
 	memset(txdr->desc, 0, txdr->size);
@@ -1459,8 +1462,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
 		if (err) {
-			DPRINTK(PROBE, ERR,
-				"Allocation for Tx Queue %u failed\n", i);
+			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
 			for (i-- ; i >= 0; i--)
 				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
@@ -1580,8 +1582,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	size = sizeof(struct e1000_buffer) * rxdr->count;
 	rxdr->buffer_info = vmalloc(size);
 	if (!rxdr->buffer_info) {
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the receive descriptor ring\n");
+		e_err(probe, "Unable to allocate memory for the Rx descriptor "
+		      "ring\n");
 		return -ENOMEM;
 	}
 	memset(rxdr->buffer_info, 0, size);
@@ -1593,11 +1595,12 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	rxdr->size = rxdr->count * desc_len;
 	rxdr->size = ALIGN(rxdr->size, 4096);
 
-	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
 	if (!rxdr->desc) {
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the receive descriptor ring\n");
+		e_err(probe, "Unable to allocate memory for the Rx descriptor "
+		      "ring\n");
setup_rx_desc_die:
 		vfree(rxdr->buffer_info);
 		return -ENOMEM;
@@ -1607,31 +1610,33 @@ setup_rx_desc_die:
 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
 		void *olddesc = rxdr->desc;
 		dma_addr_t olddma = rxdr->dma;
-		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
-			"at %p\n", rxdr->size, rxdr->desc);
+		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
+		      rxdr->size, rxdr->desc);
 		/* Try again, without freeing the previous */
-		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
 		/* Failed allocation, critical failure */
 		if (!rxdr->desc) {
-			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
-			DPRINTK(PROBE, ERR,
-				"Unable to allocate memory "
-				"for the receive descriptor ring\n");
+			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
+			e_err(probe, "Unable to allocate memory for the Rx "
+			      "descriptor ring\n");
 			goto setup_rx_desc_die;
 		}
 
 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
 			/* give up */
-			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
-					    rxdr->dma);
-			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
-			DPRINTK(PROBE, ERR,
-				"Unable to allocate aligned memory "
-				"for the receive descriptor ring\n");
+			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
+			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
+			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
 			goto setup_rx_desc_die;
 		} else {
 			/* Free old allocation, new allocation was successful */
-			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
 		}
 	}
 	memset(rxdr->desc, 0, rxdr->size);
@@ -1658,8 +1663,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
 		if (err) {
-			DPRINTK(PROBE, ERR,
-				"Allocation for Rx Queue %u failed\n", i);
+			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
 			for (i-- ; i >= 0; i--)
 				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
@@ -1807,7 +1811,8 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
 	vfree(tx_ring->buffer_info);
 	tx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -1832,12 +1837,12 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev, buffer_info->dma,
-				       buffer_info->length, PCI_DMA_TODEVICE);
+			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev, buffer_info->dma,
+			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
@@ -1915,7 +1920,8 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -1955,14 +1961,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma &&
 		    adapter->clean_rx == e1000_clean_rx_irq) {
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 buffer_info->length,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 		} else if (buffer_info->dma &&
 			   adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
-			pci_unmap_page(pdev, buffer_info->dma,
-				       buffer_info->length,
-				       PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, buffer_info->dma,
				       buffer_info->length,
				       DMA_FROM_DEVICE);
 		}
 
 		buffer_info->dma = 0;
@@ -2101,7 +2107,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 	struct e1000_hw *hw = &adapter->hw;
 	struct netdev_hw_addr *ha;
 	bool use_uc = false;
-	struct dev_addr_list *mc_ptr;
 	u32 rctl;
 	u32 hash_value;
 	int i, rar_entries = E1000_RAR_ENTRIES;
@@ -2109,7 +2114,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
 
 	if (!mcarray) {
-		DPRINTK(PROBE, ERR, "memory allocation failed\n");
+		e_err(probe, "memory allocation failed\n");
 		return;
 	}
 
@@ -2159,19 +2164,17 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 		e1000_rar_set(hw, ha->addr, i++);
 	}
 
-	WARN_ON(i == rar_entries);
-
-	netdev_for_each_mc_addr(mc_ptr, netdev) {
+	netdev_for_each_mc_addr(ha, netdev) {
 		if (i == rar_entries) {
 			/* load any remaining addresses into the hash table */
 			u32 hash_reg, hash_bit, mta;
-			hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+			hash_value = e1000_hash_mc_addr(hw, ha->addr);
 			hash_reg = (hash_value >> 5) & 0x7F;
 			hash_bit = hash_value & 0x1F;
 			mta = (1 << hash_bit);
 			mcarray[hash_reg] |= mta;
 		} else {
-			e1000_rar_set(hw, mc_ptr->da_addr, i++);
+			e1000_rar_set(hw, ha->addr, i++);
 		}
 	}
 
@@ -2305,30 +2308,26 @@ static void e1000_watchdog(unsigned long data)
						   &adapter->link_duplex);
 			ctrl = er32(CTRL);
-			printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
-			       "Flow Control: %s\n",
-			       netdev->name,
-			       adapter->link_speed,
-			       adapter->link_duplex == FULL_DUPLEX ?
-			       "Full Duplex" : "Half Duplex",
-			       ((ctrl & E1000_CTRL_TFCE) && (ctrl &
-			       E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
-			       E1000_CTRL_RFCE) ? "RX" : ((ctrl &
-			       E1000_CTRL_TFCE) ? "TX" : "None" )));
-
-			/* tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			pr_info("%s NIC Link is Up %d Mbps %s, "
+				"Flow Control: %s\n",
+				netdev->name,
+				adapter->link_speed,
+				adapter->link_duplex == FULL_DUPLEX ?
+				"Full Duplex" : "Half Duplex",
+				((ctrl & E1000_CTRL_TFCE) && (ctrl &
				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				E1000_CTRL_TFCE) ? "TX" : "None")));
+
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
 				txb2b = false;
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
 				txb2b = false;
-				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
 			}
@@ -2348,8 +2347,8 @@ static void e1000_watchdog(unsigned long data)
 		if (netif_carrier_ok(netdev)) {
 			adapter->link_speed = 0;
 			adapter->link_duplex = 0;
-			printk(KERN_INFO "e1000: %s NIC Link is Down\n",
-			       netdev->name);
+			pr_info("%s NIC Link is Down\n",
				netdev->name);
 			netif_carrier_off(netdev);
 
 			if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -2388,6 +2387,22 @@ link_up:
 		}
 	}
 
+	/* Simple mode for Interrupt Throttle Rate (ITR) */
+	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
+		/*
+		 * Symmetric Tx/Rx gets a reduced ITR=2000;
+		 * Total asymmetrical Tx or Rx gets ITR=8000;
+		 * everyone else is between 2000-8000.
+		 */
+		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
+		u32 dif = (adapter->gotcl > adapter->gorcl ?
			    adapter->gotcl - adapter->gorcl :
			    adapter->gorcl - adapter->gotcl) / 10000;
+		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+		ew32(ITR, 1000000000 / (itr * 256));
+	}
+
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	ew32(ICS, E1000_ICS_RXDMT0);
 
@@ -2532,8 +2547,6 @@ set_itr_now:
 		adapter->itr = new_itr;
 		ew32(ITR, 1000000000 / (new_itr * 256));
 	}
-
-	return;
 }
 
 #define E1000_TX_FLAGS_CSUM		0x00000001
@@ -2639,8 +2652,8 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
 		break;
 	default:
 		if (unlikely(net_ratelimit()))
-			DPRINTK(DRV, WARNING,
-				"checksum_partial proto=%x!\n", skb->protocol);
+			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
 		break;
 	}
 
@@ -2722,9 +2735,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 		/* set time_stamp *before* dma to help avoid a possible race */
 		buffer_info->time_stamp = jiffies;
 		buffer_info->mapped_as_page = false;
-		buffer_info->dma = pci_map_single(pdev, skb->data + offset,
-						  size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
 		buffer_info->next_to_watch = i;
 
@@ -2768,10 +2782,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
 			buffer_info->mapped_as_page = true;
-			buffer_info->dma = pci_map_page(pdev, frag->page,
+			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
							offset, size,
-							PCI_DMA_TODEVICE);
-			if (pci_dma_mapping_error(pdev, buffer_info->dma))
+							DMA_TO_DEVICE);
+			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 				goto dma_error;
 			buffer_info->next_to_watch = i;
 
@@ -2937,7 +2951,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
-	unsigned int len = skb->len - skb->data_len;
+	unsigned int len = skb_headlen(skb);
 	unsigned int nr_frags;
 	unsigned int mss;
 	int count = 0;
@@ -2983,12 +2997,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				/* fall through */
 				pull_size = min((unsigned int)4, skb->data_len);
 				if (!__pskb_pull_tail(skb, pull_size)) {
-					DPRINTK(DRV, ERR,
-						"__pskb_pull_tail failed.\n");
+					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
 					dev_kfree_skb_any(skb);
 					return NETDEV_TX_OK;
 				}
-				len = skb->len - skb->data_len;
+				len = skb_headlen(skb);
 				break;
 			default:
 				/* do nothing */
@@ -3132,7 +3146,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 
 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+		e_err(probe, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
 
@@ -3140,7 +3154,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	switch (hw->mac_type) {
 	case e1000_undefined ... e1000_82542_rev2_1:
 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
-			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+			e_err(probe, "Jumbo Frames not supported.\n");
 			return -EINVAL;
 		}
 		break;
@@ -3178,8 +3192,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	    (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 
-	printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
-	       netdev->name, netdev->mtu, new_mtu);
+	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
 
 	if (netif_running(netdev))
@@ -3492,17 +3506,17 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 
 			/* detected Tx unit hang */
-			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
-					"  Tx Queue             <%lu>\n"
-					"  TDH                  <%x>\n"
-					"  TDT                  <%x>\n"
-					"  next_to_use          <%x>\n"
-					"  next_to_clean        <%x>\n"
-					"buffer_info[next_to_clean]\n"
-					"  time_stamp           <%lx>\n"
-					"  next_to_watch        <%x>\n"
-					"  jiffies              <%lx>\n"
-					"  next_to_watch.status <%x>\n",
+			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
 				(unsigned long)((tx_ring - adapter->tx_ring) /
					sizeof(struct e1000_tx_ring)),
 				readl(hw->hw_addr + tx_ring->tdh),
@@ -3642,8 +3656,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 		cleaned = true;
 		cleaned_count++;
-		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		length = le16_to_cpu(rx_desc->length);
@@ -3741,7 +3755,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 
 		/* eth type trans needs skb->data to point to something */
 		if (!pskb_may_pull(skb, ETH_HLEN)) {
-			DPRINTK(DRV, ERR, "pskb_may_pull failed.\n");
+			e_err(drv, "pskb_may_pull failed.\n");
 			dev_kfree_skb(skb);
 			goto next_desc;
 		}
@@ -3776,6 +3790,31 @@ next_desc:
 	return cleaned;
 }
 
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void e1000_check_copybreak(struct net_device *netdev,
				  struct e1000_buffer *buffer_info,
				  u32 length, struct sk_buff **skb)
+{
+	struct sk_buff *new_skb;
+
+	if (length > copybreak)
+		return;
+
+	new_skb = netdev_alloc_skb_ip_align(netdev, length);
+	if (!new_skb)
+		return;
+
+	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
+	/* save the skb in buffer_info as good */
+	buffer_info->skb = *skb;
+	*skb = new_skb;
+}
+
 /**
  * e1000_clean_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
@@ -3825,8 +3864,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		cleaned = true;
 		cleaned_count++;
-		pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		length = le16_to_cpu(rx_desc->length);
@@ -3841,8 +3880,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		if (adapter->discarding) {
 			/* All receives must fit into a single buffer */
-			E1000_DBG("%s: Receive packet consumed multiple"
-				  " buffers\n", netdev->name);
+			e_dbg("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
 			if (status & E1000_RXD_STAT_EOP)
@@ -3875,26 +3913,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		total_rx_bytes += length;
 		total_rx_packets++;
 
-		/* code added for copybreak, this should improve
-		 * performance for small packets with large amounts
-		 * of reassembly being done in the stack */
-		if (length < copybreak) {
-			struct sk_buff *new_skb =
-			    netdev_alloc_skb_ip_align(netdev, length);
-			if (new_skb) {
-				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
-				/* save the skb in buffer_info as good */
-				buffer_info->skb = skb;
-				skb = new_skb;
-			}
-			/* else just continue with the old one */
-		}
-		/* end copybreak code */
+		e1000_check_copybreak(netdev, buffer_info, length, &skb);
+
 		skb_put(skb, length);
 
 		/* Receive Checksum Offload */
@@ -3972,8 +3992,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 		/* Fix for errata 23, can't cross 64kB boundary */
 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 			struct sk_buff *oldskb = skb;
-			DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
-					     "at %p\n", bufsz, skb->data);
+			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
@@ -4006,11 +4026,11 @@ check_page:
 		}
 
 		if (!buffer_info->dma) {
-			buffer_info->dma = pci_map_page(pdev,
+			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
-							buffer_info->length,
-							PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+							buffer_info->length,
							DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 				put_page(buffer_info->page);
 				dev_kfree_skb(skb);
 				buffer_info->page = NULL;
@@ -4081,8 +4101,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		/* Fix for errata 23, can't cross 64kB boundary */
 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 			struct sk_buff *oldskb = skb;
-			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
-					     "at %p\n", bufsz, skb->data);
+			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
@@ -4106,11 +4126,11 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
-		buffer_info->dma = pci_map_single(pdev,
+		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
-						  PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+						  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 			dev_kfree_skb(skb);
 			buffer_info->skb = NULL;
 			buffer_info->dma = 0;
@@ -4127,16 +4147,15 @@ map_skb:
 		if (!e1000_check_64k_bound(adapter,
					(void *)(unsigned long)buffer_info->dma,
					adapter->rx_buffer_len)) {
-			DPRINTK(RX_ERR, ERR,
-				"dma align check failed: %u bytes at %p\n",
-				adapter->rx_buffer_len,
-				(void *)(unsigned long)buffer_info->dma);
+			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
 			dev_kfree_skb(skb);
 			buffer_info->skb = NULL;
 
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 
 			adapter->alloc_rx_buff_failed++;
@@ -4342,7 +4361,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
 	int ret_val = pci_set_mwi(adapter->pdev);
 
 	if (ret_val)
-		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+		e_err(probe, "Error in setting MWI\n");
 }
 
 void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4473,7 +4492,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
 	/* Fiber NICs only allow 1000 gbps Full duplex */
 	if ((hw->media_type == e1000_media_type_fiber) &&
 	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
-		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		e_err(probe, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 
@@ -4496,7 +4515,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
 		break;
 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
 	default:
-		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		e_err(probe, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -4619,7 +4638,7 @@ static int e1000_resume(struct pci_dev *pdev)
 	else
 		err = pci_enable_device_mem(pdev);
 	if (err) {
-		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+		pr_err("Cannot enable PCI device from suspend\n");
 		return err;
 	}
 	pci_set_master(pdev);
@@ -4722,7 +4741,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
 	else
 		err = pci_enable_device_mem(pdev);
 	if (err) {
-		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+		pr_err("Cannot re-enable PCI device after reset.\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	pci_set_master(pdev);
@@ -4753,7 +4772,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
 
 	if (netif_running(netdev)) {
 		if (e1000_up(adapter)) {
-			printk("e1000: can't bring device back up after reset\n");
+			pr_info("can't bring device back up after reset\n");
			return;
		}
	}
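
Note on the "Simple mode for Interrupt Throttle Rate (ITR)" hunk added to e1000_watchdog(): it derives a target interrupt rate from the good-octets-transmitted/received counters (gotcl/gorcl), then converts that rate into the 256 ns units the ITR register expects. The standalone C sketch below only reproduces that arithmetic for illustration; the sample counter values and the helper name e1000_simple_itr are made up here and are not part of the driver.

/* Userspace sketch of the simple-mode ITR arithmetic shown in the patch.
 * Symmetric Tx/Rx traffic lands near 2000 ints/s, fully one-sided traffic
 * near 8000 ints/s, mixed loads in between. */
#include <stdio.h>
#include <stdint.h>

static uint32_t e1000_simple_itr(uint32_t gotcl, uint32_t gorcl)
{
	uint32_t goc = (gotcl + gorcl) / 10000;
	uint32_t dif = (gotcl > gorcl ? gotcl - gorcl : gorcl - gotcl) / 10000;
	uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

	/* value written to the ITR register (interval in 256 ns units) */
	return 1000000000 / (itr * 256);
}

int main(void)
{
	/* symmetric load: itr = 2000, register value ~1953 */
	printf("symmetric : ITR reg = %u\n", e1000_simple_itr(50000000, 50000000));
	/* one-sided load: itr = 8000, register value ~488 */
	printf("one-sided : ITR reg = %u\n", e1000_simple_itr(50000000, 0));
	return 0;
}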
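
Note on the new e1000_check_copybreak() helper: as its comment in the patch says, frames no longer than the copybreak module parameter are copied into a right-sized buffer so the large receive buffer can stay on the ring. The sketch below is a simplified userspace illustration of that policy only; struct buf, rx_check_copybreak and COPYBREAK are stand-ins invented here, not kernel or driver API.

/* Illustration of the copybreak policy: copy small frames, pass large
 * ones through so the original receive buffer can be recycled. */
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 256 /* analogous to the driver's copybreak parameter */

struct buf {
	unsigned char *data;
	size_t len;
};

static struct buf rx_check_copybreak(struct buf rx, size_t frame_len)
{
	struct buf out = { rx.data, frame_len };

	if (frame_len <= COPYBREAK) {
		unsigned char *copy = malloc(frame_len);
		if (copy) {
			memcpy(copy, rx.data, frame_len);
			out.data = copy;
			/* rx.data remains owned by the ring and is reused */
		}
	}
	return out;
}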