From 16ecf85a5ca7345efbcbb2de76607db0f7ec9049 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 27 Aug 2008 01:14:46 -0700 Subject: e1000: fix stack size Here's the patch. It shrinks the stack from 1152 bytes to 192 bytes (the first version, that only did the e1000_option part, got it down to 600 bytes). About half comes from not using multiple "e1000_option" structures, the other half comes from turning the "e1000_opt_list[]" arrays into "static const" instead, so that gcc doesn't copy them onto the stack. Signed-off-by: Linus Torvalds Reveiewed-by: Auke Kok Tested-by: Emil Tantilov Signed-off-by: Jeff Kirsher Signed-off-by: Linus Torvalds --- drivers/net/e1000/e1000_param.c | 81 +++++++++++++++++++++++------------------ 1 file changed, 45 insertions(+), 36 deletions(-) (limited to 'drivers/net/e1000') diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index b9f90a5d3d4..213437d1315 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c @@ -208,7 +208,7 @@ struct e1000_option { } r; struct { /* list_option info */ int nr; - struct e1000_opt_list { int i; char *str; } *p; + const struct e1000_opt_list { int i; char *str; } *p; } l; } arg; }; @@ -242,7 +242,7 @@ static int __devinit e1000_validate_option(unsigned int *value, break; case list_option: { int i; - struct e1000_opt_list *ent; + const struct e1000_opt_list *ent; for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; @@ -279,7 +279,9 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter); void __devinit e1000_check_options(struct e1000_adapter *adapter) { + struct e1000_option opt; int bd = adapter->bd_number; + if (bd >= E1000_MAX_NIC) { DPRINTK(PROBE, NOTICE, "Warning: no configuration for board #%i\n", bd); @@ -287,19 +289,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) } { /* Transmit Descriptor Count */ - struct e1000_option opt = { + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { .type = range_option, .name = "Transmit Descriptors", .err = "using default of " __MODULE_STRING(E1000_DEFAULT_TXD), .def = E1000_DEFAULT_TXD, - .arg = { .r = { .min = E1000_MIN_TXD }} + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD + }} }; - struct e1000_tx_ring *tx_ring = adapter->tx_ring; - int i; - e1000_mac_type mac_type = adapter->hw.mac_type; - opt.arg.r.max = mac_type < e1000_82544 ? - E1000_MAX_TXD : E1000_MAX_82544_TXD; if (num_TxDescriptors > bd) { tx_ring->count = TxDescriptors[bd]; @@ -313,19 +317,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) tx_ring[i].count = tx_ring->count; } { /* Receive Descriptor Count */ - struct e1000_option opt = { + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { .type = range_option, .name = "Receive Descriptors", .err = "using default of " __MODULE_STRING(E1000_DEFAULT_RXD), .def = E1000_DEFAULT_RXD, - .arg = { .r = { .min = E1000_MIN_RXD }} + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD + }} }; - struct e1000_rx_ring *rx_ring = adapter->rx_ring; - int i; - e1000_mac_type mac_type = adapter->hw.mac_type; - opt.arg.r.max = mac_type < e1000_82544 ? 
E1000_MAX_RXD : - E1000_MAX_82544_RXD; if (num_RxDescriptors > bd) { rx_ring->count = RxDescriptors[bd]; @@ -339,7 +345,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) rx_ring[i].count = rx_ring->count; } { /* Checksum Offload Enable/Disable */ - struct e1000_option opt = { + opt = (struct e1000_option) { .type = enable_option, .name = "Checksum Offload", .err = "defaulting to Enabled", @@ -363,7 +369,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) { E1000_FC_FULL, "Flow Control Enabled" }, { E1000_FC_DEFAULT, "Flow Control Hardware Default" }}; - struct e1000_option opt = { + opt = (struct e1000_option) { .type = list_option, .name = "Flow Control", .err = "reading default settings from EEPROM", @@ -381,7 +387,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) } } { /* Transmit Interrupt Delay */ - struct e1000_option opt = { + opt = (struct e1000_option) { .type = range_option, .name = "Transmit Interrupt Delay", .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), @@ -399,7 +405,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) } } { /* Transmit Absolute Interrupt Delay */ - struct e1000_option opt = { + opt = (struct e1000_option) { .type = range_option, .name = "Transmit Absolute Interrupt Delay", .err = "using default of " __MODULE_STRING(DEFAULT_TADV), @@ -417,7 +423,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) } } { /* Receive Interrupt Delay */ - struct e1000_option opt = { + opt = (struct e1000_option) { .type = range_option, .name = "Receive Interrupt Delay", .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), @@ -435,7 +441,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) } } { /* Receive Absolute Interrupt Delay */ - struct e1000_option opt = { + opt = (struct e1000_option) { .type = range_option, .name = "Receive Absolute Interrupt Delay", .err = "using default of " __MODULE_STRING(DEFAULT_RADV), @@ -453,7 +459,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) } } { /* Interrupt Throttling Rate */ - struct e1000_option opt = { + opt = (struct e1000_option) { .type = range_option, .name = "Interrupt Throttling Rate (ints/sec)", .err = "using default of " __MODULE_STRING(DEFAULT_ITR), @@ -497,7 +503,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) } } { /* Smart Power Down */ - struct e1000_option opt = { + opt = (struct e1000_option) { .type = enable_option, .name = "PHY Smart Power Down", .err = "defaulting to Disabled", @@ -513,7 +519,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) } } { /* Kumeran Lock Loss Workaround */ - struct e1000_option opt = { + opt = (struct e1000_option) { .type = enable_option, .name = "Kumeran Lock Loss Workaround", .err = "defaulting to Enabled", @@ -578,16 +584,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter) static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) { + struct e1000_option opt; unsigned int speed, dplx, an; int bd = adapter->bd_number; { /* Speed */ - struct e1000_opt_list speed_list[] = {{ 0, "" }, - { SPEED_10, "" }, - { SPEED_100, "" }, - { SPEED_1000, "" }}; + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; - struct e1000_option opt = { + opt = (struct e1000_option) { .type = list_option, .name = "Speed", .err = "parameter ignored", @@ -604,11 +612,12 @@ static void __devinit 
e1000_check_copper_options(struct e1000_adapter *adapter) } } { /* Duplex */ - struct e1000_opt_list dplx_list[] = {{ 0, "" }, - { HALF_DUPLEX, "" }, - { FULL_DUPLEX, "" }}; + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; - struct e1000_option opt = { + opt = (struct e1000_option) { .type = list_option, .name = "Duplex", .err = "parameter ignored", @@ -637,7 +646,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) "parameter ignored\n"); adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; } else { /* Autoneg */ - struct e1000_opt_list an_list[] = + static const struct e1000_opt_list an_list[] = #define AA "AutoNeg advertising " {{ 0x01, AA "10/HD" }, { 0x02, AA "10/FD" }, @@ -671,7 +680,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; - struct e1000_option opt = { + opt = (struct e1000_option) { .type = list_option, .name = "AutoNeg", .err = "parameter ignored", -- cgit v1.2.3-70-g09d2 From 78566fecbb12a7616ae9a88b2ffbc8062c4a89e3 Mon Sep 17 00:00:00 2001 From: Christopher Li Date: Fri, 5 Sep 2008 14:04:05 -0700 Subject: e1000: prevent corruption of EEPROM/NVM Andrey reports e1000 corruption, and that a patch in vmware's ESX fixed it. The EEPROM corruption is triggered by concurrent access of the EEPROM read/write. Putting a lock around it solve the problem. [akpm@linux-foundation.org: use DEFINE_SPINLOCK to avoid confusing lockdep] Signed-off-by: Christopher Li Reported-by: Andrey Borzenkov Cc: Zach Amsden Cc: Pratap Subrahmanyam Cc: Jeff Kirsher Cc: Jesse Brandeburg Cc: Bruce Allan Cc: PJ Waskiewicz Cc: John Ronciak Cc: Jeff Garzik Signed-off-by: Andrew Morton Signed-off-by: Jeff Garzik --- drivers/net/e1000/e1000_hw.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers/net/e1000') diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 9d6edf3e73f..d04eef53571 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -144,6 +144,8 @@ static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer); static u8 e1000_calculate_mng_checksum(char *buffer, u32 length); static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex); static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); /* IGP cable length table */ static const @@ -168,6 +170,8 @@ u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 124}; +static DEFINE_SPINLOCK(e1000_eeprom_lock); + /****************************************************************************** * Set the phy type member in the hw struct. 
* @@ -4903,6 +4907,15 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) * words - number of words to read *****************************************************************************/ s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + spin_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + spin_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_eeprom_info *eeprom = &hw->eeprom; u32 i = 0; @@ -5235,6 +5248,16 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) * EEPROM will most likely contain an invalid checksum. *****************************************************************************/ s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + spin_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + spin_unlock(&e1000_eeprom_lock); + return ret; +} + + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_eeprom_info *eeprom = &hw->eeprom; s32 status = 0; -- cgit v1.2.3-70-g09d2 From 630b25cdf4e3f8c0a11eb04fc8436cc36653cd58 Mon Sep 17 00:00:00 2001 From: "Brandeburg, Jesse" Date: Tue, 16 Sep 2008 13:01:28 -0700 Subject: e1000: remove unused Kconfig option for disabling packet split Since the e1000/e1000e split, no hardware supported by e1000 supports packet split, just remove the Kconfig option and associated code from the driver. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Garzik --- drivers/net/Kconfig | 9 - drivers/net/e1000/e1000.h | 17 -- drivers/net/e1000/e1000_main.c | 416 +---------------------------------------- 3 files changed, 9 insertions(+), 433 deletions(-) (limited to 'drivers/net/e1000') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f6fd0be9ef5..2d6a060d92e 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1938,15 +1938,6 @@ config E1000 To compile this driver as a module, choose M here. The module will be called e1000. -config E1000_DISABLE_PACKET_SPLIT - bool "Disable Packet Split for PCI express adapters" - depends on E1000 - help - Say Y here if you want to use the legacy receive path for PCI express - hardware. - - If in doubt, say N. 
- config E1000E tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" depends on PCI && (!SPARC32 || BROKEN) diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 19e317eaf5b..62f62970f97 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -155,8 +155,6 @@ do { \ #endif #define E1000_MNG_VLAN_NONE (-1) -/* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ @@ -168,14 +166,6 @@ struct e1000_buffer { u16 next_to_watch; }; -struct e1000_ps_page { - struct page *ps_page[PS_PAGE_BUFFERS]; -}; - -struct e1000_ps_page_dma { - u64 ps_page_dma[PS_PAGE_BUFFERS]; -}; - struct e1000_tx_ring { /* pointer to the descriptor ring memory */ void *desc; @@ -213,9 +203,6 @@ struct e1000_rx_ring { unsigned int next_to_clean; /* array of buffer information structs */ struct e1000_buffer *buffer_info; - /* arrays of page information for packet split */ - struct e1000_ps_page *ps_page; - struct e1000_ps_page_dma *ps_page_dma; /* cpu for rx queue */ int cpu; @@ -228,8 +215,6 @@ struct e1000_rx_ring { ((((R)->next_to_clean > (R)->next_to_use) \ ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) -#define E1000_RX_DESC_PS(R, i) \ - (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) #define E1000_RX_DESC_EXT(R, i) \ (&(((union e1000_rx_desc_extended *)((R).desc))[i])) #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) @@ -311,10 +296,8 @@ struct e1000_adapter { u32 rx_int_delay; u32 rx_abs_int_delay; bool rx_csum; - unsigned int rx_ps_pages; u32 gorcl; u64 gorcl_old; - u16 rx_ps_bsize0; /* OS defined structs */ struct net_device *netdev; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index ad6da7b67e5..2ab44db29fa 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -137,15 +137,9 @@ static int e1000_clean(struct napi_struct *napi, int budget); static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do); -static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int *work_done, int work_to_do); static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int cleaned_count); -static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count); static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); @@ -1331,7 +1325,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; - adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; @@ -1815,26 +1808,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, } memset(rxdr->buffer_info, 0, size); - rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page), - GFP_KERNEL); - if (!rxdr->ps_page) { - vfree(rxdr->buffer_info); - DPRINTK(PROBE, ERR, - "Unable to allocate memory for the receive descriptor ring\n"); - return -ENOMEM; - } - - rxdr->ps_page_dma = kcalloc(rxdr->count, - sizeof(struct 
e1000_ps_page_dma), - GFP_KERNEL); - if (!rxdr->ps_page_dma) { - vfree(rxdr->buffer_info); - kfree(rxdr->ps_page); - DPRINTK(PROBE, ERR, - "Unable to allocate memory for the receive descriptor ring\n"); - return -ENOMEM; - } - if (hw->mac_type <= e1000_82547_rev_2) desc_len = sizeof(struct e1000_rx_desc); else @@ -1852,8 +1825,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, "Unable to allocate memory for the receive descriptor ring\n"); setup_rx_desc_die: vfree(rxdr->buffer_info); - kfree(rxdr->ps_page); - kfree(rxdr->ps_page_dma); return -ENOMEM; } @@ -1932,11 +1903,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) static void e1000_setup_rctl(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 rctl, rfctl; - u32 psrctl = 0; -#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT - u32 pages = 0; -#endif + u32 rctl; rctl = er32(RCTL); @@ -1988,55 +1955,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) break; } -#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT - /* 82571 and greater support packet-split where the protocol - * header is placed in skb->data and the packet data is - * placed in pages hanging off of skb_shinfo(skb)->nr_frags. - * In the case of a non-split, skb->data is linearly filled, - * followed by the page buffers. Therefore, skb->data is - * sized to hold the largest protocol header. - */ - /* allocations using alloc_page take too long for regular MTU - * so only enable packet split for jumbo frames */ - pages = PAGE_USE_COUNT(adapter->netdev->mtu); - if ((hw->mac_type >= e1000_82571) && (pages <= 3) && - PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE)) - adapter->rx_ps_pages = pages; - else - adapter->rx_ps_pages = 0; -#endif - if (adapter->rx_ps_pages) { - /* Configure extra packet-split registers */ - rfctl = er32(RFCTL); - rfctl |= E1000_RFCTL_EXTEN; - /* disable packet split support for IPv6 extension headers, - * because some malformed IPv6 headers can hang the RX */ - rfctl |= (E1000_RFCTL_IPV6_EX_DIS | - E1000_RFCTL_NEW_IPV6_EXT_DIS); - - ew32(RFCTL, rfctl); - - rctl |= E1000_RCTL_DTYP_PS; - - psrctl |= adapter->rx_ps_bsize0 >> - E1000_PSRCTL_BSIZE0_SHIFT; - - switch (adapter->rx_ps_pages) { - case 3: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE3_SHIFT; - case 2: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE2_SHIFT; - case 1: - psrctl |= PAGE_SIZE >> - E1000_PSRCTL_BSIZE1_SHIFT; - break; - } - - ew32(PSRCTL, psrctl); - } - ew32(RCTL, rctl); } @@ -2053,18 +1971,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rdlen, rctl, rxcsum, ctrl_ext; - if (adapter->rx_ps_pages) { - /* this is a 32 byte descriptor */ - rdlen = adapter->rx_ring[0].count * - sizeof(union e1000_rx_desc_packet_split); - adapter->clean_rx = e1000_clean_rx_irq_ps; - adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; - } else { - rdlen = adapter->rx_ring[0].count * - sizeof(struct e1000_rx_desc); - adapter->clean_rx = e1000_clean_rx_irq; - adapter->alloc_rx_buf = e1000_alloc_rx_buffers; - } + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; /* disable receives while setting up the descriptors */ rctl = er32(RCTL); @@ -2109,28 +2019,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) /* Enable 82543 Receive Checksum Offload for TCP and UDP */ if (hw->mac_type >= e1000_82543) { rxcsum = er32(RXCSUM); - if (adapter->rx_csum) { + if (adapter->rx_csum) rxcsum |= 
E1000_RXCSUM_TUOFL; - - /* Enable 82571 IPv4 payload checksum for UDP fragments - * Must be used in conjunction with packet-split. */ - if ((hw->mac_type >= e1000_82571) && - (adapter->rx_ps_pages)) { - rxcsum |= E1000_RXCSUM_IPPCSE; - } - } else { - rxcsum &= ~E1000_RXCSUM_TUOFL; + else /* don't need to clear IPPCSE as it defaults to 0 */ - } + rxcsum &= ~E1000_RXCSUM_TUOFL; ew32(RXCSUM, rxcsum); } - /* enable early receives on 82573, only takes effect if using > 2048 - * byte total frame size. for example only for jumbo frames */ -#define E1000_ERT_2048 0x100 - if (hw->mac_type == e1000_82573) - ew32(ERT, E1000_ERT_2048); - /* Enable Receives */ ew32(RCTL, rctl); } @@ -2256,10 +2152,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter, vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; - kfree(rx_ring->ps_page); - rx_ring->ps_page = NULL; - kfree(rx_ring->ps_page_dma); - rx_ring->ps_page_dma = NULL; pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); @@ -2292,11 +2184,9 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, { struct e1000_hw *hw = &adapter->hw; struct e1000_buffer *buffer_info; - struct e1000_ps_page *ps_page; - struct e1000_ps_page_dma *ps_page_dma; struct pci_dev *pdev = adapter->pdev; unsigned long size; - unsigned int i, j; + unsigned int i; /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { @@ -2310,25 +2200,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } - ps_page = &rx_ring->ps_page[i]; - ps_page_dma = &rx_ring->ps_page_dma[i]; - for (j = 0; j < adapter->rx_ps_pages; j++) { - if (!ps_page->ps_page[j]) break; - pci_unmap_page(pdev, - ps_page_dma->ps_page_dma[j], - PAGE_SIZE, PCI_DMA_FROMDEVICE); - ps_page_dma->ps_page_dma[j] = 0; - put_page(ps_page->ps_page[j]); - ps_page->ps_page[j] = NULL; - } } size = sizeof(struct e1000_buffer) * rx_ring->count; memset(rx_ring->buffer_info, 0, size); - size = sizeof(struct e1000_ps_page) * rx_ring->count; - memset(rx_ring->ps_page, 0, size); - size = sizeof(struct e1000_ps_page_dma) * rx_ring->count; - memset(rx_ring->ps_page_dma, 0, size); /* Zero out the descriptor ring */ @@ -4234,181 +4109,6 @@ next_desc: return cleaned; } -/** - * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split - * @adapter: board private structure - **/ - -static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int *work_done, int work_to_do) -{ - union e1000_rx_desc_packet_split *rx_desc, *next_rxd; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_buffer *buffer_info, *next_buffer; - struct e1000_ps_page *ps_page; - struct e1000_ps_page_dma *ps_page_dma; - struct sk_buff *skb; - unsigned int i, j; - u32 length, staterr; - int cleaned_count = 0; - bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; - - i = rx_ring->next_to_clean; - rx_desc = E1000_RX_DESC_PS(*rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.middle.status_error); - buffer_info = &rx_ring->buffer_info[i]; - - while (staterr & E1000_RXD_STAT_DD) { - ps_page = &rx_ring->ps_page[i]; - ps_page_dma = &rx_ring->ps_page_dma[i]; - - if (unlikely(*work_done >= work_to_do)) - break; - (*work_done)++; - - skb = buffer_info->skb; - - /* in the packet split case this is header only */ - prefetch(skb->data - NET_IP_ALIGN); - - if (++i == rx_ring->count) i = 0; - next_rxd = E1000_RX_DESC_PS(*rx_ring, 
i); - prefetch(next_rxd); - - next_buffer = &rx_ring->buffer_info[i]; - - cleaned = true; - cleaned_count++; - pci_unmap_single(pdev, buffer_info->dma, - buffer_info->length, - PCI_DMA_FROMDEVICE); - - if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) { - E1000_DBG("%s: Packet Split buffers didn't pick up" - " the full packet\n", netdev->name); - dev_kfree_skb_irq(skb); - goto next_desc; - } - - if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { - dev_kfree_skb_irq(skb); - goto next_desc; - } - - length = le16_to_cpu(rx_desc->wb.middle.length0); - - if (unlikely(!length)) { - E1000_DBG("%s: Last part of the packet spanning" - " multiple descriptors\n", netdev->name); - dev_kfree_skb_irq(skb); - goto next_desc; - } - - /* Good Receive */ - skb_put(skb, length); - - { - /* this looks ugly, but it seems compiler issues make it - more efficient than reusing j */ - int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); - - /* page alloc/put takes too long and effects small packet - * throughput, so unsplit small packets and save the alloc/put*/ - if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) { - u8 *vaddr; - /* there is no documentation about how to call - * kmap_atomic, so we can't hold the mapping - * very long */ - pci_dma_sync_single_for_cpu(pdev, - ps_page_dma->ps_page_dma[0], - PAGE_SIZE, - PCI_DMA_FROMDEVICE); - vaddr = kmap_atomic(ps_page->ps_page[0], - KM_SKB_DATA_SOFTIRQ); - memcpy(skb_tail_pointer(skb), vaddr, l1); - kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); - pci_dma_sync_single_for_device(pdev, - ps_page_dma->ps_page_dma[0], - PAGE_SIZE, PCI_DMA_FROMDEVICE); - /* remove the CRC */ - l1 -= 4; - skb_put(skb, l1); - goto copydone; - } /* if */ - } - - for (j = 0; j < adapter->rx_ps_pages; j++) { - length = le16_to_cpu(rx_desc->wb.upper.length[j]); - if (!length) - break; - pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], - PAGE_SIZE, PCI_DMA_FROMDEVICE); - ps_page_dma->ps_page_dma[j] = 0; - skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0, - length); - ps_page->ps_page[j] = NULL; - skb->len += length; - skb->data_len += length; - skb->truesize += length; - } - - /* strip the ethernet crc, problem is we're using pages now so - * this whole operation can get a little cpu intensive */ - pskb_trim(skb, skb->len - 4); - -copydone: - total_rx_bytes += skb->len; - total_rx_packets++; - - e1000_rx_checksum(adapter, staterr, - le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); - skb->protocol = eth_type_trans(skb, netdev); - - if (likely(rx_desc->wb.upper.header_status & - cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))) - adapter->rx_hdr_split++; - - if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { - vlan_hwaccel_receive_skb(skb, adapter->vlgrp, - le16_to_cpu(rx_desc->wb.middle.vlan)); - } else { - netif_receive_skb(skb); - } - - netdev->last_rx = jiffies; - -next_desc: - rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); - buffer_info->skb = NULL; - - /* return some buffers to hardware, one at a time is too slow */ - if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - - staterr = le32_to_cpu(rx_desc->wb.middle.status_error); - } - rx_ring->next_to_clean = i; - - cleaned_count = E1000_DESC_UNUSED(rx_ring); - if (cleaned_count) - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); - - adapter->total_rx_packets += total_rx_packets; - adapter->total_rx_bytes += 
total_rx_bytes; - adapter->net_stats.rx_bytes += total_rx_bytes; - adapter->net_stats.rx_packets += total_rx_packets; - return cleaned; -} - /** * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended * @adapter: address of board private structure @@ -4520,104 +4220,6 @@ map_skb: } } -/** - * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split - * @adapter: address of board private structure - **/ - -static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - union e1000_rx_desc_packet_split *rx_desc; - struct e1000_buffer *buffer_info; - struct e1000_ps_page *ps_page; - struct e1000_ps_page_dma *ps_page_dma; - struct sk_buff *skb; - unsigned int i, j; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; - ps_page = &rx_ring->ps_page[i]; - ps_page_dma = &rx_ring->ps_page_dma[i]; - - while (cleaned_count--) { - rx_desc = E1000_RX_DESC_PS(*rx_ring, i); - - for (j = 0; j < PS_PAGE_BUFFERS; j++) { - if (j < adapter->rx_ps_pages) { - if (likely(!ps_page->ps_page[j])) { - ps_page->ps_page[j] = - alloc_page(GFP_ATOMIC); - if (unlikely(!ps_page->ps_page[j])) { - adapter->alloc_rx_buff_failed++; - goto no_buffers; - } - ps_page_dma->ps_page_dma[j] = - pci_map_page(pdev, - ps_page->ps_page[j], - 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); - } - /* Refresh the desc even if buffer_addrs didn't - * change because each write-back erases - * this info. - */ - rx_desc->read.buffer_addr[j+1] = - cpu_to_le64(ps_page_dma->ps_page_dma[j]); - } else - rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0); - } - - skb = netdev_alloc_skb(netdev, - adapter->rx_ps_bsize0 + NET_IP_ALIGN); - - if (unlikely(!skb)) { - adapter->alloc_rx_buff_failed++; - break; - } - - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - - buffer_info->skb = skb; - buffer_info->length = adapter->rx_ps_bsize0; - buffer_info->dma = pci_map_single(pdev, skb->data, - adapter->rx_ps_bsize0, - PCI_DMA_FROMDEVICE); - - rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); - - if (unlikely(++i == rx_ring->count)) i = 0; - buffer_info = &rx_ring->buffer_info[i]; - ps_page = &rx_ring->ps_page[i]; - ps_page_dma = &rx_ring->ps_page_dma[i]; - } - -no_buffers: - if (likely(rx_ring->next_to_use != i)) { - rx_ring->next_to_use = i; - if (unlikely(i-- == 0)) i = (rx_ring->count - 1); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). */ - wmb(); - /* Hardware increments by 16 bytes, but packet split - * descriptors are 32 bytes...so we increment tail - * twice as much. - */ - writel(i<<1, hw->hw_addr + rx_ring->rdt); - } -} - /** * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. * @adapter: -- cgit v1.2.3-70-g09d2 From 3ed30676f5bc9960c67644fa37c5fdc36ae47b5b Mon Sep 17 00:00:00 2001 From: Dave Graham Date: Thu, 9 Oct 2008 14:29:26 -0700 Subject: e1000: don't generate bad checksums for tcp packets with 0 csum When offloading transmit checksums only, the driver was not correctly configuring the hardware to handle the case of a zero checksum. 
For UDP the correct behavior is to leave it alone, but for tcp the checksum must be changed from 0x0000 to 0xFFFF. The hardware takes care of this case but only if it is told the packet is tcp. same patch as e1000e Signed-off-by: Dave Graham Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/e1000/e1000_main.c | 55 +++++++++++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 19 deletions(-) (limited to 'drivers/net/e1000') diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 2ab44db29fa..3bafaede791 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2873,32 +2873,49 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_buffer *buffer_info; unsigned int i; u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; - if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - css = skb_transport_offset(skb); + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; - i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case __constant_htons(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + DPRINTK(DRV, WARNING, + "checksum_partial proto=%x!\n", skb->protocol); + break; + } - context_desc->lower_setup.ip_config = 0; - context_desc->upper_setup.tcp_fields.tucss = css; - context_desc->upper_setup.tcp_fields.tucso = - css + skb->csum_offset; - context_desc->upper_setup.tcp_fields.tucse = 0; - context_desc->tcp_seg_setup.data = 0; - context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); + css = skb_transport_offset(skb); - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); - if (unlikely(++i == tx_ring->count)) i = 0; - tx_ring->next_to_use = i; + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); - return true; - } + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; - return false; + if (unlikely(++i == tx_ring->count)) i = 0; + tx_ring->next_to_use = i; + + return true; } #define E1000_MAX_TXD_PWR 12 -- cgit v1.2.3-70-g09d2
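
A note on the stack-size fix (first patch above): the saving comes from two C-level properties. Declaring a fresh struct e1000_option inside every option block left gcc reserving stack space for each of them; hoisting a single opt to function scope and filling it with compound-literal assignments reuses one slot. And an automatic array with an initializer list, such as the old speed_list[]/dplx_list[]/an_list[], has to behave as if it were rebuilt on every call, so gcc copies the initializer onto the stack; declaring it static const places it once in .rodata, which is also why the diff changes the list pointer to const struct e1000_opt_list *. Below is a minimal, standalone sketch of the second point; the names (opt_entry, lookup_stack, lookup_rodata) are invented for illustration and are not driver code.

#include <stdio.h>

struct opt_entry { int val; const char *str; };

/* Automatic aggregate with an initializer: the compiler must act as if
 * the table is constructed fresh on each call, so it typically lands in
 * the function's stack frame and counts against stack usage. */
static int lookup_stack(int v)
{
	const struct opt_entry table[] = {
		{ 0,   "default" },
		{ 10,  "10 Mb/s" },
		{ 100, "100 Mb/s" },
	};
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].val == v)
			return (int)i;
	return -1;
}

/* static const aggregate: emitted once into read-only data, costing no
 * stack and no per-call initialization. This is what the patch does for
 * speed_list[], dplx_list[] and an_list[]. */
static int lookup_rodata(int v)
{
	static const struct opt_entry table[] = {
		{ 0,   "default" },
		{ 10,  "10 Mb/s" },
		{ 100, "100 Mb/s" },
	};
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].val == v)
			return (int)i;
	return -1;
}

int main(void)
{
	printf("%d %d\n", lookup_stack(100), lookup_rodata(100));
	return 0;
}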
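
A note on the EEPROM-corruption fix (second patch above): the shape of the change is to keep the exported entry points e1000_read_eeprom()/e1000_write_eeprom() as thin wrappers that take one file-scope lock (DEFINE_SPINLOCK(e1000_eeprom_lock)) and to move the real work into static e1000_do_read_eeprom()/e1000_do_write_eeprom() helpers, so concurrent EEPROM accesses are serialized without touching any caller. The following is a rough user-space analogue of that structure only: it substitutes a pthread mutex for the kernel spinlock and invents a 64-word in-memory "EEPROM", so it is a sketch of the pattern, not of the driver.

#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define EEPROM_WORDS 64

/* One lock serializes all access, in the role of e1000_eeprom_lock. */
static pthread_mutex_t eeprom_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t eeprom_image[EEPROM_WORDS];	/* stand-in for the NVM */

/* Unlocked workers, corresponding to the e1000_do_*_eeprom() helpers. */
static int do_read_eeprom(uint16_t offset, uint16_t words, uint16_t *data)
{
	if ((uint32_t)offset + words > EEPROM_WORDS)
		return -1;
	memcpy(data, &eeprom_image[offset], (size_t)words * sizeof(*data));
	return 0;
}

static int do_write_eeprom(uint16_t offset, uint16_t words, const uint16_t *data)
{
	if ((uint32_t)offset + words > EEPROM_WORDS)
		return -1;
	memcpy(&eeprom_image[offset], data, (size_t)words * sizeof(*data));
	return 0;
}

/* Entry points only take the lock and delegate, so a reader and a writer
 * can never interleave their EEPROM cycles. */
int read_eeprom(uint16_t offset, uint16_t words, uint16_t *data)
{
	pthread_mutex_lock(&eeprom_lock);
	int ret = do_read_eeprom(offset, words, data);
	pthread_mutex_unlock(&eeprom_lock);
	return ret;
}

int write_eeprom(uint16_t offset, uint16_t words, const uint16_t *data)
{
	pthread_mutex_lock(&eeprom_lock);
	int ret = do_write_eeprom(offset, words, data);
	pthread_mutex_unlock(&eeprom_lock);
	return ret;
}

Keeping the locking in the wrapper and the work in a separate do_* helper is a common kernel idiom; among other things it leaves room for callers that already hold the lock to use the unlocked variant directly.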
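
On the final checksum fix: with CHECKSUM_PARTIAL offload the hardware finishes the one's-complement sum, and per the commit it only rewrites a resulting 0x0000 to its one's-complement twin 0xFFFF when the context descriptor says the payload is TCP; UDP is left untouched, since an all-zero UDP checksum field conventionally means no checksum was supplied. The patch therefore derives that hint from skb->protocol and the L4 protocol byte before filling in cmd_and_length. Below is a standalone sketch of the same per-packet test applied to a raw Ethernet frame; tx_csum_is_tcp() and its constants are invented for illustration, and, like the patch, it does not walk IPv6 extension headers.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_HLEN        14
#define ETH_P_IP        0x0800
#define ETH_P_IPV6      0x86DD
#define IPPROTO_TCP_NUM 6

/* Decide whether a transmit checksum-offload descriptor for this frame
 * should carry the "payload is TCP" hint (the role of E1000_TXD_CMD_TCP
 * in the patch). Returns false for UDP and anything unrecognized. */
bool tx_csum_is_tcp(const uint8_t *frame, size_t len)
{
	if (len < ETH_HLEN + 20)
		return false;

	uint16_t ethertype = (uint16_t)(frame[12] << 8 | frame[13]);
	const uint8_t *l3 = frame + ETH_HLEN;

	switch (ethertype) {
	case ETH_P_IP:
		/* IPv4: the protocol field is byte 9 of the IP header. */
		return l3[9] == IPPROTO_TCP_NUM;
	case ETH_P_IPV6:
		/* IPv6: next-header is byte 6 of the fixed 40-byte header;
		 * extension headers are not followed here. */
		return len >= ETH_HLEN + 40 && l3[6] == IPPROTO_TCP_NUM;
	default:
		return false;
	}
}

In the driver the equivalent decision ORs E1000_TXD_CMD_TCP into cmd_len, which is what allows the offload engine to apply the 0x0000 -> 0xFFFF substitution for TCP while leaving UDP checksums alone.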