-rw-r--r--  drivers/net/e100.c  165
1 file changed, 139 insertions(+), 26 deletions(-)
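For orientation before the diff: a minimal, self-contained sketch of the receive-unit (RU) state tracking this patch introduces. The enum values match the patch; the rx/nic stubs and the start_receiver() helper are simplified placeholders standing in for the driver's e100_start_receiver(), not the actual kernel code.

#include <stdio.h>

/* Receive-unit states as defined by the patch. */
enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

/* Stand-ins for the driver's rx/nic structures, reduced to the fields
 * the state machine touches. */
struct rx { int has_skb; };
struct nic {
	enum ru_state ru_running;
	struct rx *rxs;
};

/* Mirrors the reworked e100_start_receiver(): only a suspended RU with a
 * valid RFD gets (re)started; an uninitialized or already-running RU is
 * left alone.  The real driver issues a ruc_start command at this point. */
static void start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs)
		return;
	if (nic->ru_running != RU_SUSPENDED)
		return;
	if (!rx)
		rx = nic->rxs;		/* handle init-time starts */
	if (rx->has_skb)
		nic->ru_running = RU_RUNNING;
}

int main(void)
{
	struct rx rfd = { 1 };
	struct nic nic = { RU_UNINITIALIZED, &rfd };

	start_receiver(&nic, NULL);	/* ignored: RU not suspended yet */
	nic.ru_running = RU_SUSPENDED;	/* set by e100_rx_alloc_list() or an RNR interrupt */
	start_receiver(&nic, &rfd);	/* restarts the receive unit */
	printf("ru_running = %d\n", nic.ru_running);
	return 0;
}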
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1b68dd5a49b..4a47df5a9ff 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -155,9 +155,9 @@
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
-#define DRV_VERSION "3.3.6-k2"DRV_EXT
+#define DRV_VERSION "3.4.8-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
-#define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
#define PFX DRV_NAME ": "
#define E100_WATCHDOG_PERIOD (2 * HZ)
@@ -210,11 +210,17 @@ static struct pci_device_id e100_id_table[] = {
INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
+ INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
@@ -269,6 +275,12 @@ enum scb_status {
rus_mask = 0x3C,
};
+enum ru_state {
+ RU_SUSPENDED = 0,
+ RU_RUNNING = 1,
+ RU_UNINITIALIZED = -1,
+};
+
enum scb_stat_ack {
stat_ack_not_ours = 0x00,
stat_ack_sw_gen = 0x04,
@@ -510,7 +522,7 @@ struct nic {
struct rx *rx_to_use;
struct rx *rx_to_clean;
struct rfd blank_rfd;
- int ru_running;
+ enum ru_state ru_running;
spinlock_t cb_lock ____cacheline_aligned;
spinlock_t cmd_lock;
@@ -539,6 +551,7 @@ struct nic {
struct timer_list watchdog;
struct timer_list blink_timer;
struct mii_if_info mii;
+ struct work_struct tx_timeout_task;
enum loopback loopback;
struct mem *mem;
@@ -770,7 +783,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
return 0;
}
-#define E100_WAIT_SCB_TIMEOUT 40
+#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
unsigned long flags;
@@ -840,6 +853,10 @@ static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
* because the controller is too busy, so
* let's just queue the command and try again
* when another command is scheduled. */
+ if(err == -ENOSPC) {
+ //request a reset
+ schedule_work(&nic->tx_timeout_task);
+ }
break;
} else {
nic->cuc_cmd = cuc_resume;
@@ -884,7 +901,7 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
static void e100_get_defaults(struct nic *nic)
{
- struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
+ struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
struct param_range cbs = { .min = 64, .max = 256, .count = 64 };
pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
@@ -899,8 +916,9 @@ static void e100_get_defaults(struct nic *nic)
/* Quadwords to DMA into FIFO before starting frame transmit */
nic->tx_threshold = 0xE0;
- nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
- ((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
+ /* no interrupt for every tx completion, delay = 256us if not 557*/
+ nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
+ ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
/* Template for a freshly allocated RFD */
nic->blank_rfd.command = cpu_to_le16(cb_el);
@@ -964,7 +982,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
if(nic->flags & multicast_all)
config->multicast_all = 0x1; /* 1=accept, 0=no */
- if(!(nic->flags & wol_magic))
+ /* disable WoL when up */
+ if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
config->magic_packet_disable = 0x1; /* 1=off, 0=on */
if(nic->mac >= mac_82558_D101_A4) {
@@ -1203,7 +1222,9 @@ static void e100_update_stats(struct nic *nic)
}
}
- e100_exec_cmd(nic, cuc_dump_reset, 0);
+
+ if(e100_exec_cmd(nic, cuc_dump_reset, 0))
+ DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1279,12 +1300,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = nic->tx_command;
+ /* interrupt every 16 packets regardless of delay */
+ if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
cb->u.tcb.tcb_byte_count = 0;
cb->u.tcb.threshold = nic->tx_threshold;
cb->u.tcb.tbd_count = 1;
cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
+ // check for mapping failure?
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
@@ -1297,7 +1321,8 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
Issue a NOP command followed by a 1us delay before
issuing the Tx command. */
- e100_exec_cmd(nic, cuc_nop, 0);
+ if(e100_exec_cmd(nic, cuc_nop, 0))
+ DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
udelay(1);
}
@@ -1415,12 +1440,18 @@ static int e100_alloc_cbs(struct nic *nic)
return 0;
}
-static inline void e100_start_receiver(struct nic *nic)
+static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
+ if(!nic->rxs) return;
+ if(RU_SUSPENDED != nic->ru_running) return;
+
+ /* handle init time starts */
+ if(!rx) rx = nic->rxs;
+
/* (Re)start RU if suspended or idle and RFA is non-NULL */
- if(!nic->ru_running && nic->rx_to_clean->skb) {
- e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
- nic->ru_running = 1;
+ if(rx->skb) {
+ e100_exec_cmd(nic, ruc_start, rx->dma_addr);
+ nic->ru_running = RU_RUNNING;
}
}
@@ -1437,6 +1468,13 @@ static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ if(pci_dma_mapping_error(rx->dma_addr)) {
+ dev_kfree_skb_any(rx->skb);
+ rx->skb = 0;
+ rx->dma_addr = 0;
+ return -ENOMEM;
+ }
+
/* Link the RFD to end of RFA by linking previous RFD to
* this one, and clearing EL bit of previous. */
if(rx->prev->skb) {
@@ -1471,7 +1509,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
/* If data isn't ready, nothing to indicate */
if(unlikely(!(rfd_status & cb_complete)))
- return -EAGAIN;
+ return -ENODATA;
/* Get actual data size */
actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
@@ -1482,6 +1520,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
pci_unmap_single(nic->pdev, rx->dma_addr,
RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
+ /* this allows for a fast restart without re-enabling interrupts */
+ if(le16_to_cpu(rfd->command) & cb_el)
+ nic->ru_running = RU_SUSPENDED;
+
/* Pull off the RFD and put the actual data (minus eth hdr) */
skb_reserve(skb, sizeof(struct rfd));
skb_put(skb, actual_size);
@@ -1514,20 +1556,45 @@ static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
unsigned int work_to_do)
{
struct rx *rx;
+ int restart_required = 0;
+ struct rx *rx_to_start = NULL;
+
+ /* are we already rnr? then pay attention!!! this ensures that
+ * the state machine progression never allows a start with a
+ * partially cleaned list, avoiding a race between hardware
+ * and rx_to_clean when in NAPI mode */
+ if(RU_SUSPENDED == nic->ru_running)
+ restart_required = 1;
/* Indicate newly arrived packets */
for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
- if(e100_rx_indicate(nic, rx, work_done, work_to_do))
+ int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
+ if(-EAGAIN == err) {
+ /* hit quota so have more work to do, restart once
+ * cleanup is complete */
+ restart_required = 0;
+ break;
+ } else if(-ENODATA == err)
break; /* No more to clean */
}
+ /* save our starting point as the place we'll restart the receiver */
+ if(restart_required)
+ rx_to_start = nic->rx_to_clean;
+
/* Alloc new skbs to refill list */
for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
if(unlikely(e100_rx_alloc_skb(nic, rx)))
break; /* Better luck next time (see watchdog) */
}
- e100_start_receiver(nic);
+ if(restart_required) {
+ // ack the rnr?
+ writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
+ e100_start_receiver(nic, rx_to_start);
+ if(work_done)
+ (*work_done)++;
+ }
}
static void e100_rx_clean_list(struct nic *nic)
@@ -1535,6 +1602,8 @@ static void e100_rx_clean_list(struct nic *nic)
struct rx *rx;
unsigned int i, count = nic->params.rfds.count;
+ nic->ru_running = RU_UNINITIALIZED;
+
if(nic->rxs) {
for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
if(rx->skb) {
@@ -1548,7 +1617,6 @@ static void e100_rx_clean_list(struct nic *nic)
}
nic->rx_to_use = nic->rx_to_clean = NULL;
- nic->ru_running = 0;
}
static int e100_rx_alloc_list(struct nic *nic)
@@ -1557,6 +1625,7 @@ static int e100_rx_alloc_list(struct nic *nic)
unsigned int i, count = nic->params.rfds.count;
nic->rx_to_use = nic->rx_to_clean = NULL;
+ nic->ru_running = RU_UNINITIALIZED;
if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
return -ENOMEM;
@@ -1572,6 +1641,7 @@ static int e100_rx_alloc_list(struct nic *nic)
}
nic->rx_to_use = nic->rx_to_clean = nic->rxs;
+ nic->ru_running = RU_SUSPENDED;
return 0;
}
@@ -1593,7 +1663,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
/* We hit Receive No Resource (RNR); restart RU after cleaning */
if(stat_ack & stat_ack_rnr)
- nic->ru_running = 0;
+ nic->ru_running = RU_SUSPENDED;
e100_disable_irq(nic);
netif_rx_schedule(netdev);
@@ -1663,6 +1733,7 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+#ifdef CONFIG_PM
static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
@@ -1671,6 +1742,7 @@ static int e100_asf(struct nic *nic)
!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}
+#endif
static int e100_up(struct nic *nic)
{
@@ -1683,13 +1755,16 @@ static int e100_up(struct nic *nic)
if((err = e100_hw_init(nic)))
goto err_clean_cbs;
e100_set_multicast_list(nic->netdev);
- e100_start_receiver(nic);
+ e100_start_receiver(nic, 0);
mod_timer(&nic->watchdog, jiffies);
if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
nic->netdev->name, nic->netdev)))
goto err_no_irq;
- e100_enable_irq(nic);
netif_wake_queue(nic->netdev);
+ netif_poll_enable(nic->netdev);
+ /* enable ints _after_ enabling poll, preventing a race between
+ * disable ints+schedule */
+ e100_enable_irq(nic);
return 0;
err_no_irq:
@@ -1703,11 +1778,13 @@ err_rx_clean_list:
static void e100_down(struct nic *nic)
{
+ /* wait here for poll to complete */
+ netif_poll_disable(nic->netdev);
+ netif_stop_queue(nic->netdev);
e100_hw_reset(nic);
free_irq(nic->pdev->irq, nic->netdev);
del_timer_sync(&nic->watchdog);
netif_carrier_off(nic->netdev);
- netif_stop_queue(nic->netdev);
e100_clean_cbs(nic);
e100_rx_clean_list(nic);
}
@@ -1716,6 +1793,15 @@ static void e100_tx_timeout(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
+ /* Reset outside of interrupt context, to avoid request_irq
+ * in interrupt context */
+ schedule_work(&nic->tx_timeout_task);
+}
+
+static void e100_tx_timeout_task(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+
DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
readb(&nic->csr->scb.status));
e100_down(netdev_priv(netdev));
@@ -1749,7 +1835,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
BMCR_LOOPBACK);
- e100_start_receiver(nic);
+ e100_start_receiver(nic, 0);
if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
err = -ENOMEM;
@@ -1869,7 +1955,6 @@ static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
else
nic->flags &= ~wol_magic;
- pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
e100_exec_cb(nic, NULL, e100_configure);
return 0;
@@ -2223,6 +2308,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
e100_get_defaults(nic);
+ /* locks must be initialized before calling hw_reset */
spin_lock_init(&nic->cb_lock);
spin_lock_init(&nic->cmd_lock);
@@ -2240,6 +2326,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->blink_timer.function = e100_blink_led;
nic->blink_timer.data = (unsigned long)nic;
+ INIT_WORK(&nic->tx_timeout_task,
+ (void (*)(void *))e100_tx_timeout_task, netdev);
+
if((err = e100_alloc(nic))) {
DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
goto err_out_iounmap;
@@ -2263,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
(nic->eeprom[eeprom_id] & eeprom_id_wol))
nic->flags |= wol_magic;
- pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+ /* ack any pending wake events, disable PME */
+ pci_enable_wake(pdev, 0, 0);
strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev))) {
@@ -2335,7 +2425,10 @@ static int e100_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- e100_hw_init(nic);
+ /* ack any pending wake events, disable PME */
+ pci_enable_wake(pdev, 0, 0);
+ if(e100_hw_init(nic))
+ DPRINTK(HW, ERR, "e100_hw_init failed\n");
netif_device_attach(netdev);
if(netif_running(netdev))
@@ -2345,6 +2438,21 @@ static int e100_resume(struct pci_dev *pdev)
}
#endif
+
+static void e100_shutdown(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+#ifdef CONFIG_PM
+ pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+#else
+ pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
+#endif
+}
+
+
static struct pci_driver e100_driver = {
.name = DRV_NAME,
.id_table = e100_id_table,
@@ -2354,6 +2462,11 @@ static struct pci_driver e100_driver = {
.suspend = e100_suspend,
.resume = e100_resume,
#endif
+
+ .driver = {
+ .shutdown = e100_shutdown,
+ }
+
};
static int __init e100_init_module(void)