Author:    Paul Mackerras <paulus@samba.org>  2008-01-31 11:25:51 +1100
Committer: Paul Mackerras <paulus@samba.org>  2008-01-31 11:25:51 +1100
Commit:    bd45ac0c5daae35e7c71138172e63df5cf644cf6
Tree:      5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /drivers/net/e100.c
Parent:    4eece4ccf997c0e6d8fdad3d842e37b16b8d705f
Parent:    5bdeae46be6dfe9efa44a548bd622af325f4bdb4
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers/net/e100.c')
 -rw-r--r--  drivers/net/e100.c | 204
 1 file changed, 136 insertions(+), 68 deletions(-)
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index b87402bc830..51cf577035b 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -106,6 +106,13 @@
* the RFD, the RFD must be dma_sync'ed to maintain a consistent
* view from software and hardware.
*
+ * In order to keep updates to the RFD link field from colliding with
+ * hardware writes to mark packets complete, we use the feature that
+ * hardware will not write to a size 0 descriptor and mark the previous
+ * packet as end-of-list (EL). After updating the link, we remove EL
+ * and only then restore the size such that hardware may use the
+ * previous-to-end RFD.
+ *
* Under typical operation, the receive unit (RU) is started once,
* and the controller happily fills RFDs as frames arrive. If
* replacement RFDs cannot be allocated, or the RU goes non-active,
@@ -281,6 +288,7 @@ struct csr {
};
enum scb_status {
+ rus_no_res = 0x08,
rus_ready = 0x10,
rus_mask = 0x3C,
};
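The comment and the new rus_no_res status bit above describe the scheme this patch relies on: hardware never writes a descriptor whose size is 0, and it stops (RNR, RU "No Resources") at a descriptor whose EL bit is set. Below is a minimal userspace model of that ordering rule, using glibc htole16()/le16toh() in place of the kernel's cpu_to_le16()/le16_to_cpu(); the names rfd_model, move_stopper and CB_EL are illustrative, not driver code.

```c
/* Sketch (not driver code): model of the "EL + size 0" parking rule.
 * Hardware never writes a descriptor whose size is 0, and it stops at a
 * descriptor whose EL (end-of-list) bit is set, raising an RNR/no-resources
 * condition instead of completing it. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define CB_EL 0x2000                      /* end-of-list command bit */

struct rfd_model {                        /* fields are little-endian */
	uint16_t command;                 /* stores htole16() values  */
	uint16_t size;
};

/* Arm the new stopper first, only then release the old one.  This ordering
 * is what keeps hardware from racing a link update in the real driver. */
static void move_stopper(struct rfd_model *old_stop,
			 struct rfd_model *new_stop, uint16_t full_size)
{
	new_stop->size = 0;                       /* hw will not touch it   */
	new_stop->command |= htole16(CB_EL);      /* hw stops here          */
	/* driver: pci_dma_sync_single_for_device(new_stop) */

	old_stop->command &= ~htole16(CB_EL);     /* disarm the old stopper */
	/* driver: pci_dma_sync_single_for_device(old_stop) */
	old_stop->size = htole16(full_size);      /* now hw may use it      */
	/* driver: pci_dma_sync_single_for_device(old_stop) again */
}

int main(void)
{
	struct rfd_model a = { htole16(CB_EL), 0 };
	struct rfd_model b = { 0, htole16(1518) };

	move_stopper(&a, &b, 1518);
	printf("old: cmd=0x%04x size=%u  new: cmd=0x%04x size=%u\n",
	       le16toh(a.command), le16toh(a.size),
	       le16toh(b.command), le16toh(b.size));
	return 0;
}
```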
@@ -393,12 +401,12 @@ enum cb_command {
};
struct rfd {
- u16 status;
- u16 command;
- u32 link;
- u32 rbd;
- u16 actual_size;
- u16 size;
+ __le16 status;
+ __le16 command;
+ __le32 link;
+ __le32 rbd;
+ __le16 actual_size;
+ __le16 size;
};
struct rx {
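The conversions above (and in the following hunks) turn plain integers into sparse-checked little-endian types, so `make C=1` can flag any access that skips cpu_to_le*()/le*_to_cpu() — which is how the swapped conversions fixed later in this patch get caught. A small host-side illustration, with glibc htole16()/le16toh() standing in for the kernel helpers:

```c
/* Illustration only: glibc htole16()/le16toh() stand in for the kernel's
 * cpu_to_le16()/le16_to_cpu().  The point is that a __le16 field is never
 * read or written without a conversion. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct rfd_le {
	uint16_t status;        /* little-endian, like __le16 status */
	uint16_t size;          /* little-endian, like __le16 size   */
};

int main(void)
{
	struct rfd_le rfd = { 0, 0 };

	rfd.size = htole16(1518);                          /* cpu_to_le16() */
	printf("size on host = %u\n", le16toh(rfd.size));  /* le16_to_cpu() */
	return 0;
}
```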
@@ -453,19 +461,19 @@ struct config {
#define E100_MAX_MULTICAST_ADDRS 64
struct multi {
- u16 count;
+ __le16 count;
u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};
/* Important: keep total struct u32-aligned */
#define UCODE_SIZE 134
struct cb {
- u16 status;
- u16 command;
- u32 link;
+ __le16 status;
+ __le16 command;
+ __le32 link;
union {
u8 iaaddr[ETH_ALEN];
- u32 ucode[UCODE_SIZE];
+ __le32 ucode[UCODE_SIZE];
struct config config;
struct multi multi;
struct {
@@ -474,12 +482,12 @@ struct cb {
u8 threshold;
u8 tbd_count;
struct {
- u32 buf_addr;
- u16 size;
+ __le32 buf_addr;
+ __le16 size;
u16 eol;
} tbd;
} tcb;
- u32 dump_buffer_addr;
+ __le32 dump_buffer_addr;
} u;
struct cb *next, *prev;
dma_addr_t dma_addr;
@@ -491,15 +499,15 @@ enum loopback {
};
struct stats {
- u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
+ __le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
tx_multiple_collisions, tx_total_collisions;
- u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
+ __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
rx_short_frame_errors;
- u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
- u16 xmt_tco_frames, rcv_tco_frames;
- u32 complete;
+ __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
+ __le16 xmt_tco_frames, rcv_tco_frames;
+ __le32 complete;
};
struct mem {
@@ -544,7 +552,7 @@ struct nic {
struct cb *cb_to_use;
struct cb *cb_to_send;
struct cb *cb_to_clean;
- u16 tx_command;
+ __le16 tx_command;
/* End: frequently used values: keep adjacent for cache effect */
enum {
@@ -585,7 +593,7 @@ struct nic {
u16 leds;
u16 eeprom_wc;
- u16 eeprom[256];
+ __le16 eeprom[256];
spinlock_t mdio_lock;
};
@@ -663,7 +671,7 @@ static int e100_self_test(struct nic *nic)
return 0;
}
-static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
+static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
u32 cmd_addr_data[3];
u8 ctrl;
@@ -672,7 +680,7 @@ static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
/* Three cmds: write/erase enable, write data, write/erase disable */
cmd_addr_data[0] = op_ewen << (addr_len - 2);
cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
- cpu_to_le16(data);
+ le16_to_cpu(data);
cmd_addr_data[2] = op_ewds << (addr_len - 2);
/* Bit-bang cmds to write word to eeprom */
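The hunk above swaps cpu_to_le16() for le16_to_cpu(): cmd_addr_data[] is treated as a host-order integer and bit-banged out MSB first, so the little-endian EEPROM word has to be brought back to CPU order before it is ORed in. A rough standalone illustration of building that command word; the opcode and address-length values are illustrative, not taken from the driver:

```c
/* Illustration: the bit-bang loop walks a host-order u32 MSB-first, so the
 * little-endian EEPROM word must be converted to host order first. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t addr_len = 8, addr = 0x12;        /* illustrative values        */
	uint16_t op_write = 0x05;                  /* illustrative opcode        */
	uint16_t data_le = htole16(0xBABA);        /* as stored in nic->eeprom[] */

	uint32_t cmd = (((uint32_t)(op_write << addr_len) | addr) << 16) |
		       le16toh(data_le);           /* le16_to_cpu() in e100     */

	/* shift out MSB first, the way the driver's bit-bang loop does */
	for (int i = 31; i >= 0; i--)
		putchar((cmd >> i) & 1 ? '1' : '0');
	putchar('\n');
	return 0;
}
```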
@@ -701,7 +709,7 @@ static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
};
/* General technique stolen from the eepro100 driver - very clever */
-static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
+static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
u32 cmd_addr_data;
u16 data = 0;
@@ -738,7 +746,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
iowrite8(0, &nic->csr->eeprom_ctrl_lo);
e100_write_flush(nic); udelay(4);
- return le16_to_cpu(data);
+ return cpu_to_le16(data);
};
/* Load entire EEPROM image into driver cache and validate checksum */
@@ -753,13 +761,12 @@ static int e100_eeprom_load(struct nic *nic)
for(addr = 0; addr < nic->eeprom_wc; addr++) {
nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
if(addr < nic->eeprom_wc - 1)
- checksum += cpu_to_le16(nic->eeprom[addr]);
+ checksum += le16_to_cpu(nic->eeprom[addr]);
}
/* The checksum, stored in the last word, is calculated such that
* the sum of words should be 0xBABA */
- checksum = le16_to_cpu(0xBABA - checksum);
- if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
+ if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
if (!eeprom_bad_csum_allow)
return -EAGAIN;
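The checksum fix follows the same pattern: sum in CPU byte order, convert once at the comparison. A standalone sketch of the "words sum to 0xBABA" rule, again with the glibc helpers standing in for le16_to_cpu()/cpu_to_le16(); the image contents are made up:

```c
/* Sketch of the EEPROM checksum rule: the last word is chosen so that the
 * 16-bit sum of all words equals 0xBABA. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static int eeprom_csum_ok(const uint16_t *eeprom_le, unsigned int wc)
{
	uint16_t sum = 0;
	unsigned int i;

	for (i = 0; i < wc - 1; i++)
		sum += le16toh(eeprom_le[i]);               /* le16_to_cpu() */

	return eeprom_le[wc - 1] == htole16(0xBABA - sum);  /* cpu_to_le16() */
}

int main(void)
{
	uint16_t img[4] = { htole16(0x1111), htole16(0x2222), htole16(0x3333) };

	/* store the checksum word the way e100_eeprom_save() does */
	img[3] = htole16(0xBABA - (0x1111 + 0x2222 + 0x3333));
	printf("checksum %s\n", eeprom_csum_ok(img, 4) ? "ok" : "bad");
	return 0;
}
```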
@@ -786,8 +793,8 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
/* The checksum, stored in the last word, is calculated such that
* the sum of words should be 0xBABA */
for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
- checksum += cpu_to_le16(nic->eeprom[addr]);
- nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
+ checksum += le16_to_cpu(nic->eeprom[addr]);
+ nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
nic->eeprom[nic->eeprom_wc - 1]);
@@ -952,7 +959,7 @@ static void e100_get_defaults(struct nic *nic)
((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
/* Template for a freshly allocated RFD */
- nic->blank_rfd.command = cpu_to_le16(cb_el);
+ nic->blank_rfd.command = 0;
nic->blank_rfd.rbd = 0xFFFFFFFF;
nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
@@ -1485,15 +1492,15 @@ static void e100_update_stats(struct nic *nic)
struct net_device *dev = nic->netdev;
struct net_device_stats *ns = &dev->stats;
struct stats *s = &nic->mem->stats;
- u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
- (nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
+ __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
+ (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
&s->complete;
/* Device's stats reporting may take several microseconds to
* complete, so we're always waiting for results of the
* previous command. */
- if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
+ if(*complete == cpu_to_le32(cuc_dump_reset_complete)) {
*complete = 0;
nic->tx_frames = le32_to_cpu(s->tx_good_frames);
nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
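The comparison above is fixed the same way: the DMA'd statistics block stays little-endian and the constant is converted instead. A compressed sketch of that polling pattern; the helper name and the 0xA007 value are illustrative, not taken from the driver:

```c
/* Sketch: poll a little-endian completion word that the device DMA-writes.
 * The constant is converted with htole32() (cpu_to_le32() in the driver),
 * so the in-memory value is never byte-swapped. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define DUMP_RESET_COMPLETE 0xA007u    /* illustrative completion marker */

static int stats_dump_done(const volatile uint32_t *complete_le)
{
	return *complete_le == htole32(DUMP_RESET_COMPLETE);
}

int main(void)
{
	uint32_t word = htole32(DUMP_RESET_COMPLETE);  /* pretend the NIC wrote it */

	printf("done=%d\n", stats_dump_done(&word));
	return 0;
}
```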
@@ -1783,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
- if(pci_dma_mapping_error(rx->dma_addr)) {
+ if (pci_dma_mapping_error(rx->dma_addr)) {
dev_kfree_skb_any(rx->skb);
rx->skb = NULL;
rx->dma_addr = 0;
@@ -1791,15 +1798,11 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
}
/* Link the RFD to end of RFA by linking previous RFD to
- * this one, and clearing EL bit of previous. */
- if(rx->prev->skb) {
+ * this one. We are safe to touch the previous RFD because
+ * it is protected by the before last buffer's el bit being set */
+ if (rx->prev->skb) {
struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
- put_unaligned(cpu_to_le32(rx->dma_addr),
- (u32 *)&prev_rfd->link);
- wmb();
- prev_rfd->command &= ~cpu_to_le16(cb_el);
- pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
- sizeof(struct rfd), PCI_DMA_TODEVICE);
+ put_unaligned(cpu_to_le32(rx->dma_addr), &prev_rfd->link);
}
return 0;
@@ -1824,8 +1827,19 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
/* If data isn't ready, nothing to indicate */
- if(unlikely(!(rfd_status & cb_complete)))
+ if (unlikely(!(rfd_status & cb_complete))) {
+ /* If the next buffer has the el bit, but we think the receiver
+ * is still running, check to see if it really stopped while
+ * we had interrupts off.
+ * This allows for a fast restart without re-enabling
+ * interrupts */
+ if ((le16_to_cpu(rfd->command) & cb_el) &&
+ (RU_RUNNING == nic->ru_running))
+
+ if (readb(&nic->csr->scb.status) & rus_no_res)
+ nic->ru_running = RU_SUSPENDED;
return -ENODATA;
+ }
/* Get actual data size */
actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
@@ -1836,9 +1850,18 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
pci_unmap_single(nic->pdev, rx->dma_addr,
RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
- /* this allows for a fast restart without re-enabling interrupts */
- if(le16_to_cpu(rfd->command) & cb_el)
+ /* If this buffer has the el bit, but we think the receiver
+ * is still running, check to see if it really stopped while
+ * we had interrupts off.
+ * This allows for a fast restart without re-enabling interrupts.
+ * This can happen when the RU sees the size change but also sees
+ * the el bit set. */
+ if ((le16_to_cpu(rfd->command) & cb_el) &&
+ (RU_RUNNING == nic->ru_running)) {
+
+ if (readb(&nic->csr->scb.status) & rus_no_res)
nic->ru_running = RU_SUSPENDED;
+ }
/* Pull off the RFD and put the actual data (minus eth hdr) */
skb_reserve(skb, sizeof(struct rfd));
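Both hunks in e100_rx_indicate() add the same test: if a descriptor carries the el bit while the driver still believes the RU is running, the SCB status is read to see whether the unit has really gone to No Resources. A kernel-style sketch of that test factored into a helper; the helper name is hypothetical and e100 itself open-codes the check:

```c
/* Sketch only (kernel-style fragment, not a standalone program): detect that
 * the receive unit parked itself on the EL/size-0 descriptor while interrupts
 * were off. */
static inline bool e100_ru_went_no_res(struct nic *nic)
{
	/* rus_no_res (0x08) is the RU "No Resources" state in the SCB status */
	return (readb(&nic->csr->scb.status) & rus_no_res) != 0;
}

/* usage, mirroring the driver:
 *	if ((le16_to_cpu(rfd->command) & cb_el) &&
 *	    nic->ru_running == RU_RUNNING && e100_ru_went_no_res(nic))
 *		nic->ru_running = RU_SUSPENDED;
 */
```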
@@ -1870,31 +1893,30 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
unsigned int work_to_do)
{
struct rx *rx;
- int restart_required = 0;
- struct rx *rx_to_start = NULL;
-
- /* are we already rnr? then pay attention!!! this ensures that
- * the state machine progression never allows a start with a
- * partially cleaned list, avoiding a race between hardware
- * and rx_to_clean when in NAPI mode */
- if(RU_SUSPENDED == nic->ru_running)
- restart_required = 1;
+ int restart_required = 0, err = 0;
+ struct rx *old_before_last_rx, *new_before_last_rx;
+ struct rfd *old_before_last_rfd, *new_before_last_rfd;
/* Indicate newly arrived packets */
for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
- int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
- if(-EAGAIN == err) {
- /* hit quota so have more work to do, restart once
- * cleanup is complete */
- restart_required = 0;
+ err = e100_rx_indicate(nic, rx, work_done, work_to_do);
+ /* Hit quota or no more to clean */
+ if (-EAGAIN == err || -ENODATA == err)
break;
- } else if(-ENODATA == err)
- break; /* No more to clean */
}
- /* save our starting point as the place we'll restart the receiver */
- if(restart_required)
- rx_to_start = nic->rx_to_clean;
+
+ /* On EAGAIN, hit quota so have more work to do, restart once
+ * cleanup is complete.
+ * Else, are we already rnr? then pay attention!!! this ensures that
+ * the state machine progression never allows a start with a
+ * partially cleaned list, avoiding a race between hardware
+ * and rx_to_clean when in NAPI mode */
+ if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
+ restart_required = 1;
+
+ old_before_last_rx = nic->rx_to_use->prev->prev;
+ old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
/* Alloc new skbs to refill list */
for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
@@ -1902,10 +1924,42 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
break; /* Better luck next time (see watchdog) */
}
+ new_before_last_rx = nic->rx_to_use->prev->prev;
+ if (new_before_last_rx != old_before_last_rx) {
+ /* Set the el-bit on the buffer that is before the last buffer.
+ * This lets us update the next pointer on the last buffer
+ * without worrying about hardware touching it.
+ * We set the size to 0 to prevent hardware from touching this
+ * buffer.
+ * When the hardware hits the before last buffer with el-bit
+ * and size of 0, it will RNR interrupt, the RUS will go into
+ * the No Resources state. It will not complete nor write to
+ * this buffer. */
+ new_before_last_rfd =
+ (struct rfd *)new_before_last_rx->skb->data;
+ new_before_last_rfd->size = 0;
+ new_before_last_rfd->command |= cpu_to_le16(cb_el);
+ pci_dma_sync_single_for_device(nic->pdev,
+ new_before_last_rx->dma_addr, sizeof(struct rfd),
+ PCI_DMA_TODEVICE);
+
+ /* Now that we have a new stopping point, we can clear the old
+ * stopping point. We must sync twice to get the proper
+ * ordering on the hardware side of things. */
+ old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
+ pci_dma_sync_single_for_device(nic->pdev,
+ old_before_last_rx->dma_addr, sizeof(struct rfd),
+ PCI_DMA_TODEVICE);
+ old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
+ pci_dma_sync_single_for_device(nic->pdev,
+ old_before_last_rx->dma_addr, sizeof(struct rfd),
+ PCI_DMA_TODEVICE);
+ }
+
if(restart_required) {
// ack the rnr?
writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
- e100_start_receiver(nic, rx_to_start);
+ e100_start_receiver(nic, nic->rx_to_clean);
if(work_done)
(*work_done)++;
}
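The hunk above moves the el/size-0 stopper from the old before-last RFD to the new one whenever the refill loop made progress, arming the new stopper before releasing the old one (see the move_stopper() sketch earlier). The pointer arithmetic is easy to model: both old_before_last_rx and new_before_last_rx are rx_to_use->prev->prev, so the stopper advances exactly as far as the refill did. A tiny userspace model; the ring size and indices are illustrative:

```c
/* Sketch: why rx_to_use->prev->prev ("before last") moves when buffers are
 * refilled.  A 4-entry circular ring stands in for nic->rxs. */
#include <stdio.h>

#define RING 4

static int before_last(int to_use)          /* to_use->prev->prev */
{
	return (to_use + RING - 2) % RING;
}

int main(void)
{
	int rx_to_use = 1;                      /* next slot to refill */
	int old_bl = before_last(rx_to_use);

	rx_to_use = (rx_to_use + 2) % RING;     /* refilled two buffers */

	int new_bl = before_last(rx_to_use);
	if (new_bl != old_bl)
		printf("stopper moves from RFD %d to RFD %d\n", old_bl, new_bl);
	return 0;
}
```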
@@ -1937,6 +1991,7 @@ static int e100_rx_alloc_list(struct nic *nic)
{
struct rx *rx;
unsigned int i, count = nic->params.rfds.count;
+ struct rfd *before_last;
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = RU_UNINITIALIZED;
@@ -1952,6 +2007,19 @@ static int e100_rx_alloc_list(struct nic *nic)
return -ENOMEM;
}
}
+ /* Set the el-bit on the buffer that is before the last buffer.
+ * This lets us update the next pointer on the last buffer without
+ * worrying about hardware touching it.
+ * We set the size to 0 to prevent hardware from touching this buffer.
+ * When the hardware hits the before last buffer with el-bit and size
+ * of 0, it will RNR interrupt, the RU will go into the No Resources
+ * state. It will not complete nor write to this buffer. */
+ rx = nic->rxs->prev->prev;
+ before_last = (struct rfd *)rx->skb->data;
+ before_last->command |= cpu_to_le16(cb_el);
+ before_last->size = 0;
+ pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
+ sizeof(struct rfd), PCI_DMA_TODEVICE);
nic->rx_to_use = nic->rx_to_clean = nic->rxs;
nic->ru_running = RU_SUSPENDED;
@@ -2369,7 +2437,7 @@ static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
"Mac loopback (offline)",
"Phy loopback (offline)",
};
-#define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
+#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
static void e100_diag_test(struct net_device *netdev,
struct ethtool_test *test, u64 *data)
@@ -2431,7 +2499,7 @@ static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN 21
-#define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
+#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
static int e100_get_sset_count(struct net_device *netdev, int sset)
{
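The two defines switched to ARRAY_SIZE() replace an open-coded sizeof division with the idiomatic element-count macro. A userspace stand-in for reference; the kernel's version additionally rejects non-arrays via __must_be_array(), which the plain-C macro below does not reproduce:

```c
/* Illustration: ARRAY_SIZE() as used for the ethtool string tables. */
#include <stdio.h>

#define ETH_GSTRING_LEN 32
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char gstrings_test[][ETH_GSTRING_LEN] = {
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};

int main(void)
{
	/* equivalent to the old sizeof(...) / ETH_GSTRING_LEN, but it tracks
	 * the element type automatically and reads as a count */
	printf("%zu tests\n", ARRAY_SIZE(gstrings_test));
	return 0;
}
```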