Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r-- | drivers/net/skge.c | 511
1 files changed, 305 insertions, 206 deletions
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 0208258e782..716467879b9 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -37,12 +37,13 @@ #include <linux/delay.h> #include <linux/crc32.h> #include <linux/dma-mapping.h> +#include <linux/mii.h> #include <asm/irq.h> #include "skge.h" #define DRV_NAME "skge" -#define DRV_VERSION "1.0" +#define DRV_VERSION "1.2" #define PFX DRV_NAME " " #define DEFAULT_TX_RING_SIZE 128 @@ -88,8 +89,8 @@ MODULE_DEVICE_TABLE(pci, skge_id_table); static int skge_up(struct net_device *dev); static int skge_down(struct net_device *dev); static void skge_tx_clean(struct skge_port *skge); -static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); -static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); static void genesis_get_stats(struct skge_port *skge, u64 *data); static void yukon_get_stats(struct skge_port *skge, u64 *data); static void yukon_init(struct skge_hw *hw, int port); @@ -105,44 +106,31 @@ static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 }; -/* Don't need to look at whole 16K. - * last interesting register is descriptor poll timer. - */ -#define SKGE_REGS_LEN (29*128) - static int skge_get_regs_len(struct net_device *dev) { - return SKGE_REGS_LEN; + return 0x4000; } /* - * Returns copy of control register region - * I/O region is divided into banks and certain regions are unreadable + * Returns copy of whole control register region + * Note: skip RAM address register because accessing it will + * cause bus hangs! */ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { const struct skge_port *skge = netdev_priv(dev); - unsigned long offs; const void __iomem *io = skge->hw->regs; - static const unsigned long bankmap - = (1<<0) | (1<<2) | (1<<8) | (1<<9) - | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16) - | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23) - | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28); regs->version = 1; - for (offs = 0; offs < regs->len; offs += 128) { - u32 len = min_t(u32, 128, regs->len - offs); + memset(p, 0, regs->len); + memcpy_fromio(p, io, B3_RAM_ADDR); - if (bankmap & (1<<(offs/128))) - memcpy_fromio(p + offs, io + offs, len); - else - memset(p + offs, 0, len); - } + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); } -/* Wake on Lan only supported on Yukon chps with rev 1 or above */ +/* Wake on Lan only supported on Yukon chips with rev 1 or above */ static int wol_supported(const struct skge_hw *hw) { return !((hw->chip_id == CHIP_ID_GENESIS || @@ -182,8 +170,8 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) return 0; } -/* Determine supported/adverised modes based on hardware. - * Note: ethtoool ADVERTISED_xxx == SUPPORTED_xxx +/* Determine supported/advertised modes based on hardware. 
+ * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx */ static u32 skge_supported_modes(const struct skge_hw *hw) { @@ -544,13 +532,13 @@ static inline u32 hwkhz(const struct skge_hw *hw) return 78215; /* or: 78.125 MHz */ } -/* Chip hz to microseconds */ +/* Chip HZ to microseconds */ static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) { return (ticks * 1000) / hwkhz(hw); } -/* Microseconds to chip hz */ +/* Microseconds to chip HZ */ static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) { return hwkhz(hw) * usec / 1000; @@ -743,6 +731,7 @@ static struct ethtool_ops skge_ethtool_ops = { .phys_id = skge_phys_id, .get_stats_count = skge_get_stats_count, .get_ethtool_stats = skge_get_ethtool_stats, + .get_perm_addr = ethtool_op_get_perm_addr, }; /* @@ -775,17 +764,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base) return 0; } -static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size) -{ - struct sk_buff *skb = dev_alloc_skb(size); - - if (likely(skb)) { - skb->dev = dev; - skb_reserve(skb, NET_IP_ALIGN); - } - return skb; -} - /* Allocate and setup a new buffer for receiving */ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, struct sk_buff *skb, unsigned int bufsize) @@ -858,16 +836,17 @@ static int skge_rx_fill(struct skge_port *skge) { struct skge_ring *ring = &skge->rx_ring; struct skge_element *e; - unsigned int bufsize = skge->rx_buf_size; e = ring->start; do { - struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize); + struct sk_buff *skb; + skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN); if (!skb) return -ENOMEM; - skge_rx_setup(skge, e, skb, bufsize); + skb_reserve(skb, NET_IP_ALIGN); + skge_rx_setup(skge, e, skb, skge->rx_buf_size); } while ( (e = e->next) != ring->start); ring->to_clean = ring->start; @@ -905,32 +884,37 @@ static void skge_link_down(struct skge_port *skge) printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name); } -static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) +static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) { int i; - u16 v; xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); - v = xm_read16(hw, port, XM_PHY_DATA); + xm_read16(hw, port, XM_PHY_DATA); /* Need to wait for external PHY */ for (i = 0; i < PHY_RETRIES; i++) { udelay(1); - if (xm_read16(hw, port, XM_MMU_CMD) - & XM_MMU_PHY_RDY) + if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) goto ready; } - printk(KERN_WARNING PFX "%s: phy read timed out\n", - hw->dev[port]->name); - return 0; + return -ETIMEDOUT; ready: - v = xm_read16(hw, port, XM_PHY_DATA); + *val = xm_read16(hw, port, XM_PHY_DATA); + + return 0; +} +static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__xm_phy_read(hw, port, reg, &v)) + printk(KERN_WARNING PFX "%s: phy read timed out\n", + hw->dev[port]->name); return v; } -static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) { int i; @@ -940,19 +924,11 @@ static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) goto ready; udelay(1); } - printk(KERN_WARNING PFX "%s: phy write failed to come ready\n", - hw->dev[port]->name); - + return -EIO; ready: xm_write16(hw, port, XM_PHY_DATA, val); - for (i = 0; i < PHY_RETRIES; i++) { - udelay(1); - if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) - return; - } - printk(KERN_WARNING PFX "%s: phy write timed out\n", - hw->dev[port]->name); 
+ return 0; } static void genesis_init(struct skge_hw *hw) @@ -1187,7 +1163,7 @@ static void bcom_phy_init(struct skge_port *skge, int jumbo) xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); - /* Use link status change interrrupt */ + /* Use link status change interrupt */ xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); bcom_check_link(hw, port); @@ -1227,7 +1203,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port) skge_write32(hw, B2_GP_IO, r); skge_read32(hw, B2_GP_IO); - /* Enable GMII interfac */ + /* Enable GMII interface */ xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); bcom_phy_init(skge, jumbo); @@ -1278,7 +1254,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port) * that jumbo frames larger than 8192 bytes will be * truncated. Disabling all bad frame filtering causes * the RX FIFO to operate in streaming mode, in which - * case the XMAC will start transfering frames out of the + * case the XMAC will start transferring frames out of the * RX FIFO as soon as the FIFO threshold is reached. */ xm_write32(hw, port, XM_MODE, XM_DEF_MODE); @@ -1345,7 +1321,7 @@ static void genesis_stop(struct skge_port *skge) port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); /* - * If the transfer stucks at the MAC the STOP command will not + * If the transfer sticks at the MAC the STOP command will not * terminate if we don't flush the XMAC's transmit FIFO ! */ xm_write32(hw, port, XM_MODE, @@ -1422,42 +1398,6 @@ static void genesis_mac_intr(struct skge_hw *hw, int port) } } -static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) -{ - int i; - - gma_write16(hw, port, GM_SMI_DATA, val); - gma_write16(hw, port, GM_SMI_CTRL, - GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); - for (i = 0; i < PHY_RETRIES; i++) { - udelay(1); - - if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) - break; - } -} - -static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) -{ - int i; - - gma_write16(hw, port, GM_SMI_CTRL, - GM_SMI_CT_PHY_AD(hw->phy_addr) - | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); - - for (i = 0; i < PHY_RETRIES; i++) { - udelay(1); - if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) - goto ready; - } - - printk(KERN_WARNING PFX "%s: phy read timeout\n", - hw->dev[port]->name); - return 0; - ready: - return gma_read16(hw, port, GM_SMI_DATA); -} - static void genesis_link_up(struct skge_port *skge) { struct skge_hw *hw = skge->hw; @@ -1571,7 +1511,55 @@ static inline void bcom_phy_intr(struct skge_port *skge) } -/* Marvell Phy Initailization */ +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + gma_write16(hw, port, GM_SMI_DATA, val); + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + + if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) + return 0; + } + + printk(KERN_WARNING PFX "%s: phy write timeout\n", + hw->dev[port]->name); + return -EIO; +} + +static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); + + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) + goto ready; + } + + return -ETIMEDOUT; + ready: + *val = gma_read16(hw, port, GM_SMI_DATA); + return 0; +} + +static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + 
if (__gm_phy_read(hw, port, reg, &v)) + printk(KERN_WARNING PFX "%s: phy read timeout\n", + hw->dev[port]->name); + return v; +} + +/* Marvell Phy Initialization */ static void yukon_init(struct skge_hw *hw, int port) { struct skge_port *skge = netdev_priv(hw->dev[port]); @@ -1666,6 +1654,22 @@ static void yukon_reset(struct skge_hw *hw, int port) | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); } +/* Apparently, early versions of Yukon-Lite had wrong chip_id? */ +static int is_yukon_lite_a0(struct skge_hw *hw) +{ + u32 reg; + int ret; + + if (hw->chip_id != CHIP_ID_YUKON) + return 0; + + reg = skge_read32(hw, B2_FAR); + skge_write8(hw, B2_FAR + 3, 0xff); + ret = (skge_read8(hw, B2_FAR + 3) != 0); + skge_write32(hw, B2_FAR, reg); + return ret; +} + static void yukon_mac_init(struct skge_hw *hw, int port) { struct skge_port *skge = netdev_priv(hw->dev[port]); @@ -1781,9 +1785,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port) /* Configure Rx MAC FIFO */ skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); reg = GMF_OPER_ON | GMF_RX_F_FL_ON; - if (hw->chip_id == CHIP_ID_YUKON_LITE && - hw->chip_rev >= CHIP_REV_YU_LITE_A3) + + /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ + if (is_yukon_lite_a0(hw)) reg &= ~GMF_RX_F_FL_ON; + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); /* @@ -1798,6 +1804,25 @@ static void yukon_mac_init(struct skge_hw *hw, int port) skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); } +/* Go into power down mode */ +static void yukon_suspend(struct skge_hw *hw, int port) +{ + u16 ctrl; + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_POL_R_DIS; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* switch IEEE compatible power down mode on */ + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_PDOWN; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); +} + static void yukon_stop(struct skge_port *skge) { struct skge_hw *hw = skge->hw; @@ -1811,14 +1836,7 @@ static void yukon_stop(struct skge_port *skge) & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); gma_read16(hw, port, GM_GP_CTRL); - if (hw->chip_id == CHIP_ID_YUKON_LITE && - hw->chip_rev >= CHIP_REV_YU_LITE_A3) { - u32 io = skge_read32(hw, B2_GP_IO); - - io |= GP_DIR_9 | GP_IO_9; - skge_write32(hw, B2_GP_IO, io); - skge_read32(hw, B2_GP_IO); - } + yukon_suspend(hw, port); /* set GPHY Control reset */ skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); @@ -2001,6 +2019,51 @@ static void yukon_phy_intr(struct skge_port *skge) /* XXX restart autonegotiation? 
*/ } +/* Basic MII support */ +static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int err = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + switch(cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u16 val = 0; + spin_lock_bh(&hw->phy_lock); + if (hw->chip_id == CHIP_ID_GENESIS) + err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + else + err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + spin_unlock_bh(&hw->phy_lock); + data->val_out = val; + break; + } + + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_bh(&hw->phy_lock); + if (hw->chip_id == CHIP_ID_GENESIS) + err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + else + err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + spin_unlock_bh(&hw->phy_lock); + break; + } + return err; +} + static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) { u32 end; @@ -2093,7 +2156,7 @@ static int skge_up(struct net_device *dev) hw->intr_mask |= portirqmask[port]; skge_write32(hw, B0_IMSK, hw->intr_mask); - /* Initialze MAC */ + /* Initialize MAC */ spin_lock_bh(&hw->phy_lock); if (hw->chip_id == CHIP_ID_GENESIS) genesis_mac_init(hw, port); @@ -2237,14 +2300,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) td->dma_hi = map >> 32; if (skb->ip_summed == CHECKSUM_HW) { - const struct iphdr *ip - = (const struct iphdr *) (skb->data + ETH_HLEN); int offset = skb->h.raw - skb->data; /* This seems backwards, but it is what the sk98lin * does. Looks like hardware is wrong? 
*/ - if (ip->protocol == IPPROTO_UDP + if (skb->h.ipiph->protocol == IPPROTO_UDP && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) control = BMU_TCP_CHECK; else @@ -2413,7 +2474,7 @@ static void yukon_set_multicast(struct net_device *dev) reg = gma_read16(hw, port, GM_RX_CTRL); reg |= GM_RXCR_UCF_ENA; - if (dev->flags & IFF_PROMISC) /* promiscious */ + if (dev->flags & IFF_PROMISC) /* promiscuous */ reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); else if (dev->flags & IFF_ALLMULTI) /* all multicast */ memset(filter, 0xff, sizeof(filter)); @@ -2442,6 +2503,14 @@ static void yukon_set_multicast(struct net_device *dev) gma_write16(hw, port, GM_RX_CTRL, reg); } +static inline u16 phy_length(const struct skge_hw *hw, u32 status) +{ + if (hw->chip_id == CHIP_ID_GENESIS) + return status >> XMR_FS_LEN_SHIFT; + else + return status >> GMR_FS_LEN_SHIFT; +} + static inline int bad_phy_status(const struct skge_hw *hw, u32 status) { if (hw->chip_id == CHIP_ID_GENESIS) @@ -2451,80 +2520,99 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status) (status & GMR_FS_RX_OK) == 0; } -static void skge_rx_error(struct skge_port *skge, int slot, - u32 control, u32 status) -{ - if (netif_msg_rx_err(skge)) - printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n", - skge->netdev->name, slot, control, status); - - if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) - skge->net_stats.rx_length_errors++; - else if (skge->hw->chip_id == CHIP_ID_GENESIS) { - if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) - skge->net_stats.rx_length_errors++; - if (status & XMR_FS_FRA_ERR) - skge->net_stats.rx_frame_errors++; - if (status & XMR_FS_FCS_ERR) - skge->net_stats.rx_crc_errors++; - } else { - if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) - skge->net_stats.rx_length_errors++; - if (status & GMR_FS_FRAGMENT) - skge->net_stats.rx_frame_errors++; - if (status & GMR_FS_CRC_ERR) - skge->net_stats.rx_crc_errors++; - } -} /* Get receive buffer from descriptor. 
* Handles copy of small buffers and reallocation failures */ static inline struct sk_buff *skge_rx_get(struct skge_port *skge, struct skge_element *e, - unsigned int len) + u32 control, u32 status, u16 csum) { - struct sk_buff *nskb, *skb; + struct sk_buff *skb; + u16 len = control & BMU_BBC; + + if (unlikely(netif_msg_rx_status(skge))) + printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", + skge->netdev->name, e - skge->rx_ring.start, + status, len); + + if (len > skge->rx_buf_size) + goto error; + + if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) + goto error; + + if (bad_phy_status(skge->hw, status)) + goto error; + + if (phy_length(skge->hw, status) != len) + goto error; if (len < RX_COPY_THRESHOLD) { - nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN); - if (unlikely(!nskb)) - return NULL; + skb = dev_alloc_skb(len + 2); + if (!skb) + goto resubmit; + skb_reserve(skb, 2); pci_dma_sync_single_for_cpu(skge->hw->pdev, pci_unmap_addr(e, mapaddr), len, PCI_DMA_FROMDEVICE); - memcpy(nskb->data, e->skb->data, len); + memcpy(skb->data, e->skb->data, len); pci_dma_sync_single_for_device(skge->hw->pdev, pci_unmap_addr(e, mapaddr), len, PCI_DMA_FROMDEVICE); - - if (skge->rx_csum) { - struct skge_rx_desc *rd = e->desc; - nskb->csum = le16_to_cpu(rd->csum2); - nskb->ip_summed = CHECKSUM_HW; - } skge_rx_reuse(e, skge->rx_buf_size); - return nskb; } else { - nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size); - if (unlikely(!nskb)) - return NULL; + struct sk_buff *nskb; + nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN); + if (!nskb) + goto resubmit; pci_unmap_single(skge->hw->pdev, pci_unmap_addr(e, mapaddr), pci_unmap_len(e, maplen), PCI_DMA_FROMDEVICE); skb = e->skb; - if (skge->rx_csum) { - struct skge_rx_desc *rd = e->desc; - skb->csum = le16_to_cpu(rd->csum2); - skb->ip_summed = CHECKSUM_HW; - } - + prefetch(skb->data); skge_rx_setup(skge, e, nskb, skge->rx_buf_size); - return skb; } + + skb_put(skb, len); + skb->dev = skge->netdev; + if (skge->rx_csum) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_HW; + } + + skb->protocol = eth_type_trans(skb, skge->netdev); + + return skb; +error: + + if (netif_msg_rx_err(skge)) + printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n", + skge->netdev->name, e - skge->rx_ring.start, + control, status); + + if (skge->hw->chip_id == CHIP_ID_GENESIS) { + if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) + skge->net_stats.rx_length_errors++; + if (status & XMR_FS_FRA_ERR) + skge->net_stats.rx_frame_errors++; + if (status & XMR_FS_FCS_ERR) + skge->net_stats.rx_crc_errors++; + } else { + if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) + skge->net_stats.rx_length_errors++; + if (status & GMR_FS_FRAGMENT) + skge->net_stats.rx_frame_errors++; + if (status & GMR_FS_CRC_ERR) + skge->net_stats.rx_crc_errors++; + } + +resubmit: + skge_rx_reuse(e, skge->rx_buf_size); + return NULL; } @@ -2537,35 +2625,19 @@ static int skge_poll(struct net_device *dev, int *budget) unsigned int to_do = min(dev->quota, *budget); unsigned int work_done = 0; - for (e = ring->to_clean; work_done < to_do; e = e->next) { + for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { struct skge_rx_desc *rd = e->desc; struct sk_buff *skb; - u32 control, len, status; + u32 control; rmb(); control = rd->control; if (control & BMU_OWN) break; - len = control & BMU_BBC; - status = rd->status; - - if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) - || bad_phy_status(hw, status))) { - skge_rx_error(skge, e - ring->start, 
control, status); - skge_rx_reuse(e, skge->rx_buf_size); - continue; - } - - if (netif_msg_rx_status(skge)) - printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", - dev->name, e - ring->start, rd->status, len); - - skb = skge_rx_get(skge, e, len); + skb = skge_rx_get(skge, e, control, rd->status, + le16_to_cpu(rd->csum2)); if (likely(skb)) { - skb_put(skb, len); - skb->protocol = eth_type_trans(skb, dev); - dev->last_rx = jiffies; netif_receive_skb(skb); @@ -2586,11 +2658,11 @@ static int skge_poll(struct net_device *dev, int *budget) if (work_done >= to_do) return 1; /* not done */ - local_irq_disable(); - __netif_rx_complete(dev); + netif_rx_complete(dev); hw->intr_mask |= portirqmask[skge->port]; skge_write32(hw, B0_IMSK, hw->intr_mask); - local_irq_enable(); + skge_read32(hw, B0_IMSK); + return 0; } @@ -2602,7 +2674,7 @@ static inline void skge_tx_intr(struct net_device *dev) struct skge_element *e; spin_lock(&skge->tx_lock); - for (e = ring->to_clean; e != ring->to_use; e = e->next) { + for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) { struct skge_tx_desc *td = e->desc; u32 control; @@ -2725,7 +2797,7 @@ static void skge_error_irq(struct skge_hw *hw) } /* - * Interrrupt from PHY are handled in tasklet (soft irq) + * Interrupt from PHY are handled in tasklet (soft irq) * because accessing phy registers requires spin wait which might * cause excess interrupt latency. */ @@ -2755,6 +2827,14 @@ static void skge_extirq(unsigned long data) local_irq_enable(); } +static inline void skge_wakeup(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + prefetch(skge->rx_ring.to_clean); + netif_rx_schedule(dev); +} + static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) { struct skge_hw *hw = dev_id; @@ -2766,12 +2846,12 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) status &= hw->intr_mask; if (status & IS_R1_F) { hw->intr_mask &= ~IS_R1_F; - netif_rx_schedule(hw->dev[0]); + skge_wakeup(hw->dev[0]); } if (status & IS_R2_F) { hw->intr_mask &= ~IS_R2_F; - netif_rx_schedule(hw->dev[1]); + skge_wakeup(hw->dev[1]); } if (status & IS_XA1_F) @@ -2831,21 +2911,29 @@ static void skge_netpoll(struct net_device *dev) static int skge_set_mac_address(struct net_device *dev, void *p) { struct skge_port *skge = netdev_priv(dev); - struct sockaddr *addr = p; - int err = 0; + struct skge_hw *hw = skge->hw; + unsigned port = skge->port; + const struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - skge_down(dev); + spin_lock_bh(&hw->phy_lock); memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); - memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8, + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); - memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8, + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); - if (dev->flags & IFF_UP) - err = skge_up(dev); - return err; + + if (hw->chip_id == CHIP_ID_GENESIS) + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + else { + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + } + spin_unlock_bh(&hw->phy_lock); + + return 0; } static const struct { @@ -2878,6 +2966,7 @@ static const char *skge_board_name(const struct skge_hw *hw) */ static int skge_reset(struct skge_hw *hw) { + u32 reg; u16 ctst; u8 t8, mac_cfg, pmd_type, phy_type; int i; @@ -2956,6 +3045,7 @@ static int skge_reset(struct skge_hw *hw) /* switch power to VCC (WA for VAUX 
problem) */ skge_write8(hw, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); + /* avoid boards with stuck Hardware error bits */ if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { @@ -2963,6 +3053,14 @@ static int skge_reset(struct skge_hw *hw) hw->intr_mask &= ~IS_HW_ERR; } + /* Clear PHY COMA */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®); + reg &= ~PCI_PHY_COMA; + pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + for (i = 0; i < hw->ports; i++) { skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); @@ -3033,6 +3131,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, SET_NETDEV_DEV(dev, &hw->pdev->dev); dev->open = skge_up; dev->stop = skge_down; + dev->do_ioctl = skge_ioctl; dev->hard_start_xmit = skge_xmit_frame; dev->get_stats = skge_get_stats; if (hw->chip_id == CHIP_ID_GENESIS) @@ -3082,6 +3181,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, /* read the mac address */ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* device is off until link detection */ netif_carrier_off(dev); @@ -3131,7 +3231,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, } #ifdef __BIG_ENDIAN - /* byte swap decriptors in hardware */ + /* byte swap descriptors in hardware */ { u32 reg; @@ -3142,14 +3242,13 @@ static int __devinit skge_probe(struct pci_dev *pdev, #endif err = -ENOMEM; - hw = kmalloc(sizeof(*hw), GFP_KERNEL); + hw = kzalloc(sizeof(*hw), GFP_KERNEL); if (!hw) { printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", pci_name(pdev)); goto err_out_free_regions; } - memset(hw, 0, sizeof(*hw)); hw->pdev = pdev; spin_lock_init(&hw->phy_lock); tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); @@ -3172,7 +3271,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, if (err) goto err_out_free_irq; - printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n", + printk(KERN_INFO PFX DRV_VERSION " addr 0x%lx irq %d chip %s rev %d\n", pci_resource_start(pdev, 0), pdev->irq, skge_board_name(hw), hw->chip_rev); |
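The new dev->do_ioctl = skge_ioctl hook above wires the driver into the standard MII ioctl interface (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG from <linux/mii.h>/<linux/sockios.h>). A minimal userspace sketch of how that path could be exercised is below; "eth0" is a placeholder interface name, and reading MII registers normally requires root (CAP_NET_ADMIN).

/*
 * Sketch: read the PHY's basic mode status register through the
 * SIOCGMIIPHY/SIOCGMIIREG path handled by skge_ioctl().
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* the mii_ioctl_data block lives inside the ifreq union (cf. if_mii()) */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* ask the driver for its PHY address (hw->phy_addr in skge_ioctl) */
	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {
		perror("SIOCGMIIPHY");
		close(fd);
		return 1;
	}

	/* read MII_BMSR; the driver services this via __xm_phy_read/__gm_phy_read */
	mii->reg_num = MII_BMSR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {
		perror("SIOCGMIIREG");
		close(fd);
		return 1;
	}

	printf("phy %u BMSR 0x%04x link %s\n", mii->phy_id, mii->val_out,
	       (mii->val_out & BMSR_LSTATUS) ? "up" : "down");
	close(fd);
	return 0;
}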
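Similarly, the added .get_perm_addr = ethtool_op_get_perm_addr hook and the dev->perm_addr copy in skge_devinit() become visible through the generic ETHTOOL_GPERMADDR command. A hedged userspace sketch follows; "eth0" is again a placeholder interface name.

/*
 * Sketch: fetch the permanent MAC address via SIOCETHTOOL/ETHTOOL_GPERMADDR.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct {
		struct ethtool_perm_addr hdr;
		unsigned char addr[6];		/* ETH_ALEN bytes of storage */
	} req;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i;

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.hdr.cmd = ETHTOOL_GPERMADDR;
	req.hdr.size = sizeof(req.addr);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&req;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GPERMADDR");
		close(fd);
		return 1;
	}

	for (i = 0; i < 6; i++)
		printf("%02x%c", req.addr[i], i == 5 ? '\n' : ':');
	close(fd);
	return 0;
}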