Diffstat (limited to 'drivers/net/arm')
-rw-r--r--  drivers/net/arm/Kconfig       |   10
-rw-r--r--  drivers/net/arm/Makefile      |    3
-rw-r--r--  drivers/net/arm/am79c961a.c   |   27
-rw-r--r--  drivers/net/arm/at91_ether.c  |   11
-rw-r--r--  drivers/net/arm/ep93xx_eth.c  |   10
-rw-r--r--  drivers/net/arm/ether1.c      |    5
-rw-r--r--  drivers/net/arm/ether3.c      |    5
-rw-r--r--  drivers/net/arm/etherh.c      |   25
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c  |  346
-rw-r--r--  drivers/net/arm/ks8695net.c   | 1676
-rw-r--r--  drivers/net/arm/ks8695net.h   |  107
11 files changed, 2014 insertions, 211 deletions
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 8eda6eeb43b..2895db13bfa 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -40,6 +40,14 @@ config ARM_AT91_ETHER
If you wish to compile a kernel for the AT91RM9200 and enable
ethernet support, then you should always answer Y to this.
+config ARM_KS8695_ETHER
+ tristate "KS8695 Ethernet support"
+ depends on ARM && ARCH_KS8695
+ select MII
+ help
+ If you wish to compile a kernel for the KS8695 and want to
+ use the internal ethernet then you should answer Y to this.
+
config EP93XX_ETH
tristate "EP93xx Ethernet support"
depends on ARM && ARCH_EP93XX
@@ -51,7 +59,7 @@ config EP93XX_ETH
config IXP4XX_ETH
tristate "Intel IXP4xx Ethernet support"
depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
- select MII
+ select PHYLIB
help
Say Y here if you want to use built-in Ethernet ports
on IXP4xx processor.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
index 7c812ac2b6a..c69c0cdba4a 100644
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -4,9 +4,10 @@
#
obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
-obj-$(CONFIG_ARM_ETHERH) += etherh.o
+obj-$(CONFIG_ARM_ETHERH) += etherh.o ../8390.o
obj-$(CONFIG_ARM_ETHER3) += ether3.o
obj-$(CONFIG_ARM_ETHER1) += ether1.o
obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
+obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o
obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index aa4a5246be5..c2d012fcc29 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -208,9 +208,9 @@ am79c961_init_for_open(struct net_device *dev)
/*
* Stop the chip.
*/
- spin_lock_irqsave(priv->chip_lock, flags);
+ spin_lock_irqsave(&priv->chip_lock, flags);
write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
- spin_unlock_irqrestore(priv->chip_lock, flags);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
@@ -332,10 +332,10 @@ am79c961_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
- spin_lock_irqsave(priv->chip_lock, flags);
+ spin_lock_irqsave(&priv->chip_lock, flags);
write_rreg (dev->base_addr, CSR0, CSR0_STOP);
write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
- spin_unlock_irqrestore(priv->chip_lock, flags);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
free_irq (dev->irq, dev);
@@ -391,7 +391,7 @@ static void am79c961_setmulticastlist (struct net_device *dev)
am79c961_mc_hash(dmi, multi_hash);
}
- spin_lock_irqsave(priv->chip_lock, flags);
+ spin_lock_irqsave(&priv->chip_lock, flags);
stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
@@ -405,9 +405,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
* Spin waiting for chip to report suspend mode
*/
while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
- spin_unlock_irqrestore(priv->chip_lock, flags);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
nop();
- spin_lock_irqsave(priv->chip_lock, flags);
+ spin_lock_irqsave(&priv->chip_lock, flags);
}
}
@@ -429,7 +429,7 @@ static void am79c961_setmulticastlist (struct net_device *dev)
write_rreg(dev->base_addr, CTRL1, 0);
}
- spin_unlock_irqrestore(priv->chip_lock, flags);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
}
static void am79c961_timeout(struct net_device *dev)
@@ -467,10 +467,10 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
priv->txhead = head;
- spin_lock_irqsave(priv->chip_lock, flags);
+ spin_lock_irqsave(&priv->chip_lock, flags);
write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
dev->trans_start = jiffies;
- spin_unlock_irqrestore(priv->chip_lock, flags);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
/*
* If the next packet is owned by the ethernet device,
@@ -532,7 +532,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
am_writeword(dev, hdraddr + 2, RMD_OWN);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- dev->last_rx = jiffies;
priv->stats.rx_bytes += len;
priv->stats.rx_packets ++;
} else {
@@ -745,10 +744,8 @@ static int __init am79c961_probe(struct platform_device *pdev)
ret = register_netdev(dev);
if (ret == 0) {
- DECLARE_MAC_BUF(mac);
-
- printk(KERN_INFO "%s: ether address %s\n",
- dev->name, print_mac(mac, dev->dev_addr));
+ printk(KERN_INFO "%s: ether address %pM\n",
+ dev->name, dev->dev_addr);
return 0;
}
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 6f431a887e7..442938d5038 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -485,7 +485,6 @@ static void update_mac_address(struct net_device *dev)
static int set_mac_address(struct net_device *dev, void* addr)
{
struct sockaddr *address = addr;
- DECLARE_MAC_BUF(mac);
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
@@ -493,8 +492,8 @@ static int set_mac_address(struct net_device *dev, void* addr)
memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
update_mac_address(dev);
- printk("%s: Setting MAC address to %s\n", dev->name,
- print_mac(mac, dev->dev_addr));
+ printk("%s: Setting MAC address to %pM\n", dev->name,
+ dev->dev_addr);
return 0;
}
@@ -894,7 +893,6 @@ static void at91ether_rx(struct net_device *dev)
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
skb->protocol = eth_type_trans(skb, dev);
- dev->last_rx = jiffies;
dev->stats.rx_bytes += pktlen;
netif_rx(skb);
}
@@ -978,7 +976,6 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
struct at91_private *lp;
unsigned int val;
int res;
- DECLARE_MAC_BUF(mac);
dev = alloc_etherdev(sizeof(struct at91_private));
if (!dev)
@@ -1084,11 +1081,11 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
/* Display ethernet banner */
- printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%s)\n",
+ printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
dev->name, (uint) dev->base_addr, dev->irq,
at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
- print_mac(mac, dev->dev_addr));
+ dev->dev_addr);
if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
else if (phy_type == MII_LXT971A_ID)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 1267444d79d..3ec20cc18b0 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -259,8 +259,6 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
- dev->last_rx = jiffies;
-
netif_receive_skb(skb);
ep->stats.rx_packets++;
@@ -300,7 +298,7 @@ poll_some_more:
int more = 0;
spin_lock_irq(&ep->rx_lock);
- __netif_rx_complete(dev, napi);
+ __netif_rx_complete(napi);
wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
if (ep93xx_have_more_rx(ep)) {
wrl(ep, REG_INTEN, REG_INTEN_TX);
@@ -309,7 +307,7 @@ poll_some_more:
}
spin_unlock_irq(&ep->rx_lock);
- if (more && netif_rx_reschedule(dev, napi))
+ if (more && netif_rx_reschedule(napi))
goto poll_some_more;
}
@@ -417,9 +415,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
if (status & REG_INTSTS_RX) {
spin_lock(&ep->rx_lock);
- if (likely(netif_rx_schedule_prep(dev, &ep->napi))) {
+ if (likely(netif_rx_schedule_prep(&ep->napi))) {
wrl(ep, REG_INTEN, REG_INTEN_TX);
- __netif_rx_schedule(dev, &ep->napi);
+ __netif_rx_schedule(&ep->napi);
}
spin_unlock(&ep->rx_lock);
}
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index 3bb9e293e2e..e380de45446 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -996,7 +996,6 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct net_device *dev;
int i, ret = 0;
- DECLARE_MAC_BUF(mac);
ether1_banner();
@@ -1044,8 +1043,8 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
if (ret)
goto free;
- printk(KERN_INFO "%s: ether1 in slot %d, %s\n",
- dev->name, ec->slot_no, print_mac(mac, dev->dev_addr));
+ printk(KERN_INFO "%s: ether1 in slot %d, %pM\n",
+ dev->name, ec->slot_no, dev->dev_addr);
ecard_set_drvdata(ec, dev);
return 0;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 67e96ae8503..21a7bef12d3 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -776,7 +776,6 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
const struct ether3_data *data = id->data;
struct net_device *dev;
int bus_type, ret;
- DECLARE_MAC_BUF(mac);
ether3_banner();
@@ -859,8 +858,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
if (ret)
goto free;
- printk("%s: %s in slot %d, %s\n",
- dev->name, data->name, ec->slot_no, print_mac(mac, dev->dev_addr));
+ printk("%s: %s in slot %d, %pM\n",
+ dev->name, data->name, ec->slot_no, dev->dev_addr);
ecard_set_drvdata(ec, dev);
return 0;
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 5c5f1e470d3..54b52e5b182 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -637,6 +637,22 @@ static const struct ethtool_ops etherh_ethtool_ops = {
.get_drvinfo = etherh_get_drvinfo,
};
+static const struct net_device_ops etherh_netdev_ops = {
+ .ndo_open = etherh_open,
+ .ndo_stop = etherh_close,
+ .ndo_set_config = etherh_set_config,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
static u32 etherh_regoffsets[16];
static u32 etherm_regoffsets[16];
@@ -648,7 +664,6 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
struct net_device *dev;
struct etherh_priv *eh;
int ret;
- DECLARE_MAC_BUF(mac);
etherh_banner();
@@ -664,9 +679,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
SET_NETDEV_DEV(dev, &ec->dev);
- dev->open = etherh_open;
- dev->stop = etherh_close;
- dev->set_config = etherh_set_config;
+ dev->netdev_ops = &etherh_netdev_ops;
dev->irq = ec->irq;
dev->ethtool_ops = &etherh_ethtool_ops;
@@ -746,8 +759,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
if (ret)
goto free;
- printk(KERN_INFO "%s: %s in slot %d, %s\n",
- dev->name, data->name, ec->slot_no, print_mac(mac, dev->dev_addr));
+ printk(KERN_INFO "%s: %s in slot %d, %pM\n",
+ dev->name, data->name, ec->slot_no, dev->dev_addr);
ecard_set_drvdata(ec, dev);
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index e2d702b8b2e..5fce1d5c1a1 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -30,12 +30,11 @@
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/platform_device.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
-#define DEBUG_QUEUES 0
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
@@ -59,7 +58,6 @@
#define NAPI_WEIGHT 16
#define MDIO_INTERVAL (3 * HZ)
#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
-#define MAX_MII_RESET_RETRIES 100 /* mdio_read() cycles, typically 4 */
#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
#define NPE_ID(port_id) ((port_id) >> 4)
@@ -164,15 +162,14 @@ struct port {
struct npe *npe;
struct net_device *netdev;
struct napi_struct napi;
- struct net_device_stats stat;
- struct mii_if_info mii;
- struct delayed_work mdio_thread;
+ struct phy_device *phydev;
struct eth_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
u32 desc_tab_phys;
int id; /* logical port ID */
- u16 mii_bmcr;
+ int speed, duplex;
+ u8 firmware[4];
};
/* NPE message structure */
@@ -243,19 +240,20 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
+struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
-static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
- int write, u16 cmd)
+static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+ int write, u16 cmd)
{
int cycles = 0;
if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
- printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
- return 0;
+ printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
+ return -1;
}
if (write) {
@@ -274,107 +272,119 @@ static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
}
if (cycles == MAX_MDIO_RETRIES) {
- printk(KERN_ERR "%s: MII write failed\n", dev->name);
- return 0;
+ printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
+ phy_id);
+ return -1;
}
#if DEBUG_MDIO
- printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
- cycles);
+ printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
+ phy_id, write ? "write" : "read", cycles);
#endif
if (write)
return 0;
if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
- printk(KERN_ERR "%s: MII read failed\n", dev->name);
- return 0;
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
+ phy_id);
+#endif
+ return 0xFFFF; /* don't return error */
}
return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
- (__raw_readl(&mdio_regs->mdio_status[1]) << 8);
+ ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}
-static int mdio_read(struct net_device *dev, int phy_id, int location)
+static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
unsigned long flags;
- u16 val;
+ int ret;
spin_lock_irqsave(&mdio_lock, flags);
- val = mdio_cmd(dev, phy_id, location, 0, 0);
+ ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
spin_unlock_irqrestore(&mdio_lock, flags);
- return val;
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
+ phy_id, location, ret);
+#endif
+ return ret;
}
-static void mdio_write(struct net_device *dev, int phy_id, int location,
- int val)
+static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+ u16 val)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&mdio_lock, flags);
- mdio_cmd(dev, phy_id, location, 1, val);
+ ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
spin_unlock_irqrestore(&mdio_lock, flags);
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: MII read [%i] <- 0x%X, err = %i\n",
+ bus->name, phy_id, location, val, ret);
+#endif
+ return ret;
}
-static void phy_reset(struct net_device *dev, int phy_id)
+static int ixp4xx_mdio_register(void)
{
- struct port *port = netdev_priv(dev);
- int cycles = 0;
+ int err;
- mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
+ if (!(mdio_bus = mdiobus_alloc()))
+ return -ENOMEM;
- while (cycles < MAX_MII_RESET_RETRIES) {
- if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
-#if DEBUG_MDIO
- printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
- dev->name, cycles);
-#endif
- return;
- }
- udelay(1);
- cycles++;
- }
+ /* All MII PHY accesses use NPE-B Ethernet registers */
+ spin_lock_init(&mdio_lock);
+ mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+ __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+
+ mdio_bus->name = "IXP4xx MII Bus";
+ mdio_bus->read = &ixp4xx_mdio_read;
+ mdio_bus->write = &ixp4xx_mdio_write;
+ strcpy(mdio_bus->id, "0");
- printk(KERN_ERR "%s: MII reset failed\n", dev->name);
+ if ((err = mdiobus_register(mdio_bus)))
+ mdiobus_free(mdio_bus);
+ return err;
}
-static void eth_set_duplex(struct port *port)
+static void ixp4xx_mdio_remove(void)
{
- if (port->mii.full_duplex)
- __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
- &port->regs->tx_control[0]);
- else
- __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
- &port->regs->tx_control[0]);
+ mdiobus_unregister(mdio_bus);
+ mdiobus_free(mdio_bus);
}
-static void phy_check_media(struct port *port, int init)
+static void ixp4xx_adjust_link(struct net_device *dev)
{
- if (mii_check_media(&port->mii, 1, init))
- eth_set_duplex(port);
- if (port->mii.force_media) { /* mii_check_media() doesn't work */
- struct net_device *dev = port->netdev;
- int cur_link = mii_link_ok(&port->mii);
- int prev_link = netif_carrier_ok(dev);
-
- if (!prev_link && cur_link) {
- printk(KERN_INFO "%s: link up\n", dev->name);
- netif_carrier_on(dev);
- } else if (prev_link && !cur_link) {
+ struct port *port = netdev_priv(dev);
+ struct phy_device *phydev = port->phydev;
+
+ if (!phydev->link) {
+ if (port->speed) {
+ port->speed = 0;
printk(KERN_INFO "%s: link down\n", dev->name);
- netif_carrier_off(dev);
}
+ return;
}
-}
+ if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+ return;
-static void mdio_thread(struct work_struct *work)
-{
- struct port *port = container_of(work, struct port, mdio_thread.work);
+ port->speed = phydev->speed;
+ port->duplex = phydev->duplex;
+
+ if (port->duplex)
+ __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+ &port->regs->tx_control[0]);
+ else
+ __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+ &port->regs->tx_control[0]);
- phy_check_media(port, 0);
- schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+ printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+ dev->name, port->speed, port->duplex ? "full" : "half");
}
@@ -412,47 +422,13 @@ static inline void debug_desc(u32 phys, struct desc *desc)
#endif
}
-static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
-{
-#if DEBUG_QUEUES
- static struct {
- int queue;
- char *name;
- } names[] = {
- { TX_QUEUE(0x10), "TX#0 " },
- { TX_QUEUE(0x20), "TX#1 " },
- { TX_QUEUE(0x00), "TX#2 " },
- { RXFREE_QUEUE(0x10), "RX-free#0 " },
- { RXFREE_QUEUE(0x20), "RX-free#1 " },
- { RXFREE_QUEUE(0x00), "RX-free#2 " },
- { TXDONE_QUEUE, "TX-done " },
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(names); i++)
- if (names[i].queue == queue)
- break;
-
- printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
- i < ARRAY_SIZE(names) ? names[i].name : "",
- is_get ? "->" : "<-", phys);
-#endif
-}
-
-static inline u32 queue_get_entry(unsigned int queue)
-{
- u32 phys = qmgr_get_entry(queue);
- debug_queue(queue, 1, phys);
- return phys;
-}
-
static inline int queue_get_desc(unsigned int queue, struct port *port,
int is_tx)
{
u32 phys, tab_phys, n_desc;
struct desc *tab;
- if (!(phys = queue_get_entry(queue)))
+ if (!(phys = qmgr_get_entry(queue)))
return -1;
phys &= ~0x1F; /* mask out non-address bits */
@@ -468,7 +444,6 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
static inline void queue_put_desc(unsigned int queue, u32 phys,
struct desc *desc)
{
- debug_queue(queue, 0, phys);
debug_desc(phys, desc);
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);
@@ -498,7 +473,7 @@ static void eth_rx_irq(void *pdev)
printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
qmgr_disable_irq(port->plat->rxq);
- netif_rx_schedule(dev, &port->napi);
+ netif_rx_schedule(&port->napi);
}
static int eth_poll(struct napi_struct *napi, int budget)
@@ -526,10 +501,10 @@ static int eth_poll(struct napi_struct *napi, int budget)
printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
dev->name);
#endif
- netif_rx_complete(dev, napi);
+ netif_rx_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_empty(rxq) &&
- netif_rx_reschedule(dev, napi)) {
+ netif_rx_reschedule(napi)) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: eth_poll"
" netif_rx_reschedule successed\n",
@@ -562,7 +537,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
#endif
if (!skb) {
- port->stat.rx_dropped++;
+ dev->stats.rx_dropped++;
/* put the desc back on RX-ready queue */
desc->buf_len = MAX_MRU;
desc->pkt_len = 0;
@@ -588,9 +563,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
debug_pkt(dev, "eth_poll", skb->data, skb->len);
skb->protocol = eth_type_trans(skb, dev);
- dev->last_rx = jiffies;
- port->stat.rx_packets++;
- port->stat.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
netif_receive_skb(skb);
/* put the new buffer on RX-free queue */
@@ -618,7 +592,7 @@ static void eth_txdone_irq(void *unused)
#if DEBUG_TX
printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
- while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
+ while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
u32 npe_id, n_desc;
struct port *port;
struct desc *desc;
@@ -635,8 +609,8 @@ static void eth_txdone_irq(void *unused)
debug_desc(phys, desc);
if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
- port->stat.tx_packets++;
- port->stat.tx_bytes += desc->pkt_len;
+ port->netdev->stats.tx_packets++;
+ port->netdev->stats.tx_bytes += desc->pkt_len;
dma_unmap_tx(port, desc);
#if DEBUG_TX
@@ -674,7 +648,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len > MAX_MRU)) {
dev_kfree_skb(skb);
- port->stat.tx_errors++;
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -690,7 +664,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
bytes = ALIGN(offset + len, 4);
if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
dev_kfree_skb(skb);
- port->stat.tx_dropped++;
+ dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
@@ -704,7 +678,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
#else
kfree(mem);
#endif
- port->stat.tx_dropped++;
+ dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -747,12 +721,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
}
-static struct net_device_stats *eth_stats(struct net_device *dev)
-{
- struct port *port = netdev_priv(dev);
- return &port->stat;
-}
-
static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
@@ -786,41 +754,80 @@ static void eth_set_mcast_list(struct net_device *dev)
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct port *port = netdev_priv(dev);
- unsigned int duplex_chg;
- int err;
if (!netif_running(dev))
return -EINVAL;
- err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
- if (duplex_chg)
- eth_set_duplex(port);
- return err;
+ return phy_mii_ioctl(port->phydev, if_mii(req), cmd);
+}
+
+/* ethtool support */
+
+static void ixp4xx_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct port *port = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
+ port->firmware[0], port->firmware[1],
+ port->firmware[2], port->firmware[3]);
+ strcpy(info->bus_info, "internal");
}
+static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_ethtool_gset(port->phydev, cmd);
+}
+
+static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_ethtool_sset(port->phydev, cmd);
+}
+
+static int ixp4xx_nway_reset(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_start_aneg(port->phydev);
+}
+
+static struct ethtool_ops ixp4xx_ethtool_ops = {
+ .get_drvinfo = ixp4xx_get_drvinfo,
+ .get_settings = ixp4xx_get_settings,
+ .set_settings = ixp4xx_set_settings,
+ .nway_reset = ixp4xx_nway_reset,
+ .get_link = ethtool_op_get_link,
+};
+
static int request_queues(struct port *port)
{
int err;
- err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
+ err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
+ "%s:RX-free", port->netdev->name);
if (err)
return err;
- err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
+ err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
+ "%s:RX", port->netdev->name);
if (err)
goto rel_rxfree;
- err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
+ err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
+ "%s:TX", port->netdev->name);
if (err)
goto rel_rx;
- err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
+ err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
+ "%s:TX-ready", port->netdev->name);
if (err)
goto rel_tx;
/* TX-done queue handles skbs sent out by the NPEs */
if (!ports_open) {
- err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
+ err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
+ "%s:TX-done", DRV_NAME);
if (err)
goto rel_txready;
}
@@ -944,10 +951,12 @@ static int eth_open(struct net_device *dev)
npe_name(npe));
return -EIO;
}
+ port->firmware[0] = msg.byte4;
+ port->firmware[1] = msg.byte5;
+ port->firmware[2] = msg.byte6;
+ port->firmware[3] = msg.byte7;
}
- mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
-
memset(&msg, 0, sizeof(msg));
msg.cmd = NPE_VLAN_SETRXQOSENTRY;
msg.eth_id = port->id;
@@ -985,6 +994,9 @@ static int eth_open(struct net_device *dev)
return err;
}
+ port->speed = 0; /* force "link up" message */
+ phy_start(port->phydev);
+
for (i = 0; i < ETH_ALEN; i++)
__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
__raw_writel(0x08, &port->regs->random_seed);
@@ -1012,10 +1024,8 @@ static int eth_open(struct net_device *dev)
__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
napi_enable(&port->napi);
- phy_check_media(port, 1);
eth_set_mcast_list(dev);
netif_start_queue(dev);
- schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
eth_rx_irq, dev);
@@ -1026,7 +1036,7 @@ static int eth_open(struct net_device *dev)
}
ports_open++;
/* we may already have RX data, enables IRQ */
- netif_rx_schedule(dev, &port->napi);
+ netif_rx_schedule(&port->napi);
return 0;
}
@@ -1106,25 +1116,31 @@ static int eth_close(struct net_device *dev)
printk(KERN_CRIT "%s: unable to disable loopback\n",
dev->name);
- port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
- ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
- mdio_write(dev, port->plat->phy, MII_BMCR,
- port->mii_bmcr | BMCR_PDOWN);
+ phy_stop(port->phydev);
if (!ports_open)
qmgr_disable_irq(TXDONE_QUEUE);
- cancel_rearming_delayed_work(&port->mdio_thread);
destroy_queues(port);
release_queues(port);
return 0;
}
+static const struct net_device_ops ixp4xx_netdev_ops = {
+ .ndo_open = eth_open,
+ .ndo_stop = eth_close,
+ .ndo_start_xmit = eth_xmit,
+ .ndo_set_multicast_list = eth_set_mcast_list,
+ .ndo_do_ioctl = eth_ioctl,
+
+};
+
static int __devinit eth_init_one(struct platform_device *pdev)
{
struct port *port;
struct net_device *dev;
struct eth_plat_info *plat = pdev->dev.platform_data;
u32 regs_phys;
+ char phy_id[BUS_ID_SIZE];
int err;
if (!(dev = alloc_etherdev(sizeof(struct port))))
@@ -1153,12 +1169,8 @@ static int __devinit eth_init_one(struct platform_device *pdev)
goto err_free;
}
- dev->open = eth_open;
- dev->hard_start_xmit = eth_xmit;
- dev->stop = eth_close;
- dev->get_stats = eth_stats;
- dev->do_ioctl = eth_ioctl;
- dev->set_multicast_list = eth_set_mcast_list;
+ dev->netdev_ops = &ixp4xx_netdev_ops;
+ dev->ethtool_ops = &ixp4xx_ethtool_ops;
dev->tx_queue_len = 100;
netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
@@ -1191,22 +1203,19 @@ static int __devinit eth_init_one(struct platform_device *pdev)
__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
udelay(50);
- port->mii.dev = dev;
- port->mii.mdio_read = mdio_read;
- port->mii.mdio_write = mdio_write;
- port->mii.phy_id = plat->phy;
- port->mii.phy_id_mask = 0x1F;
- port->mii.reg_num_mask = 0x1F;
+ snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
+ port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(port->phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(port->phydev);
+ }
+
+ port->phydev->irq = PHY_POLL;
printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
npe_name(port->npe));
- phy_reset(dev, plat->phy);
- port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
- ~(BMCR_RESET | BMCR_PDOWN);
- mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
-
- INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
return 0;
err_unreg:
@@ -1232,7 +1241,7 @@ static int __devexit eth_remove_one(struct platform_device *pdev)
return 0;
}
-static struct platform_driver drv = {
+static struct platform_driver ixp4xx_eth_driver = {
.driver.name = DRV_NAME,
.probe = eth_init_one,
.remove = eth_remove_one,
@@ -1240,20 +1249,19 @@ static struct platform_driver drv = {
static int __init eth_init_module(void)
{
+ int err;
if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
return -ENOSYS;
- /* All MII PHY accesses use NPE-B Ethernet registers */
- spin_lock_init(&mdio_lock);
- mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
- __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
-
- return platform_driver_register(&drv);
+ if ((err = ixp4xx_mdio_register()))
+ return err;
+ return platform_driver_register(&ixp4xx_eth_driver);
}
static void __exit eth_cleanup_module(void)
{
- platform_driver_unregister(&drv);
+ platform_driver_unregister(&ixp4xx_eth_driver);
+ ixp4xx_mdio_remove();
}
MODULE_AUTHOR("Krzysztof Halasa");
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
new file mode 100644
index 00000000000..1cf2f949c0b
--- /dev/null
+++ b/drivers/net/arm/ks8695net.c
@@ -0,0 +1,1676 @@
+/*
+ * Micrel KS8695 (Centaur) Ethernet.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Copyright 2008 Simtec Electronics
+ * Daniel Silverstone <dsilvers@simtec.co.uk>
+ * Vincent Sanders <vince@simtec.co.uk>
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+
+#include <mach/regs-switch.h>
+#include <mach/regs-misc.h>
+
+#include "ks8695net.h"
+
+#define MODULENAME "ks8695_ether"
+#define MODULEVERSION "1.01"
+
+/*
+ * Transmit and device reset timeout, default 5 seconds.
+ */
+static int watchdog = 5000;
+
+/* Hardware structures */
+
+/**
+ * struct rx_ring_desc - Receive descriptor ring element
+ * @status: The status of the descriptor element (E.g. who owns it)
+ * @length: The number of bytes in the block pointed to by data_ptr
+ * @data_ptr: The physical address of the data block to receive into
+ * @next_desc: The physical address of the next descriptor element.
+ */
+struct rx_ring_desc {
+ __le32 status;
+ __le32 length;
+ __le32 data_ptr;
+ __le32 next_desc;
+};
+
+/**
+ * struct tx_ring_desc - Transmit descriptor ring element
+ * @owner: Who owns the descriptor
+ * @status: The number of bytes in the block pointed to by data_ptr
+ * @data_ptr: The physical address of the data block to receive into
+ * @next_desc: The physical address of the next descriptor element.
+ */
+struct tx_ring_desc {
+ __le32 owner;
+ __le32 status;
+ __le32 data_ptr;
+ __le32 next_desc;
+};
+
+/**
+ * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings.
+ * @skb: The buffer in the ring
+ * @dma_ptr: The mapped DMA pointer of the buffer
+ * @length: The number of bytes mapped to dma_ptr
+ */
+struct ks8695_skbuff {
+ struct sk_buff *skb;
+ dma_addr_t dma_ptr;
+ u32 length;
+};
+
+/* Private device structure */
+
+#define MAX_TX_DESC 8
+#define MAX_TX_DESC_MASK 0x7
+#define MAX_RX_DESC 16
+#define MAX_RX_DESC_MASK 0xf
+
+#define MAX_RXBUF_SIZE 0x700
+
+#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
+#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
+#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
+
+/**
+ * enum ks8695_dtype - Device type
+ * @KS8695_DTYPE_WAN: This device is a WAN interface
+ * @KS8695_DTYPE_LAN: This device is a LAN interface
+ * @KS8695_DTYPE_HPNA: This device is an HPNA interface
+ */
+enum ks8695_dtype {
+ KS8695_DTYPE_WAN,
+ KS8695_DTYPE_LAN,
+ KS8695_DTYPE_HPNA,
+};
+
+/**
+ * struct ks8695_priv - Private data for the KS8695 Ethernet
+ * @in_suspend: Flag to indicate if we're suspending/resuming
+ * @ndev: The net_device for this interface
+ * @dev: The platform device object for this interface
+ * @dtype: The type of this device
+ * @io_regs: The ioremapped registers for this interface
+ * @rx_irq_name: The textual name of the RX IRQ from the platform data
+ * @tx_irq_name: The textual name of the TX IRQ from the platform data
+ * @link_irq_name: The textual name of the link IRQ from the
+ * platform data if available
+ * @rx_irq: The IRQ number for the RX IRQ
+ * @tx_irq: The IRQ number for the TX IRQ
+ * @link_irq: The IRQ number for the link IRQ if available
+ * @regs_req: The resource request for the registers region
+ * @phyiface_req: The resource request for the phy/switch region
+ * if available
+ * @phyiface_regs: The ioremapped registers for the phy/switch if available
+ * @ring_base: The base pointer of the dma coherent memory for the rings
+ * @ring_base_dma: The DMA mapped equivalent of ring_base
+ * @tx_ring: The pointer in ring_base of the TX ring
+ * @tx_ring_used: The number of slots in the TX ring which are occupied
+ * @tx_ring_next_slot: The next slot to fill in the TX ring
+ * @tx_ring_dma: The DMA mapped equivalent of tx_ring
+ * @tx_buffers: The sk_buff mappings for the TX ring
+ * @txq_lock: A lock to protect the tx_buffers tx_ring_used etc variables
+ * @rx_ring: The pointer in ring_base of the RX ring
+ * @rx_ring_dma: The DMA mapped equivalent of rx_ring
+ * @rx_buffers: The sk_buff mappings for the RX ring
+ * @next_rx_desc_read: The next RX descriptor to read from on IRQ
+ * @msg_enable: The flags for which messages to emit
+ */
+struct ks8695_priv {
+ int in_suspend;
+ struct net_device *ndev;
+ struct device *dev;
+ enum ks8695_dtype dtype;
+ void __iomem *io_regs;
+
+ const char *rx_irq_name, *tx_irq_name, *link_irq_name;
+ int rx_irq, tx_irq, link_irq;
+
+ struct resource *regs_req, *phyiface_req;
+ void __iomem *phyiface_regs;
+
+ void *ring_base;
+ dma_addr_t ring_base_dma;
+
+ struct tx_ring_desc *tx_ring;
+ int tx_ring_used;
+ int tx_ring_next_slot;
+ dma_addr_t tx_ring_dma;
+ struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
+ spinlock_t txq_lock;
+
+ struct rx_ring_desc *rx_ring;
+ dma_addr_t rx_ring_dma;
+ struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
+ int next_rx_desc_read;
+
+ int msg_enable;
+};
+
+/* Register access */
+
+/**
+ * ks8695_readreg - Read from a KS8695 ethernet register
+ * @ksp: The device to read from
+ * @reg: The register to read
+ */
+static inline u32
+ks8695_readreg(struct ks8695_priv *ksp, int reg)
+{
+ return readl(ksp->io_regs + reg);
+}
+
+/**
+ * ks8695_writereg - Write to a KS8695 ethernet register
+ * @ksp: The device to write to
+ * @reg: The register to write
+ * @value: The value to write to the register
+ */
+static inline void
+ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
+{
+ writel(value, ksp->io_regs + reg);
+}
+
+/* Utility functions */
+
+/**
+ * ks8695_port_type - Retrieve port-type as user-friendly string
+ * @ksp: The device to return the type for
+ *
+ * Returns a string indicating which of the WAN, LAN or HPNA
+ * ports this device is likely to represent.
+ */
+static const char *
+ks8695_port_type(struct ks8695_priv *ksp)
+{
+ switch (ksp->dtype) {
+ case KS8695_DTYPE_LAN:
+ return "LAN";
+ case KS8695_DTYPE_WAN:
+ return "WAN";
+ case KS8695_DTYPE_HPNA:
+ return "HPNA";
+ }
+
+ return "UNKNOWN";
+}
+
+/**
+ * ks8695_update_mac - Update the MAC registers in the device
+ * @ksp: The device to update
+ *
+ * Updates the MAC registers in the KS8695 device from the address in the
+ * net_device structure associated with this interface.
+ */
+static void
+ks8695_update_mac(struct ks8695_priv *ksp)
+{
+ /* Update the HW with the MAC from the net_device */
+ struct net_device *ndev = ksp->ndev;
+ u32 machigh, maclow;
+
+ maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
+ machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));
+
+ ks8695_writereg(ksp, KS8695_MAL, maclow);
+ ks8695_writereg(ksp, KS8695_MAH, machigh);
+
+}
+
+/**
+ * ks8695_refill_rxbuffers - Re-fill the RX buffer ring
+ * @ksp: The device to refill
+ *
+ * Iterates the RX ring of the device looking for empty slots.
+ * For each empty slot, we allocate and map a new SKB and give it
+ * to the hardware.
+ * This can be called from interrupt context safely.
+ */
+static void
+ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
+{
+ /* Run around the RX ring, filling in any missing sk_buff's */
+ int buff_n;
+
+ for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
+ if (!ksp->rx_buffers[buff_n].skb) {
+ struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
+ dma_addr_t mapping;
+
+ ksp->rx_buffers[buff_n].skb = skb;
+ if (skb == NULL) {
+ /* Failed to allocate one, perhaps
+ * we'll try again later.
+ */
+ break;
+ }
+
+ mapping = dma_map_single(ksp->dev, skb->data,
+ MAX_RXBUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
+ /* Failed to DMA map this SKB, try later */
+ dev_kfree_skb_irq(skb);
+ ksp->rx_buffers[buff_n].skb = NULL;
+ break;
+ }
+ ksp->rx_buffers[buff_n].dma_ptr = mapping;
+ skb->dev = ksp->ndev;
+ ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;
+
+ /* Record this into the DMA ring */
+ ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
+ ksp->rx_ring[buff_n].length =
+ cpu_to_le32(MAX_RXBUF_SIZE);
+
+ wmb();
+
+ /* And give ownership over to the hardware */
+ ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
+ }
+ }
+}
+
+/* Maximum number of multicast addresses which the KS8695 HW supports */
+#define KS8695_NR_ADDRESSES 16
+
+/**
+ * ks8695_init_partial_multicast - Init the mcast addr registers
+ * @ksp: The device to initialise
+ * @addr: The multicast address list to use
+ * @nr_addr: The number of addresses in the list
+ *
+ * This routine is a helper for ks8695_set_multicast - it writes
+ * the additional-address registers in the KS8695 ethernet device
+ * and cleans up any others left behind.
+ */
+static void
+ks8695_init_partial_multicast(struct ks8695_priv *ksp,
+ struct dev_mc_list *addr,
+ int nr_addr)
+{
+ u32 low, high;
+ int i;
+
+ for (i = 0; i < nr_addr; i++, addr = addr->next) {
+ /* Ran out of addresses? */
+ if (!addr)
+ break;
+ /* Ran out of space in chip? */
+ BUG_ON(i == KS8695_NR_ADDRESSES);
+
+ low = (addr->dmi_addr[2] << 24) | (addr->dmi_addr[3] << 16) |
+ (addr->dmi_addr[4] << 8) | (addr->dmi_addr[5]);
+ high = (addr->dmi_addr[0] << 8) | (addr->dmi_addr[1]);
+
+ ks8695_writereg(ksp, KS8695_AAL_(i), low);
+ ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
+ }
+
+ /* Clear the remaining Additional Station Addresses */
+ for (; i < KS8695_NR_ADDRESSES; i++) {
+ ks8695_writereg(ksp, KS8695_AAL_(i), 0);
+ ks8695_writereg(ksp, KS8695_AAH_(i), 0);
+ }
+}
+
+/* Interrupt handling */
+
+/**
+ * ks8695_tx_irq - Transmit IRQ handler
+ * @irq: The IRQ which went off (ignored)
+ * @dev_id: The net_device for the interrupt
+ *
+ * Process the TX ring, clearing out any transmitted slots.
+ * Allows the net_device to pass us new packets once slots are
+ * freed.
+ */
+static irqreturn_t
+ks8695_tx_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ int buff_n;
+
+ for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
+ if (ksp->tx_buffers[buff_n].skb &&
+ !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
+ rmb();
+ /* An SKB which is not owned by HW is present */
+ /* Update the stats for the net_device */
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;
+
+ /* Free the packet from the ring */
+ ksp->tx_ring[buff_n].data_ptr = 0;
+
+ /* Free the sk_buff */
+ dma_unmap_single(ksp->dev,
+ ksp->tx_buffers[buff_n].dma_ptr,
+ ksp->tx_buffers[buff_n].length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
+ ksp->tx_buffers[buff_n].skb = NULL;
+ ksp->tx_ring_used--;
+ }
+ }
+
+ netif_wake_queue(ndev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ks8695_rx_irq - Receive IRQ handler
+ * @irq: The IRQ which went off (ignored)
+ * @dev_id: The net_device for the interrupt
+ *
+ * Process the RX ring, passing any received packets up to the
+ * host. If we received anything other than errors, we then
+ * refill the ring.
+ */
+static irqreturn_t
+ks8695_rx_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int buff_n;
+ u32 flags;
+ int pktlen;
+ int last_rx_processed = -1;
+
+ buff_n = ksp->next_rx_desc_read;
+ do {
+ if (ksp->rx_buffers[buff_n].skb &&
+ !(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN))) {
+ rmb();
+ flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
+ /* Found an SKB which we own, this means we
+ * received a packet
+ */
+ if ((flags & (RDES_FS | RDES_LS)) !=
+ (RDES_FS | RDES_LS)) {
+ /* This packet is not the first and
+ * the last segment. Therefore it is
+ * a "spanning" packet and we can't
+ * handle it
+ */
+ goto rx_failure;
+ }
+
+ if (flags & (RDES_ES | RDES_RE)) {
+ /* It's an error packet */
+ ndev->stats.rx_errors++;
+ if (flags & RDES_TL)
+ ndev->stats.rx_length_errors++;
+ if (flags & RDES_RF)
+ ndev->stats.rx_length_errors++;
+ if (flags & RDES_CE)
+ ndev->stats.rx_crc_errors++;
+ if (flags & RDES_RE)
+ ndev->stats.rx_missed_errors++;
+
+ goto rx_failure;
+ }
+
+ pktlen = flags & RDES_FLEN;
+ pktlen -= 4; /* Drop the CRC */
+
+ /* Retrieve the sk_buff */
+ skb = ksp->rx_buffers[buff_n].skb;
+
+ /* Clear it from the ring */
+ ksp->rx_buffers[buff_n].skb = NULL;
+ ksp->rx_ring[buff_n].data_ptr = 0;
+
+ /* Unmap the SKB */
+ dma_unmap_single(ksp->dev,
+ ksp->rx_buffers[buff_n].dma_ptr,
+ ksp->rx_buffers[buff_n].length,
+ DMA_FROM_DEVICE);
+
+ /* Relinquish the SKB to the network layer */
+ skb_put(skb, pktlen);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_rx(skb);
+
+ /* Record stats */
+ ndev->last_rx = jiffies;
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += pktlen;
+ goto rx_finished;
+
+rx_failure:
+ /* This ring entry is an error, but we can
+ * re-use the skb
+ */
+ /* Give the ring entry back to the hardware */
+ ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
+rx_finished:
+ /* And note this as processed so we can start
+ * from here next time
+ */
+ last_rx_processed = buff_n;
+ } else {
+ /* Ran out of things to process, stop now */
+ break;
+ }
+ buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
+ } while (buff_n != ksp->next_rx_desc_read);
+
+ /* And note which RX descriptor we last did anything with */
+ if (likely(last_rx_processed != -1))
+ ksp->next_rx_desc_read =
+ (last_rx_processed + 1) & MAX_RX_DESC_MASK;
+
+ /* And refill the buffers */
+ ks8695_refill_rxbuffers(ksp);
+
+ /* Kick the RX DMA engine, in case it became suspended */
+ ks8695_writereg(ksp, KS8695_DRSC, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ks8695_link_irq - Link change IRQ handler
+ * @irq: The IRQ which went off (ignored)
+ * @dev_id: The net_device for the interrupt
+ *
+ * The WAN interface can generate an IRQ when the link changes,
+ * report this to the net layer and the user.
+ */
+static irqreturn_t
+ks8695_link_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ if (ctrl & WMC_WLS) {
+ netif_carrier_on(ndev);
+ if (netif_msg_link(ksp))
+ dev_info(ksp->dev,
+ "%s: Link is now up (10%sMbps/%s-duplex)\n",
+ ndev->name,
+ (ctrl & WMC_WSS) ? "0" : "",
+ (ctrl & WMC_WDS) ? "Full" : "Half");
+ } else {
+ netif_carrier_off(ndev);
+ if (netif_msg_link(ksp))
+ dev_info(ksp->dev, "%s: Link is now down.\n",
+ ndev->name);
+ }
+
+ return IRQ_HANDLED;
+}
+
+
+/* KS8695 Device functions */
+
+/**
+ * ks8695_reset - Reset a KS8695 ethernet interface
+ * @ksp: The interface to reset
+ *
+ * Perform an engine reset of the interface and re-program it
+ * with sensible defaults.
+ */
+static void
+ks8695_reset(struct ks8695_priv *ksp)
+{
+ int reset_timeout = watchdog;
+ /* Issue the reset via the TX DMA control register */
+ ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
+ while (reset_timeout--) {
+ if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
+ break;
+ msleep(1);
+ }
+
+ if (reset_timeout == 0) {
+ dev_crit(ksp->dev,
+ "Timeout waiting for DMA engines to reset\n");
+ /* And blithely carry on */
+ }
+
+ /* Definitely wait long enough before attempting to program
+ * the engines
+ */
+ msleep(10);
+
+ /* RX: unicast and broadcast */
+ ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
+ /* TX: pad and add CRC */
+ ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
+}
+
+/**
+ * ks8695_shutdown - Shut down a KS8695 ethernet interface
+ * @ksp: The interface to shut down
+ *
+ * This disables packet RX/TX, cleans up IRQs, drains the rings,
+ * and basically places the interface into a clean shutdown
+ * state.
+ */
+static void
+ks8695_shutdown(struct ks8695_priv *ksp)
+{
+ u32 ctrl;
+ int buff_n;
+
+ /* Disable packet transmission */
+ ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+ ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);
+
+ /* Disable packet reception */
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+ ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);
+
+ /* Release the IRQs */
+ free_irq(ksp->rx_irq, ksp->ndev);
+ free_irq(ksp->tx_irq, ksp->ndev);
+ if (ksp->link_irq != -1)
+ free_irq(ksp->link_irq, ksp->ndev);
+
+ /* Throw away any pending TX packets */
+ for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
+ if (ksp->tx_buffers[buff_n].skb) {
+ /* Remove this SKB from the TX ring */
+ ksp->tx_ring[buff_n].owner = 0;
+ ksp->tx_ring[buff_n].status = 0;
+ ksp->tx_ring[buff_n].data_ptr = 0;
+
+ /* Unmap and bin this SKB */
+ dma_unmap_single(ksp->dev,
+ ksp->tx_buffers[buff_n].dma_ptr,
+ ksp->tx_buffers[buff_n].length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
+ ksp->tx_buffers[buff_n].skb = NULL;
+ }
+ }
+
+ /* Purge the RX buffers */
+ for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
+ if (ksp->rx_buffers[buff_n].skb) {
+ /* Remove the SKB from the RX ring */
+ ksp->rx_ring[buff_n].status = 0;
+ ksp->rx_ring[buff_n].data_ptr = 0;
+
+ /* Unmap and bin the SKB */
+ dma_unmap_single(ksp->dev,
+ ksp->rx_buffers[buff_n].dma_ptr,
+ ksp->rx_buffers[buff_n].length,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
+ ksp->rx_buffers[buff_n].skb = NULL;
+ }
+ }
+}
+
+
+/**
+ * ks8695_setup_irq - IRQ setup helper function
+ * @irq: The IRQ number to claim
+ * @irq_name: The name to give the IRQ claimant
+ * @handler: The function to call to handle the IRQ
+ * @ndev: The net_device to pass in as the dev_id argument to the handler
+ *
+ * Return 0 on success.
+ */
+static int
+ks8695_setup_irq(int irq, const char *irq_name,
+ irq_handler_t handler, struct net_device *ndev)
+{
+ int ret;
+
+ ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
+
+ if (ret) {
+ dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ks8695_init_net - Initialise a KS8695 ethernet interface
+ * @ksp: The interface to initialise
+ *
+ * This routine fills the RX ring, initialises the DMA engines,
+ * allocates the IRQs and then starts the packet TX and RX
+ * engines.
+ */
+static int
+ks8695_init_net(struct ks8695_priv *ksp)
+{
+ int ret;
+ u32 ctrl;
+
+ ks8695_refill_rxbuffers(ksp);
+
+ /* Initialise the DMA engines */
+ ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
+ ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);
+
+ /* Request the IRQs */
+ ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
+ ks8695_rx_irq, ksp->ndev);
+ if (ret)
+ return ret;
+ ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
+ ks8695_tx_irq, ksp->ndev);
+ if (ret)
+ return ret;
+ if (ksp->link_irq != -1) {
+ ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
+ ks8695_link_irq, ksp->ndev);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the ring indices */
+ ksp->next_rx_desc_read = 0;
+ ksp->tx_ring_next_slot = 0;
+ ksp->tx_ring_used = 0;
+
+ /* Bring up transmission */
+ ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+ /* Enable packet transmission */
+ ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);
+
+ /* Bring up the reception */
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+ /* Enable packet reception */
+ ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
+ /* And start the DMA engine */
+ ks8695_writereg(ksp, KS8695_DRSC, 0);
+
+ /* All done */
+ return 0;
+}
+
+/**
+ * ks8695_release_device - HW resource release for KS8695 e-net
+ * @ksp: The device to be freed
+ *
+ * This deallocates io memory regions, dma-coherent regions etc
+ * which were allocated in ks8695_probe.
+ */
+static void
+ks8695_release_device(struct ks8695_priv *ksp)
+{
+ /* Unmap the registers */
+ iounmap(ksp->io_regs);
+ if (ksp->phyiface_regs)
+ iounmap(ksp->phyiface_regs);
+
+ /* And release the request */
+ release_resource(ksp->regs_req);
+ kfree(ksp->regs_req);
+ if (ksp->phyiface_req) {
+ release_resource(ksp->phyiface_req);
+ kfree(ksp->phyiface_req);
+ }
+
+ /* Free the ring buffers */
+ dma_free_coherent(ksp->dev, RING_DMA_SIZE,
+ ksp->ring_base, ksp->ring_base_dma);
+}
+
+/* Ethtool support */
+
+/**
+ * ks8695_get_msglevel - Get the messages enabled for emission
+ * @ndev: The network device to read from
+ */
+static u32
+ks8695_get_msglevel(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ return ksp->msg_enable;
+}
+
+/**
+ * ks8695_set_msglevel - Set the messages enabled for emission
+ * @ndev: The network device to configure
+ * @value: The messages to set for emission
+ */
+static void
+ks8695_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ ksp->msg_enable = value;
+}
+
+/**
+ * ks8695_get_settings - Get device-specific settings.
+ * @ndev: The network device to read settings from
+ * @cmd: The ethtool structure to read into
+ */
+static int
+ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ /* All ports on the KS8695 support these... */
+ cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_TP | SUPPORTED_MII);
+ cmd->transceiver = XCVR_INTERNAL;
+
+ /* Port specific extras */
+ switch (ksp->dtype) {
+ case KS8695_DTYPE_HPNA:
+ cmd->phy_address = 0;
+ /* not supported for HPNA */
+ cmd->autoneg = AUTONEG_DISABLE;
+
+ /* BUG: Erm, dtype hpna implies no phy regs */
+ /*
+ ctrl = readl(KS8695_MISC_VA + KS8695_HMC);
+ cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
+ cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
+ */
+ return -EOPNOTSUPP;
+ case KS8695_DTYPE_WAN:
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ cmd->port = PORT_MII;
+ cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+ cmd->phy_address = 0;
+
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ if ((ctrl & WMC_WAND) == 0) {
+ /* auto-negotiation is enabled */
+ cmd->advertising |= ADVERTISED_Autoneg;
+ if (ctrl & WMC_WANA100F)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (ctrl & WMC_WANA100H)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (ctrl & WMC_WANA10F)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (ctrl & WMC_WANA10H)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (ctrl & WMC_WANAP)
+ cmd->advertising |= ADVERTISED_Pause;
+ cmd->autoneg = AUTONEG_ENABLE;
+
+ cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+ cmd->duplex = (ctrl & WMC_WDS) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ /* auto-negotiation is disabled */
+ cmd->autoneg = AUTONEG_DISABLE;
+
+ cmd->speed = (ctrl & WMC_WANF100) ?
+ SPEED_100 : SPEED_10;
+ cmd->duplex = (ctrl & WMC_WANFF) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ break;
+ case KS8695_DTYPE_LAN:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ks8695_set_settings - Set device-specific settings.
+ * @ndev: The network device to configure
+ * @cmd: The settings to configure
+ */
+static int
+ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
+ return -EINVAL;
+ if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+ if (cmd->port != PORT_MII)
+ return -EINVAL;
+ if (cmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if ((cmd->autoneg != AUTONEG_DISABLE) &&
+ (cmd->autoneg != AUTONEG_ENABLE))
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if ((cmd->advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full)) == 0)
+ return -EINVAL;
+
+ switch (ksp->dtype) {
+ case KS8695_DTYPE_HPNA:
+ /* HPNA does not support auto-negotiation. */
+ return -EINVAL;
+ case KS8695_DTYPE_WAN:
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
+ WMC_WANA10F | WMC_WANA10H);
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ ctrl |= WMC_WANA100F;
+ if (cmd->advertising & ADVERTISED_100baseT_Half)
+ ctrl |= WMC_WANA100H;
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ ctrl |= WMC_WANA10F;
+ if (cmd->advertising & ADVERTISED_10baseT_Half)
+ ctrl |= WMC_WANA10H;
+
+ /* force a re-negotiation */
+ ctrl |= WMC_WANR;
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
+ break;
+ case KS8695_DTYPE_LAN:
+ return -EOPNOTSUPP;
+ }
+
+ } else {
+ switch (ksp->dtype) {
+ case KS8695_DTYPE_HPNA:
+ /* BUG: dtype_hpna implies no phy registers */
+ /*
+ ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
+
+ ctrl &= ~(HMC_HSS | HMC_HDS);
+ if (cmd->speed == SPEED_100)
+ ctrl |= HMC_HSS;
+ if (cmd->duplex == DUPLEX_FULL)
+ ctrl |= HMC_HDS;
+
+ __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
+ */
+ return -EOPNOTSUPP;
+ case KS8695_DTYPE_WAN:
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ /* disable auto-negotiation */
+ ctrl |= WMC_WAND;
+ ctrl &= ~(WMC_WANF100 | WMC_WANFF);
+
+ if (cmd->speed == SPEED_100)
+ ctrl |= WMC_WANF100;
+ if (cmd->duplex == DUPLEX_FULL)
+ ctrl |= WMC_WANFF;
+
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
+ break;
+ case KS8695_DTYPE_LAN:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ks8695_nwayreset - Restart the autonegotiation on the port.
+ * @ndev: The network device to restart autonegotiation on
+ */
+static int
+ks8695_nwayreset(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ switch (ksp->dtype) {
+ case KS8695_DTYPE_HPNA:
+ /* No phy means no autonegotiation on hpna */
+ return -EINVAL;
+ case KS8695_DTYPE_WAN:
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ if ((ctrl & WMC_WAND) == 0)
+ writel(ctrl | WMC_WANR,
+ ksp->phyiface_regs + KS8695_WMC);
+ else
+ /* auto-negotiation not enabled */
+ return -EINVAL;
+ break;
+ case KS8695_DTYPE_LAN:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ks8695_get_link - Retrieve link status of network interface
+ * @ndev: The network interface to retrieve the link status of.
+ */
+static u32
+ks8695_get_link(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ switch (ksp->dtype) {
+ case KS8695_DTYPE_HPNA:
+ /* HPNA always has link */
+ return 1;
+ case KS8695_DTYPE_WAN:
+		/* WAN: the link status can be read from the PHY interface */
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ return ctrl & WMC_WLS;
+	case KS8695_DTYPE_LAN:
+		/* Direct-attached switch: report the link as always up */
+		return 1;
+ }
+ return 0;
+}
+
+/**
+ * ks8695_get_pause - Retrieve network pause/flow-control advertising
+ * @ndev: The device to retrieve settings from
+ * @param: The structure to fill out with the information
+ */
+static void
+ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ switch (ksp->dtype) {
+ case KS8695_DTYPE_HPNA:
+ /* No phy link on hpna to configure */
+ return;
+ case KS8695_DTYPE_WAN:
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ /* advertise Pause */
+ param->autoneg = (ctrl & WMC_WANAP);
+
+ /* current Rx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+ param->rx_pause = (ctrl & DRXC_RFCE);
+
+ /* current Tx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+ param->tx_pause = (ctrl & DTXC_TFCE);
+ break;
+ case KS8695_DTYPE_LAN:
+ /* The LAN's "phy" is a direct-attached switch */
+ return;
+ }
+}
+
+/**
+ * ks8695_set_pause - Configure pause/flow-control
+ * @ndev: The device to configure
+ * @param: The pause parameters to set
+ *
+ * TODO: Implement this
+ */
+static int
+ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ks8695_get_drvinfo - Retrieve driver information
+ * @ndev: The network device to retrieve info about
+ * @info: The info structure to fill out.
+ */
+static void
+ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+ strlcpy(info->version, MODULEVERSION, sizeof(info->version));
+ strlcpy(info->bus_info, ndev->dev.parent->bus_id,
+ sizeof(info->bus_info));
+}
+
+static struct ethtool_ops ks8695_ethtool_ops = {
+ .get_msglevel = ks8695_get_msglevel,
+ .set_msglevel = ks8695_set_msglevel,
+ .get_settings = ks8695_get_settings,
+ .set_settings = ks8695_set_settings,
+ .nway_reset = ks8695_nwayreset,
+ .get_link = ks8695_get_link,
+ .get_pauseparam = ks8695_get_pause,
+ .set_pauseparam = ks8695_set_pause,
+ .get_drvinfo = ks8695_get_drvinfo,
+};
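+
+/* Illustration only (not part of the driver): these ethtool_ops are
+ * reached from userspace through the SIOCETHTOOL ioctl, so a forced
+ * 100/full configuration ends up in ks8695_set_settings().  A minimal
+ * userspace sketch, assuming the interface is named "eth0" and sockfd
+ * is any open AF_INET socket (using <linux/ethtool.h>,
+ * <linux/sockios.h> and <net/if.h>), would look roughly like:
+ *
+ *	struct ethtool_cmd ecmd = {
+ *		.cmd         = ETHTOOL_SSET,
+ *		.speed       = SPEED_100,
+ *		.duplex      = DUPLEX_FULL,
+ *		.port        = PORT_MII,
+ *		.transceiver = XCVR_INTERNAL,
+ *		.autoneg     = AUTONEG_DISABLE,
+ *	};
+ *	struct ifreq ifr;
+ *
+ *	memset(&ifr, 0, sizeof(ifr));
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (char *)&ecmd;
+ *	ioctl(sockfd, SIOCETHTOOL, &ifr);
+ *
+ * which is what "ethtool -s eth0 speed 100 duplex full autoneg off"
+ * does under the hood.
+ */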
+
+/* Network device interface functions */
+
+/**
+ * ks8695_set_mac - Update MAC in net dev and HW
+ * @ndev: The network device to update
+ * @addr: The new MAC address to set
+ */
+static int
+ks8695_set_mac(struct net_device *ndev, void *addr)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ struct sockaddr *address = addr;
+
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+
+ ks8695_update_mac(ksp);
+
+ dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
+ ndev->name, ndev->dev_addr);
+
+ return 0;
+}
+
+/**
+ * ks8695_set_multicast - Set up the multicast behaviour of the interface
+ * @ndev: The net_device to configure
+ *
+ * This routine, called by the net layer, configures promiscuity
+ * and multicast reception behaviour for the interface.
+ */
+static void
+ks8695_set_multicast(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* enable promiscuous mode */
+ ctrl |= DRXC_RA;
+	} else {
+		/* disable promiscuous mode */
+		ctrl &= ~DRXC_RA;
+	}
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ /* enable all multicast mode */
+ ctrl |= DRXC_RM;
+ } else if (ndev->mc_count > KS8695_NR_ADDRESSES) {
+ /* more specific multicast addresses than can be
+ * handled in hardware
+ */
+ ctrl |= DRXC_RM;
+ } else {
+ /* enable specific multicasts */
+ ctrl &= ~DRXC_RM;
+ ks8695_init_partial_multicast(ksp, ndev->mc_list,
+ ndev->mc_count);
+ }
+
+ ks8695_writereg(ksp, KS8695_DRXC, ctrl);
+}
+
+/**
+ * ks8695_timeout - Handle a network transmit timeout.
+ * @ndev: The net_device which timed out.
+ *
+ * A transmission timed out, so reset and reinitialise the device.
+ */
+static void
+ks8695_timeout(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ ks8695_shutdown(ksp);
+
+ ks8695_reset(ksp);
+
+ ks8695_update_mac(ksp);
+
+	/* We ignore the return value: the device initialised
+	 * successfully before, so it will probably be okay to
+	 * initialise it again.
+	 */
+ ks8695_init_net(ksp);
+
+ /* Reconfigure promiscuity etc */
+ ks8695_set_multicast(ndev);
+
+ /* And start the TX queue once more */
+ netif_start_queue(ndev);
+}
+
+/**
+ * ks8695_start_xmit - Start a packet transmission
+ * @skb: The packet to transmit
+ * @ndev: The network device to send the packet on
+ *
+ * This routine, called by the net layer, takes ownership of the
+ * sk_buff and adds it to the TX ring. It then kicks the TX DMA
+ * engine to ensure transmission begins.
+ */
+static int
+ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ int buff_n;
+ dma_addr_t dmap;
+
+ spin_lock_irq(&ksp->txq_lock);
+
+ if (ksp->tx_ring_used == MAX_TX_DESC) {
+		/* Called with no free TX slots; ask the stack to retry later */
+ spin_unlock_irq(&ksp->txq_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ buff_n = ksp->tx_ring_next_slot;
+
+ BUG_ON(ksp->tx_buffers[buff_n].skb);
+
+ dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
+ /* Failed to DMA map this SKB, give it back for now */
+ spin_unlock_irq(&ksp->txq_lock);
+		dev_dbg(ksp->dev, "%s: Could not map DMA memory for "
+			"transmission, trying later\n", ndev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ ksp->tx_buffers[buff_n].dma_ptr = dmap;
+ /* Mapped okay, store the buffer pointer and length for later */
+ ksp->tx_buffers[buff_n].skb = skb;
+ ksp->tx_buffers[buff_n].length = skb->len;
+
+ /* Fill out the TX descriptor */
+ ksp->tx_ring[buff_n].data_ptr =
+ cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
+ ksp->tx_ring[buff_n].status =
+ cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
+ (skb->len & TDES_TBS));
+
+ wmb();
+
+ /* Hand it over to the hardware */
+ ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);
+
+ if (++ksp->tx_ring_used == MAX_TX_DESC)
+ netif_stop_queue(ndev);
+
+ ndev->trans_start = jiffies;
+
+ /* Kick the TX DMA in case it decided to go IDLE */
+ ks8695_writereg(ksp, KS8695_DTSC, 0);
+
+ /* And update the next ring slot */
+ ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;
+
+ spin_unlock_irq(&ksp->txq_lock);
+ return NETDEV_TX_OK;
+}
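+
+/* Illustration of the TX descriptor lifecycle used above: the driver
+ * fills data_ptr and status for a slot, then hands the slot over by
+ * setting TDES_OWN in the owner word; once the DMA engine has sent the
+ * buffer it clears TDES_OWN again, at which point the (separate) TX
+ * completion path can unmap the buffer, free the skb and wake the
+ * queue.  The wmb() in ks8695_start_xmit() orders the descriptor
+ * writes before the ownership handover so the engine never sees a
+ * half-built descriptor.
+ */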
+
+/**
+ * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface
+ * @ndev: The net_device to stop
+ *
+ * This disables the TX queue and cleans up a KS8695 ethernet
+ * device.
+ */
+static int
+ks8695_stop(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ ks8695_shutdown(ksp);
+
+ return 0;
+}
+
+/**
+ * ks8695_open - Open (bring up) a KS8695 ethernet interface
+ * @ndev: The net_device to open
+ *
+ * This resets the device, configures the MAC, initialises the RX
+ * ring and DMA engines, and starts the TX queue for a KS8695
+ * ethernet device.
+ */
+static int
+ks8695_open(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ int ret;
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ ks8695_reset(ksp);
+
+ ks8695_update_mac(ksp);
+
+ ret = ks8695_init_net(ksp);
+ if (ret) {
+ ks8695_shutdown(ksp);
+ return ret;
+ }
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+/* Platform device driver */
+
+/**
+ * ks8695_init_switch - Init LAN switch to known good defaults.
+ * @ksp: The device to initialise
+ *
+ * This initialises the LAN switch in the KS8695 to a known-good
+ * set of defaults.
+ */
+static void __devinit
+ks8695_init_switch(struct ks8695_priv *ksp)
+{
+ u32 ctrl;
+
+ /* Default value for SEC0 according to datasheet */
+ ctrl = 0x40819e00;
+
+ /* LED0 = Speed LED1 = Link/Activity */
+ ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
+ ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);
+
+ /* Enable Switch */
+ ctrl |= SEC0_ENABLE;
+
+ writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);
+
+ /* Defaults for SEC1 */
+ writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
+}
+
+/**
+ * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults
+ * @ksp: The device to initialise
+ *
+ * This initialises a KS8695's WAN phy to sensible values for
+ * autonegotiation etc.
+ */
+static void __devinit
+ks8695_init_wan_phy(struct ks8695_priv *ksp)
+{
+ u32 ctrl;
+
+ /* Support auto-negotiation */
+ ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
+ WMC_WANA10F | WMC_WANA10H);
+
+ /* LED0 = Activity , LED1 = Link */
+ ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);
+
+ /* Restart Auto-negotiation */
+ ctrl |= WMC_WANR;
+
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
+
+ writel(0, ksp->phyiface_regs + KS8695_WPPM);
+ writel(0, ksp->phyiface_regs + KS8695_PPS);
+}
+
+static const struct net_device_ops ks8695_netdev_ops = {
+ .ndo_open = ks8695_open,
+ .ndo_stop = ks8695_stop,
+ .ndo_start_xmit = ks8695_start_xmit,
+ .ndo_tx_timeout = ks8695_timeout,
+ .ndo_set_mac_address = ks8695_set_mac,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = ks8695_set_multicast,
+};
+
+/**
+ * ks8695_probe - Probe and initialise a KS8695 ethernet interface
+ * @pdev: The platform device to probe
+ *
+ * Initialise a KS8695 ethernet device from platform data.
+ *
+ * This driver requires at least one IORESOURCE_MEM for the
+ * registers and two IORESOURCE_IRQ for the RX and TX IRQs
+ * respectively. It can optionally take an additional
+ * IORESOURCE_MEM for the switch or PHY in the case of the LAN or
+ * WAN ports, and an IORESOURCE_IRQ for the link IRQ of the WAN
+ * port. An illustrative resource layout follows the function body.
+ */
+static int __devinit
+ks8695_probe(struct platform_device *pdev)
+{
+ struct ks8695_priv *ksp;
+ struct net_device *ndev;
+ struct resource *regs_res, *phyiface_res;
+ struct resource *rxirq_res, *txirq_res, *linkirq_res;
+ int ret = 0;
+ int buff_n;
+ u32 machigh, maclow;
+
+ /* Initialise a net_device */
+ ndev = alloc_etherdev(sizeof(struct ks8695_priv));
+ if (!ndev) {
+ dev_err(&pdev->dev, "could not allocate device.\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ dev_dbg(&pdev->dev, "ks8695_probe() called\n");
+
+ /* Configure our private structure a little */
+ ksp = netdev_priv(ndev);
+ memset(ksp, 0, sizeof(struct ks8695_priv));
+
+ ksp->dev = &pdev->dev;
+ ksp->ndev = ndev;
+ ksp->msg_enable = NETIF_MSG_LINK;
+
+ /* Retrieve resources */
+ regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+
+ if (!(regs_res && rxirq_res && txirq_res)) {
+ dev_err(ksp->dev, "insufficient resources\n");
+ ret = -ENOENT;
+ goto failure;
+ }
+
+ ksp->regs_req = request_mem_region(regs_res->start,
+ resource_size(regs_res),
+ pdev->name);
+
+ if (!ksp->regs_req) {
+ dev_err(ksp->dev, "cannot claim register space\n");
+ ret = -EIO;
+ goto failure;
+ }
+
+ ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));
+
+ if (!ksp->io_regs) {
+ dev_err(ksp->dev, "failed to ioremap registers\n");
+ ret = -EINVAL;
+ goto failure;
+ }
+
+ if (phyiface_res) {
+ ksp->phyiface_req =
+ request_mem_region(phyiface_res->start,
+ resource_size(phyiface_res),
+ phyiface_res->name);
+
+ if (!ksp->phyiface_req) {
+ dev_err(ksp->dev,
+ "cannot claim switch register space\n");
+ ret = -EIO;
+ goto failure;
+ }
+
+ ksp->phyiface_regs = ioremap(phyiface_res->start,
+ resource_size(phyiface_res));
+
+ if (!ksp->phyiface_regs) {
+ dev_err(ksp->dev,
+ "failed to ioremap switch registers\n");
+ ret = -EINVAL;
+ goto failure;
+ }
+ }
+
+ ksp->rx_irq = rxirq_res->start;
+ ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
+ ksp->tx_irq = txirq_res->start;
+ ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
+ ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
+ ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
+ linkirq_res->name : "Ethernet Link";
+
+ /* driver system setup */
+ ndev->netdev_ops = &ks8695_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+
+	/* Retrieve the default MAC address from the chip; the
+	 * bootloader should have left it in there for us.
+	 */
+
+ machigh = ks8695_readreg(ksp, KS8695_MAH);
+ maclow = ks8695_readreg(ksp, KS8695_MAL);
+
+ ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
+ ndev->dev_addr[1] = machigh & 0xFF;
+ ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
+ ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
+ ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
+ ndev->dev_addr[5] = maclow & 0xFF;
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
+ "set using ifconfig\n", ndev->name);
+
+ /* In order to be efficient memory-wise, we allocate both
+ * rings in one go.
+ */
+ ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
+ &ksp->ring_base_dma, GFP_KERNEL);
+ if (!ksp->ring_base) {
+ ret = -ENOMEM;
+ goto failure;
+ }
+
+ /* Specify the TX DMA ring buffer */
+ ksp->tx_ring = ksp->ring_base;
+ ksp->tx_ring_dma = ksp->ring_base_dma;
+
+ /* And initialise the queue's lock */
+ spin_lock_init(&ksp->txq_lock);
+
+ /* Specify the RX DMA ring buffer */
+ ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
+ ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;
+
+ /* Zero the descriptor rings */
+ memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
+ memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);
+
+ /* Build the rings */
+ for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
+ ksp->tx_ring[buff_n].next_desc =
+ cpu_to_le32(ksp->tx_ring_dma +
+ (sizeof(struct tx_ring_desc) *
+ ((buff_n + 1) & MAX_TX_DESC_MASK)));
+ }
+
+ for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
+ ksp->rx_ring[buff_n].next_desc =
+ cpu_to_le32(ksp->rx_ring_dma +
+ (sizeof(struct rx_ring_desc) *
+ ((buff_n + 1) & MAX_RX_DESC_MASK)));
+ }
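+
+	/* Each ring is circular: the descriptor counts are powers of two,
+	 * so masking (buff_n + 1) makes the final next_desc pointer wrap
+	 * back to the base of its ring.
+	 */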
+
+ /* Initialise the port (physically) */
+ if (ksp->phyiface_regs && ksp->link_irq == -1) {
+ ks8695_init_switch(ksp);
+ ksp->dtype = KS8695_DTYPE_LAN;
+ } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
+ ks8695_init_wan_phy(ksp);
+ ksp->dtype = KS8695_DTYPE_WAN;
+ } else {
+ /* No initialisation since HPNA does not have a PHY */
+ ksp->dtype = KS8695_DTYPE_HPNA;
+ }
+
+ /* And bring up the net_device with the net core */
+ platform_set_drvdata(pdev, ndev);
+ ret = register_netdev(ndev);
+
+ if (ret == 0) {
+ dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
+ ks8695_port_type(ksp), ndev->dev_addr);
+ } else {
+ /* Report the failure to register the net_device */
+ dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
+ goto failure;
+ }
+
+ /* All is well */
+ return 0;
+
+ /* Error exit path */
+failure:
+ ks8695_release_device(ksp);
+ free_netdev(ndev);
+
+ return ret;
+}
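+
+/* Illustration only, not built with the driver: ks8695_probe() expects
+ * its platform device to carry the resources described above.  A
+ * hypothetical board file entry for the WAN port, where the base
+ * addresses and IRQ numbers are placeholders and entirely
+ * machine-specific, might look like:
+ *
+ *	static struct resource wan_resources[] = {
+ *		{ .start = WAN_REGS_BASE, .end = WAN_REGS_BASE + 0xff,
+ *		  .flags = IORESOURCE_MEM },
+ *		{ .start = WAN_PHY_BASE, .end = WAN_PHY_BASE + 0xff,
+ *		  .flags = IORESOURCE_MEM },
+ *		{ .start = WAN_RX_IRQ, .end = WAN_RX_IRQ,
+ *		  .name = "Ethernet RX", .flags = IORESOURCE_IRQ },
+ *		{ .start = WAN_TX_IRQ, .end = WAN_TX_IRQ,
+ *		  .name = "Ethernet TX", .flags = IORESOURCE_IRQ },
+ *		{ .start = WAN_LINK_IRQ, .end = WAN_LINK_IRQ,
+ *		  .name = "Ethernet Link", .flags = IORESOURCE_IRQ },
+ *	};
+ *
+ * and would be registered as a platform device whose name matches the
+ * driver's MODULENAME.
+ */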
+
+/**
+ * ks8695_drv_suspend - Suspend a KS8695 ethernet platform device.
+ * @pdev: The device to suspend
+ * @state: The suspend state
+ *
+ * This routine detaches and shuts down a KS8695 ethernet device.
+ */
+static int
+ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ ksp->in_suspend = 1;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ks8695_shutdown(ksp);
+ }
+
+ return 0;
+}
+
+/**
+ * ks8695_drv_resume - Resume a KS8695 ethernet platform device.
+ * @pdev: The device to resume
+ *
+ * This routine re-initialises and re-attaches a KS8695 ethernet
+ * device.
+ */
+static int
+ks8695_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ ks8695_reset(ksp);
+ ks8695_init_net(ksp);
+ ks8695_set_multicast(ndev);
+ netif_device_attach(ndev);
+ }
+
+ ksp->in_suspend = 0;
+
+ return 0;
+}
+
+/**
+ * ks8695_drv_remove - Remove a KS8695 net device on driver unload.
+ * @pdev: The platform device to remove
+ *
+ * This unregisters and releases a KS8695 ethernet device.
+ */
+static int __devexit
+ks8695_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+ ks8695_release_device(ksp);
+ free_netdev(ndev);
+
+ dev_dbg(&pdev->dev, "released and freed device\n");
+ return 0;
+}
+
+static struct platform_driver ks8695_driver = {
+ .driver = {
+ .name = MODULENAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8695_probe,
+ .remove = __devexit_p(ks8695_drv_remove),
+ .suspend = ks8695_drv_suspend,
+ .resume = ks8695_drv_resume,
+};
+
+/* Module interface */
+
+static int __init
+ks8695_init(void)
+{
+ printk(KERN_INFO "%s Ethernet driver, V%s\n",
+ MODULENAME, MODULEVERSION);
+
+ return platform_driver_register(&ks8695_driver);
+}
+
+static void __exit
+ks8695_cleanup(void)
+{
+ platform_driver_unregister(&ks8695_driver);
+}
+
+module_init(ks8695_init);
+module_exit(ks8695_cleanup);
+
+MODULE_AUTHOR("Simtec Electronics");
+MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MODULENAME);
+
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
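+
+/* The watchdog parameter is taken in milliseconds and fed to
+ * ndev->watchdog_timeo via msecs_to_jiffies() in ks8695_probe(); for
+ * example, loading the module with "watchdog=6000" would request a
+ * six second transmit timeout.  The 0400 permission means it can only
+ * be set at load time.
+ */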
diff --git a/drivers/net/arm/ks8695net.h b/drivers/net/arm/ks8695net.h
new file mode 100644
index 00000000000..80eff6ea516
--- /dev/null
+++ b/drivers/net/arm/ks8695net.h
@@ -0,0 +1,107 @@
+/*
+ * Micrel KS8695 (Centaur) Ethernet.
+ *
+ * Copyright 2008 Simtec Electronics
+ * Daniel Silverstone <dsilvers@simtec.co.uk>
+ * Vincent Sanders <vince@simtec.co.uk>
+ */
+
+#ifndef KS8695NET_H
+#define KS8695NET_H
+
+/* Receive descriptor flags */
+#define RDES_OWN (1 << 31) /* Ownership */
+#define RDES_FS (1 << 30) /* First Descriptor */
+#define RDES_LS (1 << 29) /* Last Descriptor */
+#define RDES_IPE (1 << 28) /* IP Checksum error */
+#define RDES_TCPE (1 << 27) /* TCP Checksum error */
+#define RDES_UDPE (1 << 26) /* UDP Checksum error */
+#define RDES_ES (1 << 25) /* Error summary */
+#define RDES_MF (1 << 24) /* Multicast Frame */
+#define RDES_RE (1 << 19) /* MII Error reported */
+#define RDES_TL (1 << 18) /* Frame too Long */
+#define RDES_RF (1 << 17) /* Runt Frame */
+#define RDES_CE (1 << 16) /* CRC error */
+#define RDES_FT (1 << 15) /* Frame Type */
+#define RDES_FLEN (0x7ff) /* Frame Length */
+
+#define RDES_RER (1 << 25) /* Receive End of Ring */
+#define RDES_RBS (0x7ff) /* Receive Buffer Size */
+
+/* Transmit descriptor flags */
+
+#define TDES_OWN (1 << 31) /* Ownership */
+
+#define TDES_IC (1 << 31) /* Interrupt on Completion */
+#define TDES_FS (1 << 30) /* First Segment */
+#define TDES_LS (1 << 29) /* Last Segment */
+#define TDES_IPCKG (1 << 28) /* IP Checksum generate */
+#define TDES_TCPCKG (1 << 27) /* TCP Checksum generate */
+#define TDES_UDPCKG (1 << 26) /* UDP Checksum generate */
+#define TDES_TER (1 << 25) /* Transmit End of Ring */
+#define TDES_TBS (0x7ff) /* Transmit Buffer Size */
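+
+/* Example: ks8695net.c encodes a single-fragment frame's status word as
+ * TDES_IC | TDES_FS | TDES_LS | (skb->len & TDES_TBS), so a 1514 byte
+ * frame that should raise a completion interrupt becomes 0xe00005ea.
+ */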
+
+/*
+ * Network controller register offsets
+ */
+#define KS8695_DTXC (0x00) /* DMA Transmit Control */
+#define KS8695_DRXC (0x04) /* DMA Receive Control */
+#define KS8695_DTSC (0x08) /* DMA Transmit Start Command */
+#define KS8695_DRSC (0x0c) /* DMA Receive Start Command */
+#define KS8695_TDLB (0x10) /* Transmit Descriptor List
+ * Base Address
+ */
+#define KS8695_RDLB (0x14) /* Receive Descriptor List
+ * Base Address
+ */
+#define KS8695_MAL (0x18) /* MAC Station Address Low */
+#define KS8695_MAH (0x1c) /* MAC Station Address High */
+#define KS8695_AAL_(n) (0x80 + ((n)*8)) /* MAC Additional
+ * Station Address
+ * (0..15) Low
+ */
+#define KS8695_AAH_(n) (0x84 + ((n)*8)) /* MAC Additional
+ * Station Address
+ * (0..15) High
+ */
+
+
+/* DMA Transmit Control Register */
+#define DTXC_TRST (1 << 31) /* Soft Reset */
+#define DTXC_TBS (0x3f << 24) /* Transmit Burst Size */
+#define DTXC_TUCG (1 << 18) /* Transmit UDP
+ * Checksum Generate
+ */
+#define DTXC_TTCG (1 << 17) /* Transmit TCP
+ * Checksum Generate
+ */
+#define DTXC_TICG (1 << 16) /* Transmit IP
+ * Checksum Generate
+ */
+#define DTXC_TFCE (1 << 9) /* Transmit Flow
+ * Control Enable
+ */
+#define DTXC_TLB (1 << 8) /* Loopback mode */
+#define DTXC_TEP (1 << 2) /* Transmit Enable Padding */
+#define DTXC_TAC (1 << 1) /* Transmit Add CRC */
+#define DTXC_TE (1 << 0) /* TX Enable */
+
+/* DMA Receive Control Register */
+#define DRXC_RBS (0x3f << 24) /* Receive Burst Size */
+#define DRXC_RUCC (1 << 18) /* Receive UDP Checksum check */
+#define DRXC_RTCG (1 << 17) /* Receive TCP Checksum check */
+#define DRXC_RICG (1 << 16) /* Receive IP Checksum check */
+#define DRXC_RFCE (1 << 9) /* Receive Flow Control
+ * Enable
+ */
+#define DRXC_RB (1 << 6) /* Receive Broadcast */
+#define DRXC_RM (1 << 5) /* Receive Multicast */
+#define DRXC_RU (1 << 4) /* Receive Unicast */
+#define DRXC_RERR (1 << 3) /* Receive Error Frame */
+#define DRXC_RA (1 << 2) /* Receive All */
+#define DRXC_RE (1 << 0) /* RX Enable */
+
+/* Additional Station Address High */
+#define AAH_E (1 << 31) /* Address Enabled */
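+
+/* Assuming the additional-address registers follow the same byte layout
+ * as KS8695_MAL/KS8695_MAH, slot n is enabled by writing the low 32
+ * bits of the station address to KS8695_AAL_(n) and the remaining high
+ * 16 bits, together with AAH_E, to KS8695_AAH_(n).
+ */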
+
+#endif /* KS8695NET_H */